Spaces · Sean MacAvaney committed · Commit 924cd68 · Parent(s): 1e7bc58

update
README.md CHANGED
@@ -9,107 +9,7 @@ app_file: app.py
 pinned: false
 ---
 
-<style>
-.transformer {
-  display: inline-block;
-  background: #8facdb;
-  position: relative;
-  height: 60px;
-  line-height: 60px;
-  padding: 0 24px;
-  margin: 0 18px;
-  color: #333;
-  cursor: help;
-}
-.transformer::before {
-  content: "";
-  position: absolute;
-  bottom: 0;
-  top: 0;
-  left: -15px;
-  border-top: 30px solid #8facdb;
-  border-bottom: 30px solid #8facdb;
-  border-left: 15px solid transparent;
-}
-.transformer::after {
-  content: "";
-  position: absolute;
-  bottom: 0;
-  top: 0;
-  right: -15px;
-  border-top: 30px solid transparent;
-  border-bottom: 30px solid transparent;
-  border-left: 15px solid #8facdb;
-}
-.transformer.boring {
-  background: #ddd;
-}
-.transformer.boring::before {
-  border-top-color: #ddd;
-  border-bottom-color: #ddd;
-}
-.transformer.boring::after {
-  border-left-color: #ddd;
-}
-.df {
-  width: 24px;
-  line-height: 24px;
-  text-align: center;
-  border: 3px double #888;
-  background-color: #eee;
-  color: #333;
-  border-radius: 4px;
-  display: inline-block;
-  box-sizing: content-box;
-  cursor: help;
-  margin: 0 -25px;
-  opacity: 0.5;
-  z-index: 1;
-  position: relative;
-}
-.df:hover {
-  opacity: 1;
-}
-.pipeline {
-  text-align: center;
-}
-.artefact {
-  width: 32px;
-  line-height: 32px;
-  background: #eee;
-  display: inline-block;
-  box-sizing: content-box;
-  cursor: help;
-  margin: 0 -25px;
-  z-index: 1;
-  opacity: 0.5;
-  position: relative;
-  color: #333;
-  text-align: center;
-  border: 3px double #888;
-  border-radius: 50%
-}
-.artefact:hover {
-  opacity: 1;
-}
-.transformer .artefact {
-  bottom: -12px;
-  left: 50%;
-  margin-left: -16px;
-}
-</style>
+# 🐕 PyTerrier: SPLADE
 
 This is a demonstration of [PyTerrier's SPLADE package](https://github.com/cmacdonald/pyt_splade). The SPLADE model encodes queries and documents
 into sparse representations, which can then be used for indexing and retrieval.
-
-### Query Encoding
-
-Let's start by exploring SPLADE's query encoder. The query encoder is a `Q→Q` (query rewriting, query-to-query) transformer, and can be used in pipelines accordingly.
-It maps a query string into [MatchOp](https://terrier-core.readthedocs.io/en/latest/querylanguage.html#matching-op-query-language) query with terms from the
-query re-weighted and weighted expansion terms added.
-
-<div class="pipeline">
-  <div class="df" title="Query Frame">Q</div>
-  <div class="transformer" title="SPLADE Query Transformer">SPLADE</div>
-  <div class="df" title="Query Frame">Q</div>
-</div>
app.py CHANGED
@@ -1,14 +1,13 @@
 import re
 import json
-import base64
 import pandas as pd
 import gradio as gr
 import pyterrier as pt
 pt.init()
 import pyt_splade
-
-
-
+from pyterrier_gradio import Demo, MarkdownFile, interface, df2code, code2md
+factory_max = pyt_splade.SpladeFactory(agg='max')
+factory_sum = pyt_splade.SpladeFactory(agg='sum')
 
 COLAB_NAME = 'pyterrier_splade.ipynb'
 COLAB_INSTALL = '''
@@ -16,30 +15,6 @@ COLAB_INSTALL = '''
 !pip install -q git+https://github.com/seanmacavaney/pyt_splade@misc
 '''.strip()
 
-def df2code(df):
-  rows = []
-  for row in df.itertuples(index=False):
-    rows.append(f' {dict(row._asdict())},')
-  rows = '\n'.join(rows)
-  return f'''pd.DataFrame([
-{rows}
-])'''
-
-def code2colab(code):
-  enc_code = base64.b64encode((COLAB_INSTALL + '\n\n' + code.strip()).encode()).decode()
-  dec = base64.b64decode(enc_code)
-  url = f'https://colaburl.macavaney.us/?py64={enc_code}&name={COLAB_NAME}'
-  return f'<div style="text-align: center; margin-bottom: -16px;"><a href="{url}" rel="nofollow" target="_blank"><img src="https://colab.research.google.com/assets/colab-badge.svg" alt="Open In Colab" style="margin: 0; display: inline-block;" /></a></div>'
-
-def code2md(code):
-  return f'''
-{code2colab(code)}
-
-```python
-{code.strip()}
-```
-'''
-
 def generate_vis(df, mode='Document'):
   if len(df) == 0:
     return ''
@@ -50,11 +25,11 @@ def generate_vis(df, mode='Document'):
     if mode == 'Query':
       tok_scores = {m.group(2): float(m.group(1)) for m in re.finditer(r'combine:0=([0-9.]+)\(([^)]+)\)', row.query)}
       max_score = max(tok_scores.values())
-      orig_tokens =
+      orig_tokens = factory_max.tokenizer.tokenize(row.query_0)
       id = row.qid
     else:
       tok_scores = row.toks
-      orig_tokens =
+      orig_tokens = factory_max.tokenizer.tokenize(row.text)
       id = row.docno
     def toks2span(toks):
       return '<kbd> </kbd>'.join(f'<kbd style="background-color: rgba(66, 135, 245, {tok_scores.get(t, 0)/max_score});">{t}</kbd>' for t in toks)
@@ -71,170 +46,68 @@
 ''')
   return '\n'.join(result)
 
-def predict_query(input):
+def predict_query(input, agg):
   code = f'''import pandas as pd
 import pyterrier as pt ; pt.init()
 import pyt_splade
 
-factory = pyt_splade.SpladeFactory()
+factory = pyt_splade.SpladeFactory(agg={agg})
 
 query_pipeline = factory.query()
 
 query_pipeline({df2code(input)})
 '''
-
+  pipeline = {
+    'max': factory_max,
+    'sum': factory_sum
+  }[agg].query()
+  res = pipeline(input)
   vis = generate_vis(res, mode='Query')
-  return (res, code2md(code), vis)
+  return (res, code2md(code, COLAB_INSTALL, COLAB_NAME), vis)
 
-def predict_doc(input):
+def predict_doc(input, agg):
   code = f'''import pandas as pd
 import pyterrier as pt ; pt.init()
 import pyt_splade
 
-factory = pyt_splade.SpladeFactory()
+factory = pyt_splade.SpladeFactory(agg={agg})
 
 doc_pipeline = factory.indexing()
 
 doc_pipeline({df2code(input)})
 '''
-
+  pipeline = {
+    'max': factory_max,
+    'sum': factory_sum
+  }[agg].indexing()
+  res = pipeline(input)
   vis = generate_vis(res, mode='Document')
   res['toks'] = [json.dumps({k: round(v, 4) for k, v in t.items()}) for t in res['toks']]
-  return (res, code2md(code), vis)
-
-
-
-
-
-
+  return (res, code2md(code, COLAB_INSTALL, COLAB_NAME), vis)
+
+interface(
+  MarkdownFile('README.md'),
+  MarkdownFile('query.md'),
+  Demo(
+    predict_query,
+    pd.DataFrame([
      {'qid': '1112389', 'query': 'what is the county for grand rapids, mn'},
-])
-
-
-
-
-
-
-
-
-
-      row_count=1,
-      wrap=True,
-      value=example_inp,
-    ))
-    submit_btn = gr.Button("Submit", variant="primary")
-    with gr.Column(scale=2):
-      with gr.Tab('Pipeline Output'):
-        outputs.append(gr.Dataframe(
-          headers=["qid", "query", "docno", "score", "rank", "text"],
-          datatype=["str", "str", "str", "number", "number", "str"],
-          col_count=6,
-          row_count=1,
-          wrap=True,
-          value=example_out[0],
-        ))
-      with gr.Tab('Code'):
-        outputs.append(gr.Markdown(value=example_out[1]))
-      with gr.Tab('Visualisation'):
-        outputs.append(gr.HTML(value=example_out[2]))
-    submit_btn.click(predict_query, inputs, outputs, api_name="predict_query", scroll_to_output=True)
-
-  gr.Markdown('''
-### Document Encoding
-
-The document encoder works similarly to the query encoder: it is a `D→D` (document rewriting, doc-to-doc) transformer, and can be used in pipelines accordingly.
-It maps a document's text into a dictionary with terms from the document re-weighted and weighted expansion terms added.
-
-<div class="pipeline">
-  <div class="df" title="Document Frame">D</div>
-  <div class="transformer" title="SPLADE Indexing Transformer">SPLADE</div>
-  <div class="df" title="Document Frame">D</div>
-</div>
-
-''')
-
-  example_inp = pd.DataFrame([
+    ]),
+    [
+      gr.Dropdown(choices=['max', 'sum'], value='max', label='Aggregation'),
+    ],
+    scale=2/3
+  ),
+  MarkdownFile('doc.md'),
+  Demo(
+    predict_doc,
+    pd.DataFrame([
      {'docno': '0', 'text': 'The presence of communication amid scientific minds was equally important to the success of the Manhattan Project as scientific intellect was. The only cloud hanging over the impressive achievement of the atomic researchers and engineers is what their success truly meant; hundreds of thousands of innocent lives obliterated.'},
-])
-
-
-
-
-
-
-
-      datatype=["str", "str"],
-      col_count=(2, "fixed"),
-      row_count=1,
-      wrap=True,
-      value=example_inp,
-    ))
-    submit_btn = gr.Button("Submit", variant="primary")
-    with gr.Column(scale=2):
-      with gr.Tab("Pipeline Output"):
-        outputs.append(gr.Dataframe(
-          headers=["qid", "query", "docno", "score", "rank", "text"],
-          datatype=["str", "str", "str", "number", "number", "str"],
-          col_count=6,
-          row_count=1,
-          wrap=True,
-          value=example_out[0],
-        ))
-      with gr.Tab('Code'):
-        outputs.append(gr.Markdown(value=example_out[1]))
-      with gr.Tab('Visualisation'):
-        outputs.append(gr.HTML(value=example_out[2]))
-    submit_btn.click(predict_doc, inputs, outputs, api_name="predict_doc", scroll_to_output=True)
-
-  gr.Markdown('''
-### Putting it all together
-
-When you use the document encoder in an indexing pipeline, the rewritting document contents are indexed:
-
-<div class="pipeline">
-  <div class="df" title="Document Frame">D</div>
-  <div class="transformer" title="SPLADE Indexing Transformer">SPLADE</div>
-  <div class="df" title="Document Frame">D</div>
-  <div class="transformer boring" title="Indexer">Indexer</div>
-  <div class="artefact" title="SPLADE Index">IDX</div>
-</div>
-
-```python
-import pyterrer as pt
-pt.init(version='snapshot')
-import pyt_splade
-
-dataset = pt.get_dataset('irds:msmarco-passage')
-factory = pyt_splade.SpladeFactory()
-
-indexer = pt.IterDictIndexer('./msmarco_psg', pretokenized=True)
-
-indxer_pipe = factory.indexing() >> indexer
-indxer_pipe.index(dataset.get_corpus_iter())
-```
-
-Once you built an index, you can build a retrieval pipeline that first encodes the query,
-and then performs retrieval:
-
-<div class="pipeline">
-  <div class="df" title="Query Frame">Q</div>
-  <div class="transformer" title="SPLADE Query Transformer">SPLADE</div>
-  <div class="df" title="Query Frame">Q</div>
-  <div class="transformer boring" title="Term Frequency Transformer">TF Retriever <div class="artefact" title="SPLADE Index">IDX</div></div>
-  <div class="df" title="Result Frame">R</div>
-</div>
-
-```python
-splade_retr = factory.query() >> pt.BatchRetrieve('./msmarco_psg', wmodel='Tf')
-```
-
-### References & Credits
-
-This package uses [Naver's SPLADE repository](https://github.com/naver/splade).
-
-- Thibault Formal, Benjamin Piwowarski, Stéphane Clinchant. [SPLADE: Sparse Lexical and Expansion Model for First Stage Ranking](https://arxiv.org/abs/2107.05720). SIGIR 2021.
-- Craig Macdonald, Nicola Tonellotto, Sean MacAvaney, Iadh Ounis. [PyTerrier: Declarative Experimentation in Python from BM25 to Dense Retrieval](https://dl.acm.org/doi/abs/10.1145/3459637.3482013). CIKM 2021.
-''')
-
-
-demo.launch(share=False)
+    ]),
+    [
+      gr.Dropdown(choices=['max', 'sum'], value='max', label='Aggregation'),
+    ],
+    scale=2/3
+  ),
+  MarkdownFile('wrapup.md'),
+).launch(share=True)
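A note on the visualisation code retained above: the query-mode branch of `generate_vis` recovers per-token weights by parsing the MatchOp string produced by the query encoder. Below is a small self-contained sketch of that parsing step, using the same regular expression as the code above; the MatchOp string and weights are illustrative, not real model output.

```python
import re

# Illustrative MatchOp-style query of the kind factory.query() produces.
matchop_query = "combine:0=1.52(rapid) combine:0=0.87(county) combine:0=0.31(grand)"

# Same pattern as generate_vis: group 1 is the weight, group 2 is the token.
tok_scores = {m.group(2): float(m.group(1))
              for m in re.finditer(r'combine:0=([0-9.]+)\(([^)]+)\)', matchop_query)}
max_score = max(tok_scores.values())

print(tok_scores)  # {'rapid': 1.52, 'county': 0.87, 'grand': 0.31}
# Normalised weights drive the opacity of each <kbd> token in the visualisation.
print({t: round(s / max_score, 2) for t, s in tok_scores.items()})
```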
doc.md ADDED
@@ -0,0 +1,10 @@
+### Document Encoding
+
+The document encoder works similarly to the query encoder: it is a `D→D` (document rewriting, doc-to-doc) transformer, and can be used in pipelines accordingly.
+It maps a document's text into a dictionary with terms from the document re-weighted and weighted expansion terms added.
+
+<div class="pipeline">
+  <div class="df" title="Document Frame">D</div>
+  <div class="transformer" title="SPLADE Indexing Transformer">SPLADE</div>
+  <div class="df" title="Document Frame">D</div>
+</div>
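To make the `D→D` step concrete, here is a hedged sketch (not part of the commit) of the document encoder applied to a one-row document frame, using the `pyt_splade` API as it appears in this commit's `app.py`; the token weights in the final comment are illustrative.

```python
import pandas as pd
import pyterrier as pt ; pt.init()
import pyt_splade

factory = pyt_splade.SpladeFactory()
doc_pipeline = factory.indexing()

# One-row document frame, as in the demo's example input.
docs = pd.DataFrame([
  {'docno': '0', 'text': 'The presence of communication amid scientific minds was equally important to the success of the Manhattan Project as scientific intellect was.'},
])

# D→D: each document gains a 'toks' column holding a dict of weighted
# (and expanded) tokens, e.g. {'communication': 1.7, 'scientific': 1.2, ...}
# (values illustrative), ready for a pretokenized indexer.
encoded = doc_pipeline(docs)
```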
query.md ADDED
@@ -0,0 +1,11 @@
+### Query Encoding
+
+Let's start by exploring SPLADE's query encoder. The query encoder is a `Q→Q` (query rewriting, query-to-query) transformer, and can be used in pipelines accordingly.
+It maps a query string into a [MatchOp](https://terrier-core.readthedocs.io/en/latest/querylanguage.html#matching-op-query-language) query, with terms from the
+query re-weighted and weighted expansion terms added.
+
+<div class="pipeline">
+  <div class="df" title="Query Frame">Q</div>
+  <div class="transformer" title="SPLADE Query Transformer">SPLADE</div>
+  <div class="df" title="Query Frame">Q</div>
+</div>
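A similar hedged sketch (not part of the commit) for the `Q→Q` query encoder described above, again using the API as it appears in `app.py`; the rewritten query shown in the comment is illustrative.

```python
import pandas as pd
import pyterrier as pt ; pt.init()
import pyt_splade

factory = pyt_splade.SpladeFactory()
query_pipeline = factory.query()

# One-row query frame, as in the demo's example input.
queries = pd.DataFrame([
  {'qid': '1112389', 'query': 'what is the county for grand rapids, mn'},
])

# Q→Q: the 'query' column is rewritten into a weighted MatchOp query, e.g.
# 'combine:0=1.52(rapid) combine:0=0.87(county) ...' (weights illustrative);
# the original string is preserved in a 'query_0' column.
rewritten = query_pipeline(queries)
```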
requirements.txt CHANGED
@@ -1,4 +1,5 @@
 torch
 python-terrier
+git+https://github.com/seanmacavaney/pyterrier_gradio
 git+https://github.com/naver/splade
 git+https://github.com/seanmacavaney/pyt_splade@misc
wrapup.md ADDED
@@ -0,0 +1,47 @@
+### Putting it all together
+
+When you use the document encoder in an indexing pipeline, the rewritten document contents are indexed:
+
+<div class="pipeline">
+  <div class="df" title="Document Frame">D</div>
+  <div class="transformer" title="SPLADE Indexing Transformer">SPLADE</div>
+  <div class="df" title="Document Frame">D</div>
+  <div class="transformer boring" title="Indexer">Indexer</div>
+  <div class="artefact" title="SPLADE Index">IDX</div>
+</div>
+
+```python
+import pyterrier as pt
+pt.init(version='snapshot')
+import pyt_splade
+
+dataset = pt.get_dataset('irds:msmarco-passage')
+factory = pyt_splade.SpladeFactory()
+
+indexer = pt.IterDictIndexer('./msmarco_psg', pretokenized=True)
+
+indexer_pipe = factory.indexing() >> indexer
+indexer_pipe.index(dataset.get_corpus_iter())
+```
+
+Once you have built an index, you can build a retrieval pipeline that first encodes the query,
+and then performs retrieval:
+
+<div class="pipeline">
+  <div class="df" title="Query Frame">Q</div>
+  <div class="transformer" title="SPLADE Query Transformer">SPLADE</div>
+  <div class="df" title="Query Frame">Q</div>
+  <div class="transformer boring" title="Term Frequency Transformer">TF Retriever <div class="artefact" title="SPLADE Index">IDX</div></div>
+  <div class="df" title="Result Frame">R</div>
+</div>
+
+```python
+splade_retr = factory.query() >> pt.BatchRetrieve('./msmarco_psg', wmodel='Tf')
+```
+
+### References & Credits
+
+This package uses [Naver's SPLADE repository](https://github.com/naver/splade).
+
+- Thibault Formal, Benjamin Piwowarski, Stéphane Clinchant. [SPLADE: Sparse Lexical and Expansion Model for First Stage Ranking](https://arxiv.org/abs/2107.05720). SIGIR 2021.
+- Craig Macdonald, Nicola Tonellotto, Sean MacAvaney, Iadh Ounis. [PyTerrier: Declarative Experimentation in Python from BM25 to Dense Retrieval](https://dl.acm.org/doi/abs/10.1145/3459637.3482013). CIKM 2021.
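As a usage note on the retrieval pipeline defined in `wrapup.md` above: PyTerrier pipelines expose a `search()` convenience method for single queries, so (assuming the index built at `./msmarco_psg` in the snippet above) the SPLADE retriever could be exercised roughly as below. This is a hedged sketch, not part of the commit.

```python
# Run the SPLADE retrieval pipeline for one query; the result frame has
# qid, docno, rank and score columns (plus the rewritten query).
results = splade_retr.search('what is the county for grand rapids mn')
print(results.head())
```

For batch evaluation, the same pipeline can be handed to `pt.Experiment` with a topics and qrels set in the usual PyTerrier way.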