Commit · 165843f
1 Parent(s): f953e7d

push new default settings
app.py CHANGED
@@ -143,7 +143,7 @@ def init_models():
     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
     question_answerer = pipeline(
         "question-answering", model='sultan/BioM-ELECTRA-Large-SQuAD2-BioASQ8B',
-        device=device
+        device=device, handle_impossible_answer=True,
     )
     reranker = CrossEncoder('cross-encoder/ms-marco-MiniLM-L-6-v2', device=device)
     # queryexp_tokenizer = AutoTokenizer.from_pretrained("doc2query/all-with_prefix-t5-base-v1")
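The only functional change in init_models() is passing handle_impossible_answer=True to the question-answering pipeline, which allows the SQuAD2-trained model to return an empty answer when it judges a question unanswerable from the supplied context. A minimal sketch of how a caller could treat that case (built standalone here for illustration; the 0.01 cutoff mirrors the app's confidence_threshold slider default of 1 out of 100 and is not taken verbatim from app.py):

    from transformers import pipeline

    # Same pipeline configuration as in init_models(), rebuilt here for a quick check.
    qa = pipeline(
        "question-answering",
        model="sultan/BioM-ELECTRA-Large-SQuAD2-BioASQ8B",
        handle_impossible_answer=True,
    )

    result = qa(
        question="Which gene is mutated in cystic fibrosis?",
        context="Cystic fibrosis is caused by mutations in the CFTR gene.",
    )

    # With handle_impossible_answer=True the pipeline may return an empty string
    # as the answer when the "no answer" option scores highest.
    if not result["answer"] or result["score"] < 0.01:  # illustrative threshold
        print("No confident answer found.")
    else:
        print(result["answer"], result["score"])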
@@ -213,16 +213,16 @@ st.markdown("""
 with st.expander("Settings (strictness, context limit, top hits)"):
     concat_passages = st.radio(
         "Concatenate passages as one long context?",
-        ('
+        ('yes', 'no'))
     support_all = st.radio(
         "Use abstracts and titles as a ranking signal (if the words are matched in the abstract then the document is more relevant)?",
-        ('
+        ('no', 'yes'))
     support_abstracts = st.radio(
         "Use abstracts as a source document?",
         ('yes', 'no', 'abstract only'))
     strict_lenient_mix = st.radio(
         "Type of strict+lenient combination: Fallback or Mix? If fallback, strict is run first then if the results are less than context_lim we also search lenient. Mix will search them both and let reranking sort em out",
-        ('
+        ('mix', 'fallback'))
     confidence_threshold = st.slider('Confidence threshold for answering questions? This number represents how confident the model should be in the answers it gives. The number is out of 100%', 0, 100, 1)
     use_reranking = st.radio(
         "Use Reranking? Reranking will rerank the top hits using semantic similarity of document and query.",
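Streamlit pre-selects the first element of the options tuple (st.radio's index parameter defaults to 0), so rewriting these tuples is what sets the new defaults: concatenation defaults to 'yes', the title/abstract ranking signal to 'no', and the strict+lenient combination to 'mix'. A small sketch of that behaviour, outside app.py:

    import streamlit as st

    # The first option is pre-selected because index defaults to 0 ...
    concat_passages = st.radio(
        "Concatenate passages as one long context?",
        ('yes', 'no'))

    # ... which is equivalent to spelling the default out explicitly
    # (a distinct key is needed only because this demo repeats the widget).
    concat_passages_explicit = st.radio(
        "Concatenate passages as one long context?",
        ('yes', 'no'),
        index=0,
        key="concat_passages_explicit")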
@@ -323,7 +323,7 @@ def run_query(query, progress_bar):
     progress_bar.progress(50)
     if concat_passages == 'yes':
         context = '\n---'.join(contexts)
-        model_results = qa_model(question=query, context=context, top_k=10)
+        model_results = qa_model(question=query, context=context, top_k=10, doc_stride=512 // 2, max_answer_len=128, max_seq_len=512)
     else:
         context = ['\n---\n'+ctx for ctx in contexts]
         model_results = qa_model(question=[query]*len(contexts), context=context)
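When the passages are concatenated into one long context, the added arguments control how the pipeline windows that text: it is split into overlapping chunks of at most max_seq_len tokens (question included), doc_stride controls the overlap between consecutive chunks, and candidate answer spans are capped at max_answer_len tokens. A rough, illustrative sketch of that windowing on a plain token list (not the pipeline's internal code):

    def sliding_windows(tokens, max_seq_len=512, doc_stride=512 // 2):
        """Split a long token sequence into overlapping windows, mirroring
        how a QA pipeline chunks a context that exceeds max_seq_len."""
        windows, start = [], 0
        while start < len(tokens):
            windows.append(tokens[start:start + max_seq_len])
            if start + max_seq_len >= len(tokens):
                break
            start += max_seq_len - doc_stride  # keep doc_stride tokens of overlap
        return windows

    # A 1200-token context becomes four overlapping windows of at most 512 tokens.
    chunks = sliding_windows(list(range(1200)))
    print([(c[0], c[-1]) for c in chunks])
    # [(0, 511), (256, 767), (512, 1023), (768, 1199)]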