domenicrosati committed
Commit 2454b21 · 1 Parent(s): 165843f

control handle impossible

Files changed (1): app.py (+6 -3)
app.py CHANGED
@@ -143,7 +143,7 @@ def init_models():
     device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
     question_answerer = pipeline(
         "question-answering", model='sultan/BioM-ELECTRA-Large-SQuAD2-BioASQ8B',
-        device=device, handle_impossible_answer=True,
+        device=device, handle_impossible_answer=False,
     )
     reranker = CrossEncoder('cross-encoder/ms-marco-MiniLM-L-6-v2', device=device)
     # queryexp_tokenizer = AutoTokenizer.from_pretrained("doc2query/all-with_prefix-t5-base-v1")
@@ -214,6 +214,9 @@ with st.expander("Settings (strictness, context limit, top hits)"):
     concat_passages = st.radio(
         "Concatenate passages as one long context?",
         ('yes', 'no'))
+    present_impossible = st.radio(
+        "Present impossible answers? (if the model thinks its impossible to answer should it still try?)",
+        ('no', 'yes'))
     support_all = st.radio(
         "Use abstracts and titles as a ranking signal (if the words are matched in the abstract then the document is more relevant)?",
         ('no', 'yes'))
@@ -323,10 +326,10 @@ def run_query(query, progress_bar):
     progress_bar.progress(50)
     if concat_passages == 'yes':
         context = '\n---'.join(contexts)
-        model_results = qa_model(question=query, context=context, top_k=10, doc_stride=512 // 2, max_answer_len=128, max_seq_len=512)
+        model_results = qa_model(question=query, context=context, top_k=10, doc_stride=512 // 2, max_answer_len=128, max_seq_len=512, handle_impossible_answer=present_impossible == 'yes')
     else:
         context = ['\n---\n'+ctx for ctx in contexts]
-        model_results = qa_model(question=[query]*len(contexts), context=context)
+        model_results = qa_model(question=[query]*len(contexts), context=context, handle_impossible_answer=present_impossible == 'yes')
 
     results = []
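The flag this commit wires up to the new present_impossible radio comes from the transformers question-answering pipeline: with handle_impossible_answer=True, a SQuAD2-style model such as the BioM-ELECTRA checkpoint above may return the null (empty) answer when it judges the question unanswerable from the context; with False it always returns the best-scoring span. A minimal sketch of that behavior, assuming the standard transformers pipeline API; the question and context strings here are illustrative:

from transformers import pipeline

qa_model = pipeline(
    "question-answering",
    model='sultan/BioM-ELECTRA-Large-SQuAD2-BioASQ8B',
)

# With handle_impossible_answer=True (present_impossible == 'yes'), a
# SQuAD2-trained model may return the null answer: an empty string with
# start == end == 0, whose score competed against the best span answer.
print(qa_model(
    question="Which drug reduced mortality?",
    context="The trial enrolled 40 patients and ran for six months.",
    handle_impossible_answer=True,
))
# e.g. {'score': ..., 'start': 0, 'end': 0, 'answer': ''}

# With handle_impossible_answer=False (the default now set in init_models),
# the pipeline always returns the best-scoring span, even when the context
# cannot answer the question.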