legacy107 committed on
Commit
29475be
·
1 Parent(s): 45ff783

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +6 -4
app.py CHANGED
@@ -33,7 +33,7 @@ max_target_length = 200
33
  # Load your dataset
34
  dataset = datasets.load_dataset("minh21/COVID-QA-Chunk-64-testset-biencoder-data-90_10", split="train")
35
  dataset = dataset.shuffle()
36
- dataset = dataset.select(range(10))
37
 
38
  # Context chunking
39
  min_sentences_per_chunk = 3
@@ -145,6 +145,7 @@ def retrieve_context(question, contexts):
145
  def generate_answer(question, context, ground, do_pretrained, do_natural, do_pretrained_natural):
146
  contexts = chunk_splitter(clean_data(context))
147
  context = retrieve_context(question, contexts)
 
148
 
149
  # Combine question and context
150
  input_text = f"question: {question} context: {context}"
@@ -182,7 +183,7 @@ def generate_answer(question, context, ground, do_pretrained, do_natural, do_pre
182
  if do_pretrained_natural:
183
  pretrained_paraphrased_answer = paraphrase_answer(question, generated_answer, True)
184
 
185
- return generated_answer, context, paraphrased_answer, pretrained_answer, pretrained_paraphrased_answer
186
 
187
 
188
  # Define a function to list examples from the dataset
@@ -209,10 +210,11 @@ iface = gr.Interface(
209
  ],
210
  outputs=[
211
  Textbox(label="Generated Answer"),
212
- Textbox(label="Retrieved Context"),
213
  Textbox(label="Natural Answer"),
 
214
  Textbox(label="Pretrained Model's Answer"),
215
- Textbox(label="Pretrained Model's Natural Answer")
 
216
  ],
217
  examples=list_examples(),
218
  examples_per_page=1,
 
33
  # Load your dataset
34
  dataset = datasets.load_dataset("minh21/COVID-QA-Chunk-64-testset-biencoder-data-90_10", split="train")
35
  dataset = dataset.shuffle()
36
+ dataset = dataset.select([6, 18, 24, 156, 650, 19, 31, 97, 133, 183])
37
 
38
  # Context chunking
39
  min_sentences_per_chunk = 3
 
145
  def generate_answer(question, context, ground, do_pretrained, do_natural, do_pretrained_natural):
146
  contexts = chunk_splitter(clean_data(context))
147
  context = retrieve_context(question, contexts)
148
+ ground_in_context = ground in context
149
 
150
  # Combine question and context
151
  input_text = f"question: {question} context: {context}"
 
183
  if do_pretrained_natural:
184
  pretrained_paraphrased_answer = paraphrase_answer(question, generated_answer, True)
185
 
186
+ return generated_answer, paraphrased_answer, ground_in_context, pretrained_answer, pretrained_paraphrased_answer, context
187
 
188
 
189
  # Define a function to list examples from the dataset
 
210
  ],
211
  outputs=[
212
  Textbox(label="Generated Answer"),
 
213
  Textbox(label="Natural Answer"),
214
+ Checkbox(label="Ground truth in the retrieved context"),
215
  Textbox(label="Pretrained Model's Answer"),
216
+ Textbox(label="Pretrained Model's Natural Answer"),
217
+ Textbox(label="Retrieved Context")
218
  ],
219
  examples=list_examples(),
220
  examples_per_page=1,