legacy107 committed · Commit 70cdcf7 · Parent: e6f124d

Update app.py

Files changed (1): app.py (+19 -3)
app.py CHANGED
@@ -1,16 +1,20 @@
 import gradio as gr
 from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
 import torch
+import datasets
 
 # Load your fine-tuned model and tokenizer
 model_name = "legacy107/flan-t5-large-bottleneck-adapter-cpgQA-unique"
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
 model.set_active_adapters("question_answering")
-
 max_length = 512
 max_target_length = 128
 
+# Load your dataset
+dataset = datasets.load_dataset("minh21/cpgQA-v1.0-unique-context-test-10-percent", split="test[:10]")
+
+
 # Define your function to generate answers
 def generate_answer(question, context):
     # Combine question and context
@@ -22,7 +26,7 @@ def generate_answer(question, context):
         return_tensors="pt",
         padding="max_length",
         truncation=True,
-        max_length=512,
+        max_length=max_length,
     ).input_ids
 
     # Generate the answer
@@ -34,6 +38,17 @@ def generate_answer(question, context):
 
     return generated_answer
 
+
+# Define a function to list examples from the dataset
+def list_examples():
+    examples = []
+    for example in dataset:
+        context = example["context"]
+        question = example["question"]
+        examples.append([context, question])
+    return examples
+
+
 # Create a Gradio interface
 iface = gr.Interface(
     fn=generate_answer,
@@ -41,7 +56,8 @@ iface = gr.Interface(
         gr.inputs.Textbox(label="Question"),
         gr.inputs.Textbox(label="Context")
     ],
-    outputs=gr.outputs.Textbox(label="Generated Answer")
+    outputs=gr.outputs.Textbox(label="Generated Answer"),
+    examples=list_examples()
)
 
 # Launch the Gradio interface
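
For context, the hunks above elide most of the body of generate_answer. Below is a minimal sketch of how the visible fragments plausibly fit together, assuming a standard encode-generate-decode flow for a seq2seq model; the combined input format, the torch.no_grad() wrapper, and the decode step are assumptions and are not shown in this diff.

def generate_answer(question, context):
    # Combine question and context (exact format assumed; elided in the diff)
    input_text = f"question: {question} context: {context}"

    # Tokenize, matching the arguments visible in the hunks
    input_ids = tokenizer(
        input_text,
        return_tensors="pt",
        padding="max_length",
        truncation=True,
        max_length=max_length,
    ).input_ids

    # Generate the answer and decode it back to text
    with torch.no_grad():
        output_ids = model.generate(input_ids, max_length=max_target_length)
    generated_answer = tokenizer.decode(output_ids[0], skip_special_tokens=True)

    return generated_answer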
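One detail worth checking when reusing this pattern: Gradio maps each entry of examples to the declared inputs positionally, and list_examples builds each pair as [context, question] while the interface declares its inputs in the order (Question, Context). If the example boxes come up swapped, appending in the opposite order would restore the alignment:

        examples.append([question, context])  # hypothetical reordering to match the declared input order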