Spaces:
Runtime error
Commit · bf3d25c · Parent(s): 394becd
Update app.py
app.py CHANGED

```diff
@@ -1,7 +1,8 @@
 import gradio as gr
-import
+import PyPDF2
 import io
-import
+import requests
+import torch
 from transformers import AutoTokenizer, AutoModelForQuestionAnswering
 
 # Download and load pre-trained model and tokenizer
@@ -9,34 +10,36 @@ model_name = "distilbert-base-cased-distilled-squad"
 tokenizer = AutoTokenizer.from_pretrained(model_name)
 model = AutoModelForQuestionAnswering.from_pretrained(model_name)
 
-def
-#
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+def answer_questions(pdf_file, questions):
+    # Load PDF file and extract text
+    pdf_reader = PyPDF2.PdfFileReader(io.BytesIO(pdf_file.read()))
+    text = ""
+    for i in range(pdf_reader.getNumPages()):
+        page = pdf_reader.getPage(i)
+        text += page.extractText()
+    text = text.strip()
+
+    answers = []
+    for question in questions:
+        # Tokenize question and text
+        input_ids = tokenizer.encode(question, text)
+
+        # Perform question answering
+        outputs = model(torch.tensor([input_ids]), return_dict=True)
+        answer_start = outputs.start_logits.argmax().item()
+        answer_end = outputs.end_logits.argmax().item()
+        answer = tokenizer.convert_tokens_to_string(tokenizer.convert_ids_to_tokens(input_ids[answer_start:answer_end+1]))
+
+        answers.append(answer)
+
+    return answers
 
 inputs = [
     gr.inputs.File(label="PDF document"),
-    gr.inputs.Textbox(label="
+    gr.inputs.Textbox(label="Questions (one per line)", type="textarea")
 ]
 
-outputs = gr.outputs.
+outputs = gr.outputs.Textarea(label="Answers")
 
-gr.Interface(fn=
-             description="Upload a PDF document and ask
+gr.Interface(fn=answer_questions, inputs=inputs, outputs=outputs, title="PDF Question Answering Tool",
+             description="Upload a PDF document and ask multiple questions. The app will use a pre-trained model to find the answers.").launch()
```
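A rough sketch of that windowing, using the tokenizer's `stride` / `return_overflowing_tokens` support to score every chunk and keep the best span (the `max_length` and `stride` values here are arbitrary, and a production version would also mask out question and padding positions before taking the argmax):

```python
import torch


def answer_in_long_text(question, text, tokenizer, model, max_length=384, stride=128):
    # Tokenize the question against overlapping windows of the context.
    enc = tokenizer(
        question,
        text,
        truncation="only_second",
        max_length=max_length,
        stride=stride,
        return_overflowing_tokens=True,
        padding="max_length",
        return_tensors="pt",
    )
    with torch.no_grad():
        out = model(input_ids=enc["input_ids"], attention_mask=enc["attention_mask"])

    best_score, best_answer = float("-inf"), ""
    for i in range(enc["input_ids"].shape[0]):
        start = out.start_logits[i].argmax().item()
        end = out.end_logits[i].argmax().item()
        if end < start:
            continue  # no plausible span in this window
        score = (out.start_logits[i][start] + out.end_logits[i][end]).item()
        if score > best_score:
            best_score = score
            best_answer = tokenizer.decode(
                enc["input_ids"][i][start:end + 1], skip_special_tokens=True
            )
    return best_answer


# e.g. answer_in_long_text("Who is the author?", text, tokenizer, model)
```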