import gradio as gr
from transformers import pipeline

# Load a question-answering pipeline backed by Intel's sparse BERT model
# (pruned with the Prune Once for All method, fine-tuned on SQuADv1.1).
qa_pipeline = pipeline(
    task="question-answering",
    model="Intel/bert-base-uncased-squadv1.1-sparse-80-1x4-block-pruneofa",
)
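# Optional smoke test (an assumption about your workflow, not part of the app's flow):
# calling the pipeline directly returns a dict with 'score', 'start', 'end' and 'answer'
# keys, e.g. {'score': 0.93..., 'start': 10, 'end': 15, 'answer': 'seven'} for the
# sample context/question used in predict() below. Uncomment to try it:
# print(qa_pipeline(context="There are seven continents in the world.",
#                   question="How many continents are there in the world?"))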


def predict(context="There are seven continents in the world.", question="How many continents are there in the world?"):
    '''
    Run extractive question answering over the given context.

    A sample prediction returns a dictionary of the form:
    {'score': 0.9376363158226013, 'start': 10, 'end': 15, 'answer': 'seven'}
    '''
    predictions = qa_pipeline(context=context, question=question)
    print(f'predictions={predictions}')
    score = predictions['score']
    answer = predictions['answer']
    start = predictions['start']
    # Return values in the same order as the Interface outputs: score, start, answer.
    return score, start, answer

md = """
If you came looking for chatGPT, sorry to disappoint, but this is different. This prediction model is designed to answer a question about a text. It is designed to do reading comprehension. The model does not just answer questions in general -- it only works from the text that you provide. However, accomplishing accurate reading comprehension can be a very valuable task, especially if you are attempting to get quick answers from a large (and maybe boring!) document.

The model is based on the Zafrir et al. (2021) paper: [Prune Once for All: Sparse Pre-Trained Language Models](https://arxiv.org/abs/2111.05754) paper.
Training dataset: SQuADv1.1, based on the Rajpurkar et al. (2016) paper: [SQuAD: 100,000+ Questions for Machine Comprehension of Text](https://aclanthology.org/D16-1264/)

"""

# Gradio components for the inputs and outputs of predict().
context = gr.Text(lines=10, label="Context")
question = gr.Text(label="Question")
score = gr.Text(label="Score")
start = gr.Text(label="Answer found at character")
answer = gr.Text(label="Answer")

iface = gr.Interface(
    fn=predict,
    inputs=[context, question],
    outputs=[score, start, answer],
    title="Question & Answer with Sparse BERT using the SQuAD dataset",
    description=md,
)

iface.launch()
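# A minimal sketch of an alternative launch call (share=True is a standard Gradio
# option that creates a temporary public URL; the port shown is only an illustrative
# choice, matching Gradio's default):
# iface.launch(share=True, server_port=7860)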