abhilash1910 committed on
Commit
e3d993b
·
1 Parent(s): 49e0baa

Initial Commit

Browse files
Files changed (1) hide show
  1. app.py +37 -3
app.py CHANGED
@@ -1,12 +1,46 @@
1
-
2
  from transformers import AutoTokenizer,AutoModelForQuestionAnswering
3
- from transformers import pipeline
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4
  model=AutoModelForQuestionAnswering.from_pretrained('abhilash1910/albert-squad-v2')
5
  tokenizer=AutoTokenizer.from_pretrained('abhilash1910/albert-squad-v2')
 
6
  nlp_QA=pipeline('question-answering',model=model,tokenizer=tokenizer)
7
  QA_inp={
8
  'question': 'How many parameters does Bert large have?',
9
  'context': 'Bert large is really big... it has 24 layers, for a total of 340M parameters.Altogether it is 1.34 GB so expect it to take a couple minutes to download to your Colab instance.'
10
  }
 
11
  result=nlp_QA(QA_inp)
12
- result
 
 
 
 
 
1
+ import gradio as gr
2
  from transformers import AutoTokenizer,AutoModelForQuestionAnswering
3
+ import torch
4
+
5
def inference(question,context):
    """Answer `question` from `context` with the ALBERT SQuAD-v2 model.

    Performs extractive question answering: encodes the (question, context)
    pair, takes the argmax of the start/end logits to pick a span, and
    decodes that span back to a string.

    Parameters
    ----------
    question : str
        The question to answer.
    context : str
        The passage the answer is extracted from.

    Returns
    -------
    str
        The decoded answer span (may be empty when the model selects an
        empty span, as SQuAD-v2 models do for unanswerable questions).

    Notes
    -----
    Reads the module-level ``model`` and ``tokenizer`` globals, which must
    be initialized before this function is called.
    """
    encoded_text = tokenizer.encode_plus(
        question,
        context,
        padding='longest',
        truncation="longest_first",
        max_length=512,
        stride=30,
        return_tensors="pt",
        return_token_type_ids=False,
        return_overflowing_tokens=False,
        return_offsets_mapping=False,
        return_special_tokens_mask=False,
    )
    input_ids = encoded_text['input_ids'].tolist()[0]
    # Inference only — disable autograd bookkeeping.
    with torch.no_grad():
        outputs = model(**encoded_text)
    # The first two model outputs are the start and end span logits.
    start_logits, end_logits = outputs[:2]
    answer_start = torch.argmax(start_logits)
    answer_end = torch.argmax(end_logits) + 1  # exclusive end index
    answer = tokenizer.convert_tokens_to_string(
        tokenizer.convert_ids_to_tokens(input_ids[answer_start:answer_end])
    )
    # BUG FIX: the original only print()ed the answer and returned None,
    # so the Gradio UI displayed nothing. Return the answer instead.
    # (Also removed the unused locals question_first / max_answer_len /
    # tokens and the commented-out logits code.)
    return answer
28
+
29
+
30
+
31
+
32
# Load the fine-tuned ALBERT (SQuAD v2) QA model and its tokenizer once at
# startup; `inference` reads these module-level globals at call time.
model = AutoModelForQuestionAnswering.from_pretrained('abhilash1910/albert-squad-v2')
tokenizer = AutoTokenizer.from_pretrained('abhilash1910/albert-squad-v2')

# Example inputs pre-filled in the demo UI.
question = 'How many parameters does Bert large have?'
context = 'Bert large is really big... it has 24 layers, for a total of 340M parameters.Altogether it is 1.34 GB so expect it to take a couple minutes to download to your Colab instance.'
title = 'Question Answering demo with Albert QA transformer and gradio'

# BUG FIX: `gr.Interface.from_pipeline` expects a `transformers` pipeline
# object, not a plain Python function, so the original call raised at
# startup; additionally the original example row was ordered
# [context, question] while `inference` takes (question, context).
# Build a regular Interface around `inference` with explicit components
# and launch it.  (The commented-out pipeline demo code was removed.)
interface = gr.Interface(
    fn=inference,
    inputs=["text", "text"],      # question, context — matches inference()
    outputs="text",               # the decoded answer span
    title=title,
    theme="peach",
    examples=[[question, context]],
).launch()