Spaces:
Sleeping
Sleeping
flyboytarantino14
committed on
Commit
·
6f4a27c
1
Parent(s):
ac436d4
Update app.py
Browse files
app.py
CHANGED
@@ -1,3 +1,4 @@
|
|
|
|
1 |
import os
|
2 |
import gradio as gr
|
3 |
|
@@ -13,22 +14,14 @@ def get_question(context, answer):
|
|
13 |
#encoding = question_tokenizer.encode_plus(text, max_length=max_len, padding='max_length', truncation=True, return_tensors="pt")
|
14 |
encoding = question_tokenizer.encode_plus(text, return_tensors="pt")
|
15 |
input_ids, attention_mask = encoding["input_ids"], encoding["attention_mask"]
|
16 |
-
#outs = question_model.generate(input_ids=input_ids,
|
17 |
-
# attention_mask=attention_mask,
|
18 |
-
# early_stopping=True,
|
19 |
-
# num_beams=3, # Use fewer beams to generate fewer but higher-quality questions
|
20 |
-
# num_return_sequences=3,
|
21 |
-
# no_repeat_ngram_size=3, # Allow some repetition to avoid generating nonsensical questions
|
22 |
-
# max_length=256) # Use a shorter max length to focus on generating more relevant questions
|
23 |
-
|
24 |
outs = question_model.generate(input_ids=input_ids,
|
25 |
-
|
26 |
-
|
27 |
-
|
28 |
-
|
29 |
-
|
|
|
30 |
|
31 |
-
|
32 |
dec = [question_tokenizer.decode(ids) for ids in outs]
|
33 |
questions = ""
|
34 |
for i, question in enumerate(dec):
|
@@ -49,4 +42,46 @@ interface = gr.Interface(
|
|
49 |
outputs=output_question
|
50 |
)
|
51 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
52 |
interface.launch()
|
|
|
1 |
+
"""
|
2 |
import os
|
3 |
import gradio as gr
|
4 |
|
|
|
14 |
#encoding = question_tokenizer.encode_plus(text, max_length=max_len, padding='max_length', truncation=True, return_tensors="pt")
|
15 |
encoding = question_tokenizer.encode_plus(text, return_tensors="pt")
|
16 |
input_ids, attention_mask = encoding["input_ids"], encoding["attention_mask"]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
17 |
outs = question_model.generate(input_ids=input_ids,
|
18 |
+
attention_mask=attention_mask,
|
19 |
+
early_stopping=True,
|
20 |
+
num_beams=3, # Use fewer beams to generate fewer but higher-quality questions
|
21 |
+
num_return_sequences=3,
|
22 |
+
no_repeat_ngram_size=3, # Allow some repetition to avoid generating nonsensical questions
|
23 |
+
max_length=256) # Use a shorter max length to focus on generating more relevant questions
|
24 |
|
|
|
25 |
dec = [question_tokenizer.decode(ids) for ids in outs]
|
26 |
questions = ""
|
27 |
for i, question in enumerate(dec):
|
|
|
42 |
outputs=output_question
|
43 |
)
|
44 |
|
45 |
+
interface.launch()
|
46 |
+
"""
|
47 |
+
|
48 |
+
import gradio as gr
|
49 |
+
from transformers import T5ForConditionalGeneration,T5Tokenizer
|
50 |
+
|
51 |
+
# Load the question-generation weights and their tokenizer once at module
# import, so every UI request only pays for inference.
question_tokenizer = T5Tokenizer.from_pretrained('t5-base')
question_model = T5ForConditionalGeneration.from_pretrained('ramsrigouthamg/t5_squad_v1')
|
53 |
+
|
54 |
+
def get_question(sentence, answer):
    """Generate a question about *sentence* whose answer is *answer*.

    Builds the T5-SQuAD prompt "context: ... answer: ...", runs beam-search
    generation on the module-level `question_model`, and returns the decoded
    question as a plain string.

    Parameters:
        sentence: context passage the question should be grounded in.
        answer:   the answer span the generated question should target.

    Returns:
        The generated question, stripped of the "question:" prefix,
        special tokens, and surrounding whitespace.
    """
    text = "context: {} answer: {} </s>".format(sentence, answer)
    print (text)  # debug trace of the exact prompt sent to the model
    max_len = 256
    # padding='max_length' + truncation=True replace the deprecated
    # pad_to_max_length=True and guard against prompts longer than max_len,
    # which previously raised at tokenization time.
    encoding = question_tokenizer.encode_plus(
        text,
        max_length=max_len,
        padding='max_length',
        truncation=True,
        return_tensors="pt",
    )

    input_ids, attention_mask = encoding["input_ids"], encoding["attention_mask"]

    outs = question_model.generate(input_ids=input_ids,
                                   attention_mask=attention_mask,
                                   early_stopping=True,
                                   num_beams=5,
                                   num_return_sequences=1,
                                   no_repeat_ngram_size=2,
                                   max_length=200)

    # skip_special_tokens=True drops <pad>/</s> markers that would otherwise
    # leak into the text shown to the user.
    dec = [question_tokenizer.decode(ids, skip_special_tokens=True) for ids in outs]

    question = dec[0].replace("question:", "")
    question = question.strip()
    return question
|
76 |
+
|
77 |
+
# Gradio widgets: two free-text inputs (context passage, target answer)
# and one text output for the generated question.
input_context, input_answer, output_question = gr.Textbox(), gr.Textbox(), gr.Textbox()
|
80 |
+
|
81 |
+
# Wire the widgets to get_question and start serving the web UI.
interface = gr.Interface(fn=get_question, inputs=[input_context, input_answer], outputs=output_question)
interface.launch()
|