Commit 79b9061
Parent(s): 92e2176
Update app.py
app.py
CHANGED
@@ -91,13 +91,31 @@ def generate(starting_text):
 # grad.Interface(generate, inputs=txt, outputs=out).launch()
 
 #DistlGPT2
-from transformers import pipeline, set_seed
-import gradio as grad
-gpt2_pipe = pipeline('text-generation', model='distilgpt2')
-set_seed(42)
+# from transformers import pipeline, set_seed
+# import gradio as grad
+# gpt2_pipe = pipeline('text-generation', model='distilgpt2')
+# set_seed(42)
 def generateDistlGPT2(starting_text):
     response= gpt2_pipe(starting_text, max_length=20, num_return_sequences=5)
     return response
-txt=grad.Textbox(lines=1, label="English", placeholder="English Text here")
-out=grad.Textbox(lines=1, label="Generated Text")
-grad.Interface(generateDistlGPT2, inputs=txt, outputs=out).launch()
+# txt=grad.Textbox(lines=1, label="English", placeholder="English Text here")
+# out=grad.Textbox(lines=1, label="Generated Text")
+# grad.Interface(generateDistlGPT2, inputs=txt, outputs=out).launch()
+
+#Text Generation
+from transformers import AutoModelWithLMHead, AutoTokenizer
+import gradio as grad
+text2text_tkn = AutoTokenizer.from_pretrained("mrm8488/t5-base-finetuned-question-generation-ap")
+mdl = AutoModelWithLMHead.from_pretrained("mrm8488/t5-base-finetuned-question-generation-ap")
+def text2text(context,answer):
+    input_text = "answer: %s context: %s </s>" % (answer, context)
+    features = text2text_tkn ([input_text], return_tensors='pt')
+    output = mdl.generate(input_ids=features['input_ids'],
+                          attention_mask=features['attention_mask'],
+                          max_length=64)
+    response=text2text_tkn.decode(output[0])
+    return response
+context=grad.Textbox(lines=10, label="English", placeholder="Context")
+ans=grad.Textbox(lines=1, label="Answer")
+out=grad.Textbox(lines=1, label="Genereated Question")
+grad.Interface(text2text, inputs=[context,ans], outputs=out).launch()
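Note that this commit comments out the imports and the gpt2_pipe pipeline but leaves generateDistlGPT2 defined, so calling that function after this change would raise a NameError because gpt2_pipe no longer exists at module level. A minimal sketch of one way to keep that demo self-contained if it is re-enabled, assuming the transformers pipeline API and creating the pipeline lazily (the lazy _gpt2_pipe helper is not part of the committed code):

from transformers import pipeline, set_seed

_gpt2_pipe = None  # created on first use so the module imports cleanly

def generateDistlGPT2(starting_text):
    global _gpt2_pipe
    if _gpt2_pipe is None:
        set_seed(42)
        _gpt2_pipe = pipeline('text-generation', model='distilgpt2')
    # Returns a list of 5 generated continuations of the prompt
    return _gpt2_pipe(starting_text, max_length=20, num_return_sequences=5)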
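For reference, a minimal standalone sketch of the question-generation demo this commit adds, assuming current transformers and gradio APIs. It swaps the deprecated AutoModelWithLMHead for AutoModelForSeq2SeqLM, decodes with skip_special_tokens=True to drop the pad and end-of-sequence markers, and corrects the "Genereated Question" label; otherwise it mirrors the committed code:

import gradio as grad
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

ckpt = "mrm8488/t5-base-finetuned-question-generation-ap"
text2text_tkn = AutoTokenizer.from_pretrained(ckpt)
mdl = AutoModelForSeq2SeqLM.from_pretrained(ckpt)

def text2text(context, answer):
    # The checkpoint expects input formatted as "answer: ... context: ...".
    input_text = "answer: %s context: %s </s>" % (answer, context)
    features = text2text_tkn([input_text], return_tensors="pt")
    output = mdl.generate(
        input_ids=features["input_ids"],
        attention_mask=features["attention_mask"],
        max_length=64,
    )
    # skip_special_tokens=True strips the leading <pad> and trailing </s>.
    return text2text_tkn.decode(output[0], skip_special_tokens=True)

context = grad.Textbox(lines=10, label="English", placeholder="Context")
ans = grad.Textbox(lines=1, label="Answer")
out = grad.Textbox(lines=1, label="Generated Question")

grad.Interface(text2text, inputs=[context, ans], outputs=out).launch()

Given a context paragraph and an answer span taken from it, the interface returns a question whose answer is that span, e.g. context "Gradio is a Python library for building ML demos." with answer "Gradio" yields a question like "What is the name of the Python library for building ML demos?".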