Commit · c01bf0a
Parent(s): 745ad96
Update app.py
app.py CHANGED
@@ -5,6 +5,7 @@ import gradio as gr
import openai
import os
import torch
+from transformers import pipeline, AutoModelForQuestionAnswering, AutoTokenizer

#Summarization Fine Tune Model

@@ -31,7 +32,7 @@ def sentiment_analysis(text, model_path="leadingbridge/sentiment-analysis", id2l
    return result


-# Open AI
+# Open AI Model
openai.api_key = os.environ['openai_api']

def openai_chatbot(prompt):
@@ -97,6 +98,18 @@ def chatgpt_clone(input, history):
    history.append((input, output))
    return history, history

+# Pretrained Question Answering Model
+
+model = AutoModelForQuestionAnswering.from_pretrained('uer/roberta-base-chinese-extractive-qa')
+tokenizer = AutoTokenizer.from_pretrained('uer/roberta-base-chinese-extractive-qa')
+QA = pipeline('question-answering', model=model, tokenizer=tokenizer)
+model.eval()
+
+def cqa(question, context):  # Chinese QA model function
+    QA_input = {'question': question,
+                'context': context}
+    return QA(QA_input)
+
"""# **Gradio Model**"""

# Gradio Output Model
@@ -138,6 +151,9 @@ with gr.Blocks() as demo:
        proceed_button = gr.Button("Summarize")
        proceed_button.click(fn=summarize_text, inputs=inputs, outputs=outputs)
        gr.Markdown("This model summarizes Chinese text using the MT5 language model. Enter a Chinese text in the input box and click the 'Summarize' button to get the summary.")
+    with gr.Tab("Chinese Q&A"):
+        text_button = gr.Button("Proceed")
+        text_button.click(fn=cqa, inputs=[gr.Textbox(lines=1, label="Question Input", placeholder="Enter the question you want to ask"), gr.Textbox(lines=9, label="Answer Source", placeholder="Enter the source article here")], outputs=gr.Textbox(label="Answer Output"))
    gr.Markdown('''
We are happy to share with you some Chinese language models that we've made using NLP. When we looked online, we noticed that there weren't many resources available for Chinese NLP, so we hope that our models can be useful to you.
We want to mention that these models aren't perfect and there is still room for improvement. Because of limited resources, there might be some mistakes or limitations in the models.
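For reference, below is a minimal standalone sketch of the question-answering path this commit adds, run outside the Gradio UI. It assumes `transformers` and `torch` are installed (as the Space's other models already require); the sample question and passage are illustrative placeholders, not taken from the repository.

# Minimal sketch: exercise the QA pipeline added in this commit, outside Gradio.
# Assumes `transformers` and `torch` are available; the sample texts are illustrative only.
from transformers import pipeline, AutoModelForQuestionAnswering, AutoTokenizer

model = AutoModelForQuestionAnswering.from_pretrained('uer/roberta-base-chinese-extractive-qa')
tokenizer = AutoTokenizer.from_pretrained('uer/roberta-base-chinese-extractive-qa')
QA = pipeline('question-answering', model=model, tokenizer=tokenizer)

def cqa(question, context):
    # Same wrapper as in app.py: the pipeline expects a dict with 'question' and 'context'.
    return QA({'question': question, 'context': context})

if __name__ == '__main__':
    # The pipeline returns a dict with 'answer', 'score', 'start', and 'end' keys.
    result = cqa('香港的官方语言是什么？', '香港的官方语言是中文和英文。')
    print(result['answer'], result['score'])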