from transformers import BertTokenizerFast, TFBertForSequenceClassification, TextClassificationPipeline
import numpy as np
import tensorflow as tf
import gradio as gr
import openai
import os

# Sentiment Analysis Pre-Trained Model
model_path = "leadingbridge/sentiment-analysis"
tokenizer = BertTokenizerFast.from_pretrained(model_path)
model = TFBertForSequenceClassification.from_pretrained(model_path, id2label={0: 'negative', 1: 'positive'})

def sentiment_analysis(text):
    # Classify the input text with the fine-tuned Chinese BERT sentiment model
    pipe = TextClassificationPipeline(model=model, tokenizer=tokenizer)
    result = pipe(text)
    return result
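# Note: TextClassificationPipeline returns a list of dicts such as
# [{'label': 'positive', 'score': 0.98}] (the score shown here is illustrative);
# Gradio renders this list as text in the output box.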


# Open AI Chatbot Model
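# The OpenAI-backed features read the API key from the 'openai_api' environment
# variable (e.g. a secret set in the hosting environment), which must hold a valid key.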
openai.api_key = os.environ['openai_api']

start_sequence = "\nAI:"
restart_sequence = "\nHuman: "

prompt = "You can discuss any topic with the Chinese Chatbot assistant by typing Chinese in here"

def openai_create(prompt):
    # Send the accumulated conversation to the OpenAI completions endpoint
    response = openai.Completion.create(
        model="text-davinci-003",
        prompt=prompt,
        temperature=0.9,
        max_tokens=2048,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0.6,
        stop=[" Human:", " AI:"]
    )

    return response.choices[0].text

def chatgpt_clone(input, history):
    # Flatten the (user, bot) history tuples plus the new message into one prompt string
    history = history or []
    s = list(sum(history, ()))
    s.append(input)
    inp = ' '.join(s)
    output = openai_create(inp)
    history.append((input, output))
    return history, history
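# Example (illustrative): with history = [("msg1", "reply1")] and input "msg2",
# the prompt passed to openai_create is "msg1 reply1 msg2".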


# Open AI Chinese Translation Model
def translate_to_chinese(text_to_translate):
    response = openai.Completion.create(
      model="text-davinci-003",
      prompt=f"Translate this short English sentence into Chinese:\n\n{text_to_translate}\n\n1.",
      temperature=0.3,
      max_tokens=2048,
      top_p=1.0,
      frequency_penalty=0.0,
      presence_penalty=0.0
    )
    return response.choices[0].text.strip()

# Open AI English Translation Model
def translate_to_english(text_to_translate):
    response = openai.Completion.create(
      model="text-davinci-003",
      prompt=f"Translate this short Chinese sentence into English:\n\n{text_to_translate}\n\n1.",
      temperature=0.3,
      max_tokens=2048,
      top_p=1.0,
      frequency_penalty=0.0,
      presence_penalty=0.0
    )
    return response.choices[0].text.strip()


# Gradio Output Model
with gr.Blocks() as demo:
    gr.Markdown('Welcome to the Chinese NLP Demo! Please select a model tab to interact with:')
    with gr.Tab("🗣️Chatbot"):
        gr.Markdown("This is a Chinese chatbot powered by the OpenAI language model. Enter your message below in Chinese and the chatbot will respond.")
        chatbot = gr.Chatbot()
        message = gr.Textbox(placeholder=prompt)
        state = gr.State()
        submit = gr.Button("Send")
        submit.click(chatgpt_clone, inputs=[message, state], outputs=[chatbot, state])
    with gr.Tab("🤗Sentiment Analysis"):
        gr.Markdown("This is a self-trained fine-tuned model using Chinese BERT for sentiment analysis. Enter a sentence in Chinese in the input box and click the 'proceed' button to get the sentiment analysis result.")
        inputs = gr.Textbox(placeholder="Type a Chinese sentence here, either positive or negative in sentiment.")
        outputs = gr.Textbox(label="Sentiment Analysis")
        proceed_button = gr.Button("Proceed")           
        proceed_button.click(fn=sentiment_analysis, inputs=inputs, outputs=outputs)
    with gr.Tab("🀄Chinese Translation"):
        gr.Markdown("This model translate an English sentence to Chinese using the OpenAI engine. Enter an English short sentence in the input box and click the 'Translate' button to get the translation result in Chinese.")
        inputs = gr.Textbox(placeholder="Enter a short English sentence to translate to Chinese here.")
        outputs = gr.Textbox(label="Translation Result")
        proceed_button = gr.Button("Translate")
        proceed_button.click(fn=translate_to_chinese, inputs=inputs, outputs=outputs)
    with gr.Tab("🔤English Translation"):
        gr.Markdown("This model translate a Chinese sentence to English using the OpenAI engine. Enter a Chinese short sentence in the input box and click the 'Translate' button to get the translation result in English.")
        inputs = gr.Textbox(placeholder="Enter a short Chinese sentence to translate to English here.")
        outputs = gr.Textbox(label="Translation Result")
        proceed_button = gr.Button("Translate")
        proceed_button.click(fn=translate_to_english, inputs=inputs, outputs=outputs)
    gr.Markdown('''
    We are happy to share some Chinese NLP models that we have built. When we looked online, we noticed there weren't many resources available for Chinese NLP, so we hope our models are useful to you.

    Please note that these models aren't perfect and there is still room for improvement; because of limited resources, they may contain mistakes or limitations.

    We hope you find them helpful and that you can help make them even better.
    ''')



demo.launch(inline=False)