elapt1c committed
Commit 674523c · verified · 1 Parent(s): 038814d

Update app.py

Files changed (1)
  1. app.py +105 -0
app.py CHANGED
@@ -0,0 +1,105 @@
+ import spacy
+ import tensorflow as tf
+ import gradio as gr
+ import plotly.express as px
+ import plotly.io as pio
+ from scipy.special import softmax
+ from spacy import displacy
+ from transformers import (
+     AutoTokenizer,
+     TFAutoModelForCausalLM,
+     TFAutoModelForSequenceClassification,
+ )
+
+ # configuration params
+ pio.templates.default = "plotly_dark"
+
+ # setting up the text in the page
+ TITLE = "<center><h1>Talk with an AI</h1></center>"
+ DESCRIPTION = r"""<center>This application allows you to talk with a machine/robot built on state-of-the-art technology!<br>
+ The back-end uses the elapt1c/ElapticAI-1a model, one of the best models for text generation and comprehension.<br>
+ Language processing is done using RoBERTa for sentiment analysis and spaCy for named-entity recognition and dependency plotting.<br>
+ The AI thinks it is a human, so please treat it as such, or else it might get angry!<br>
+ """
+ EXAMPLES = [
+     ["What is your favorite videogame?"],
+     ["What gets you really sad?"],
+     ["How can I make you really angry?"],
+     ["What do you do for work?"],
+     ["What are your hobbies?"],
+     ["What is your favorite food?"],
+ ]
+ ARTICLE = r"""<center>
+ Done by Dr. Gabriel Lopez<br>
+ For more, please visit: <a href='https://sites.google.com/view/dr-gabriel-lopez/home'>My Page</a><br>
+ For info about the chat-bot model, you can also see the <a href="https://arxiv.org/abs/1911.00536">ArXiv paper</a><br>
+ </center>"""
+
+ # Loading necessary NLP models
+ # dialog
+ checkpoint = "elapt1c/ElapticAI-1a"  # tf
+ model_gtp2 = TFAutoModelForCausalLM.from_pretrained(checkpoint)
+ tokenizer_gtp2 = AutoTokenizer.from_pretrained(checkpoint)
+ # sentiment
+ checkpoint = "cardiffnlp/twitter-roberta-base-emotion"
+ model_roberta = TFAutoModelForSequenceClassification.from_pretrained(checkpoint)
+ tokenizer_roberta = AutoTokenizer.from_pretrained(checkpoint)
+ # NER & Dependency
+ nlp = spacy.load("en_core_web_sm")
+
+ # text-to-text: chatting function -- GPT2
+ def chat_with_bot(user_input, chat_history_and_input=[]):
+     """Text generation using GPT2"""
+     emb_user_input = tokenizer_gtp2.encode(
+         user_input + tokenizer_gtp2.eos_token, return_tensors="tf"
+     )
+     if len(chat_history_and_input) == 0:
+         bot_input_ids = emb_user_input  # first iteration
+     else:
+         bot_input_ids = tf.concat(
+             [chat_history_and_input, emb_user_input], axis=-1
+         )  # other iterations
+     chat_history_and_input = model_gtp2.generate(
+         bot_input_ids, max_length=1000, pad_token_id=tokenizer_gtp2.eos_token_id
+     ).numpy()
+     # decode only the newly generated tokens, i.e. everything after the prompt
+     bot_response = tokenizer_gtp2.decode(
+         chat_history_and_input[:, bot_input_ids.shape[-1] :][0],
+         skip_special_tokens=True,
+     )
+     return bot_response, chat_history_and_input
+
+
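+ # Illustrative usage sketch for chat_with_bot (comments only, not executed by
+ # the app): the first call starts a fresh history and later calls feed it back
+ # in, so the model re-reads the whole conversation on every turn, e.g.
+ #   reply, history = chat_with_bot("Hi, how are you?")
+ #   reply, history = chat_with_bot("What do you do for work?", history)
+
+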
+ # text-to-sentiment
+ def text_to_sentiment(text_input):
+     """Sentiment analysis using RoBERTa"""
+     labels = ["anger", "joy", "optimism", "sadness"]
+     encoded_input = tokenizer_roberta(text_input, return_tensors="tf")
+     output = model_roberta(encoded_input)
+     scores = output[0][0].numpy()
+     scores = softmax(scores)
+     return px.histogram(x=labels, y=scores, height=200)
+
+
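+ # Illustrative usage sketch for text_to_sentiment (comments only): it returns
+ # a Plotly histogram of the four emotion scores, e.g.
+ #   fig = text_to_sentiment("I love chatting with you!")
+ #   fig.show()  # "joy" would be expected to dominate
+
+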
+ # text_to_semantics
+ def text_to_semantics(text_input):
+     """NER and Dependency plot using Spacy"""
+     processed_text = nlp(text_input)
+     # Dependency
+     html_dep = displacy.render(
+         processed_text,
+         style="dep",
+         options={"compact": True, "color": "white", "bg": "light-black"},
+         page=False,
+     )
+     # tag each token with its part-of-speech for entity-style highlighting
+     pos_tokens = []
+     for token in processed_text:
+         pos_tokens.extend([(token.text, token.pos_), (" ", None)])
+     return pos_tokens, html_dep
+
+
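+ # Illustrative usage sketch for text_to_semantics (comments only): it returns
+ # (token, POS-tag) pairs for highlighting plus the dependency-parse HTML, e.g.
+ #   tokens, dep_html = text_to_semantics("Alice moved to Paris in 2020.")
+ #   tokens starts like [("Alice", "PROPN"), (" ", None), ("moved", "VERB"), ...]
+
+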
+ # gradio interface
+ blocks = gr.Blocks()
+ with blocks:
+     # physical elements
+     session_state = gr.State([])
+     gr.Markdown(TITLE)
+     gr.Markdown(DESCRIPTION)
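+     # A minimal sketch of how the remaining layout could wire the three
+     # functions above into the interface (comments only; the widget names and
+     # layout are assumptions, not part of this commit):
+     #   user_input = gr.Textbox(label="Talk to the AI")
+     #   bot_output = gr.Textbox(label="AI response")
+     #   user_input.submit(
+     #       chat_with_bot,
+     #       inputs=[user_input, session_state],
+     #       outputs=[bot_output, session_state],
+     #   )
+     #   gr.Examples(EXAMPLES, inputs=user_input)
+     #   gr.Markdown(ARTICLE)
+ # blocks.launch()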