Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
@@ -11,32 +11,32 @@ top_k = 10
|
|
11 |
model_info = {
|
12 |
"single_conversation_withGPTdata_bs256": {
|
13 |
'url': 'https://huggingface.co/HannahLin271/NanoGPT/resolve/main/singleConversation_withGPTdata_bs256.pt',
|
14 |
-
'description': "
|
15 |
'logo': 'π§'
|
16 |
},
|
17 |
"single_conversation_withGPTdata_withoutemotion": {
|
18 |
'url': 'https://huggingface.co/HannahLin271/NanoGPT/resolve/main/singleConversation_withGPTdata_withoutemotion.pt',
|
19 |
-
'description': "
|
20 |
'logo': 'π§'
|
21 |
},
|
22 |
"single_conversation_withcontext": {
|
23 |
'url': 'https://huggingface.co/HannahLin271/NanoGPT/resolve/main/singleConversation_withcontext.pt',
|
24 |
-
'description': "Trained on Facebook Emotion Dialogues dataset with context included for improved conversational understanding.",
|
25 |
'logo': 'πΉ'
|
26 |
},
|
27 |
"single_conversation_withemotion": {
|
28 |
'url': 'https://huggingface.co/HannahLin271/NanoGPT/resolve/main/singleConversation_withemotion.pt',
|
29 |
-
'description': "Trained on Facebook Emotion Dialogues dataset, retaining emotion annotations for each conversation.",
|
30 |
'logo': 'πΊ'
|
31 |
},
|
32 |
"single_conversation_withoutemotion": {
|
33 |
'url': 'https://huggingface.co/HannahLin271/NanoGPT/resolve/main/singleConversation_withoutemotion.pt',
|
34 |
-
'description': "Trained on Facebook Emotion Dialogues dataset, excluding emotion annotations for simpler conversations.",
|
35 |
'logo': 'π·'
|
36 |
},
|
37 |
"whole_conversation_withoutemotion": {
|
38 |
'url': 'https://huggingface.co/HannahLin271/NanoGPT/resolve/main/wholeConversation_withoutemotion.pt',
|
39 |
-
'description': "Trained on entire conversations from the Facebook Emotion Dialogues dataset, excluding tags other than <bot> and <human
|
40 |
'logo': 'π΅'
|
41 |
}
|
42 |
}
|
@@ -54,7 +54,7 @@ encode = lambda s: enc.encode(s, allowed_special={"<|endoftext|>"})
|
|
54 |
decode = lambda l: enc.decode(l)
|
55 |
|
56 |
|
57 |
-
def predict(input: str, history: list = None) -> tuple:
|
58 |
if history is None:
|
59 |
history = [] # Initialize history if not provided
|
60 |
# Generate a response using the respond function
|
@@ -70,10 +70,12 @@ def predict(input: str, history: list = None) -> tuple:
|
|
70 |
top_k=top_k,
|
71 |
)
|
72 |
|
73 |
-
response = response_data[1]
|
|
|
|
|
74 |
history.append((input, response)) # Append the user input and bot response to history
|
75 |
|
76 |
-
return history, history # Return updated history twice (for chatbot and state)
|
77 |
|
78 |
def prepare_model(selected_model):
|
79 |
global selected_model_name
|
@@ -85,9 +87,20 @@ def prepare_model(selected_model):
|
|
85 |
description = model_info[selected_model]['description']
|
86 |
return f"## {logo}Model Information\n<br>Model_name: {selected_model}\n<br>Description: {description}"
|
87 |
|
88 |
-
|
89 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
90 |
|
|
|
|
|
|
|
|
|
91 |
with app:
|
92 |
gr.Markdown("# π« Chatbot for ML Project\n### π€ Chat with your chatbot!")
|
93 |
# Model Parameters interface
|
@@ -100,18 +113,35 @@ with app:
|
|
100 |
inp.change(prepare_model, inp, out)
|
101 |
|
102 |
# Chatbot interface
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
103 |
chat_interface = gr.Interface(
|
104 |
fn=predict,
|
105 |
inputs=[
|
106 |
-
|
107 |
-
|
|
|
|
|
108 |
],
|
109 |
outputs=[
|
110 |
-
|
111 |
-
|
|
|
112 |
],
|
113 |
description="Your AI-based chatbot powered by selected models!"
|
114 |
)
|
|
|
|
|
|
|
115 |
#TODO: add emotion/context here
|
116 |
if __name__ == "__main__":
|
117 |
app.launch(share=True)
|
|
|
11 |
model_info = {
|
12 |
"single_conversation_withGPTdata_bs256": {
|
13 |
'url': 'https://huggingface.co/HannahLin271/NanoGPT/resolve/main/singleConversation_withGPTdata_bs256.pt',
|
14 |
+
'description': "We trained this model on the Facebook Emotion Dialogues dataset with additional GPT data, using a batch size of 256.",
|
15 |
'logo': 'π§'
|
16 |
},
|
17 |
"single_conversation_withGPTdata_withoutemotion": {
|
18 |
'url': 'https://huggingface.co/HannahLin271/NanoGPT/resolve/main/singleConversation_withGPTdata_withoutemotion.pt',
|
19 |
+
'description': "We trained this model on the Facebook Emotion Dialogues dataset with GPT data, excluding the emotion tag, using a default batch size of 64.",
|
20 |
'logo': 'π§'
|
21 |
},
|
22 |
"single_conversation_withcontext": {
|
23 |
'url': 'https://huggingface.co/HannahLin271/NanoGPT/resolve/main/singleConversation_withcontext.pt',
|
24 |
+
'description': "Trained on Facebook Emotion Dialogues dataset with context included for improved conversational understanding, using a default batch size of 64.",
|
25 |
'logo': 'πΉ'
|
26 |
},
|
27 |
"single_conversation_withemotion": {
|
28 |
'url': 'https://huggingface.co/HannahLin271/NanoGPT/resolve/main/singleConversation_withemotion.pt',
|
29 |
+
'description': "Trained on Facebook Emotion Dialogues dataset, retaining emotion annotations for each conversation, using a default batch size of 64.",
|
30 |
'logo': 'πΊ'
|
31 |
},
|
32 |
"single_conversation_withoutemotion": {
|
33 |
'url': 'https://huggingface.co/HannahLin271/NanoGPT/resolve/main/singleConversation_withoutemotion.pt',
|
34 |
+
'description': "Trained on Facebook Emotion Dialogues dataset, excluding emotion annotations for simpler conversations, using a default batch size of 64.",
|
35 |
'logo': 'π·'
|
36 |
},
|
37 |
"whole_conversation_withoutemotion": {
|
38 |
'url': 'https://huggingface.co/HannahLin271/NanoGPT/resolve/main/wholeConversation_withoutemotion.pt',
|
39 |
+
'description': "Trained on entire conversations from the Facebook Emotion Dialogues dataset, excluding tags other than <bot> and <human>, using a default batch size of 64.",
|
40 |
'logo': 'π΅'
|
41 |
}
|
42 |
}
|
|
|
54 |
decode = lambda l: enc.decode(l)
|
55 |
|
56 |
|
57 |
+
def predict(input_hints, input: str, history: list = None) -> tuple:
|
58 |
if history is None:
|
59 |
history = [] # Initialize history if not provided
|
60 |
# Generate a response using the respond function
|
|
|
70 |
top_k=top_k,
|
71 |
)
|
72 |
|
73 |
+
response = response_data[1]
|
74 |
+
full_output = response_data[2]
|
75 |
+
print(f"full_output: {full_output}")
|
76 |
history.append((input, response)) # Append the user input and bot response to history
|
77 |
|
78 |
+
return history, history, full_output # Return history for both chatbot and state, plus the raw model output for debugging
|
79 |
|
80 |
def prepare_model(selected_model):
|
81 |
global selected_model_name
|
|
|
87 |
description = model_info[selected_model]['description']
|
88 |
return f"## {logo}Model Information\n<br>Model_name: {selected_model}\n<br>Description: {description}"
|
89 |
|
90 |
+
def update_chat_with_model_selection(model, chat_history):
|
91 |
+
# Add a message about the selected model
|
92 |
+
if chat_history is None:
|
93 |
+
chat_history = []
|
94 |
+
chat_history.append(
|
95 |
+
(None,
|
96 |
+
f'<span style="background-color: #FFD700; padding: 4px; border-radius: 4px;">Now you are chatting with <strong>{model}</strong></span>')
|
97 |
+
)
|
98 |
+
return chat_history
|
99 |
|
100 |
+
|
101 |
+
default_model_info = f"## πModel Information\n<br>Model_name: single_conversation_withGPTdata_bs256\n<br>Description: We trained this model on the Facebook Emotion Dialogues dataset with additional GPT data, using a batch size of 256."
|
102 |
+
app = gr.Blocks()
|
103 |
+
full_output = " "
|
104 |
with app:
|
105 |
gr.Markdown("# π« Chatbot for ML Project\n### π€ Chat with your chatbot!")
|
106 |
# Model Parameters interface
|
|
|
113 |
inp.change(prepare_model, inp, out)
|
114 |
|
115 |
# Chatbot interface
|
116 |
+
chatbot = gr.Chatbot(
|
117 |
+
label="Chatbot Response",
|
118 |
+
avatar_images=(
|
119 |
+
None, # User avatar (None for default)
|
120 |
+
"https://em-content.zobj.net/source/twitter/376/hugging-face_1f917.png" # Assistant avatar
|
121 |
+
)
|
122 |
+
)
|
123 |
+
user_input = gr.Textbox(lines=2, placeholder="Enter your message here...", label="User Input")
|
124 |
+
state = gr.State([])
|
125 |
+
debug_result = gr.Textbox(label="Debug: Full model output",value=full_output)
|
126 |
+
input_hints = gr.Markdown("## π Input Hints\n<br>1. Select a model from the dropdown list. \n<br> 2. Type your message in the text box, please try to input a complete sentence.\n<br> 3. Fill the [form](https://forms.office.com/e/PuTy4jrcQD) here to help us evaluate the model")
|
127 |
chat_interface = gr.Interface(
|
128 |
fn=predict,
|
129 |
inputs=[
|
130 |
+
input_hints,
|
131 |
+
user_input,
|
132 |
+
state, # Maintain conversation state
|
133 |
+
|
134 |
],
|
135 |
outputs=[
|
136 |
+
chatbot,
|
137 |
+
state,
|
138 |
+
debug_result
|
139 |
],
|
140 |
description="Your AI-based chatbot powered by selected models!"
|
141 |
)
|
142 |
+
|
143 |
+
inp.change(fn=update_chat_with_model_selection, inputs=[inp, state], outputs=[chatbot])
|
144 |
+
|
145 |
#TODO: add emotion/context here
|
146 |
if __name__ == "__main__":
|
147 |
app.launch(share=True)
|