import gradio as gr
import requests
import json
import os

# API credentials; the POLLY token is read from the environment (e.g. a Space secret).
API_URL = "https://host.palple.polrambora.com/pmsq"
API_TOKEN = os.getenv("POLLY")

headers = {
    "Authorization": f"{API_TOKEN}",
    "Content-Type": "application/json",
}

# Avatar images rendered next to each chat message.
ASSISTANT_PIC_PATH = "https://huggingface.co/spaces/PLRMB/P-MSQ-API-PREVIEW/resolve/main/API.png"
USER_PIC_PATH = "https://huggingface.co/spaces/PLRMB/P-MSQ-API-PREVIEW/resolve/main/usr.png"
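
# The backend is expected to return JSON shaped like {"msq": {"message": ["<reply>", ...]}};
# respond() below builds the request payload and parses that reply.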
def respond(message, history, system_message, max_tokens, top_p, temperature):
    """Send the conversation to the P-MSQ API and append its reply to the history."""
    # Rebuild the conversation history in the format the API expects.
    messages = []
    for user_message, assistant_message, user_profile, assistant_profile, user_pic, assistant_pic in history:
        if user_message:
            messages.append({
                "role": "user",
                "content": user_message,
                "profile": user_profile,
                "picture": user_pic,
            })
        if assistant_message:
            messages.append({
                "role": "assistant",
                "content": assistant_message,
                "profile": assistant_profile,
                "picture": assistant_pic,
            })

    data = {
        "preferences": {
            "max_char": max_tokens,
            "temperature": temperature,
            "top_p": top_p,
            "system_message": system_message,
        },
        "conversation_history": messages,
        "input": message,
    }

    response = requests.post(API_URL, headers=headers, data=json.dumps(data))

    if response.status_code == 200:
        response_json = response.json()
        assistant_reply = response_json["msq"]["message"][0]
        # Store the new exchange along with display names and avatar images.
        history.append((message, assistant_reply, "You", "P-ALPLE", USER_PIC_PATH, ASSISTANT_PIC_PATH))
        return history, assistant_reply
    else:
        # Surface the API's error message when the body is JSON; otherwise report the status code.
        try:
            error_detail = response.json().get("error", "Unknown error occurred.")
        except ValueError:
            error_detail = f"Unexpected response (HTTP {response.status_code})."
        return history, "Error: " + error_detail
def render_message(history):
    """Render the chat history as HTML, placing each avatar next to its message."""
    messages_html = ""
    for user_message, assistant_message, user_profile, assistant_profile, user_pic, assistant_pic in history:
        if user_message:
            messages_html += "<div style='display: flex; align-items: center; margin-bottom: 10px;'>"
            if user_pic:
                messages_html += f"<img src='{user_pic}' style='width: 40px; height: 40px; border-radius: 50%; margin-right: 10px;'>"
            messages_html += f"{user_message}</div><br>"
        if assistant_message:
            messages_html += "<div style='display: flex; align-items: center; margin-bottom: 10px;'>"
            if assistant_pic:
                messages_html += f"<img src='{assistant_pic}' style='width: 40px; height: 40px; border-radius: 50%; margin-right: 10px;'>"
            messages_html += f"{assistant_message}</div><br>"
    return messages_html
with gr.Blocks(css=".chatbox {height: 400px; overflow-y: auto; border: 1px solid #ccc; padding: 10px; background-color: #f9f9f9;}") as demo:
    gr.Markdown("## P-MSQ Chat Interface")

    chatbot_output = gr.HTML(elem_id="chatbox")
    msg_input = gr.Textbox(
        show_label=False,
        placeholder="Type your message and press Enter...",
        lines=2,
        elem_id="input-text",
    )

    # Place the action buttons side by side in a row.
    with gr.Row():
        send_btn = gr.Button("Send")
        regen_btn = gr.Button("Clear")

    system_message = gr.Textbox(
        value="You are P-MSQ (Messaging Service Query), a friendly AI Chatbot that can help in any situation.",
        label="System message",
    )

    gr.Markdown("### Settings")
    max_tokens = gr.Slider(minimum=1, maximum=2048, value=1024, step=1, label="Max new tokens")
    top_p = gr.Slider(minimum=0, maximum=2, value=0.8, step=0.1, label="Top P")
    temperature = gr.Slider(minimum=0.1, maximum=1, value=0.7, step=0.1, label="Temperature")

    # Session state: the full chat history and the last user message.
    history_state = gr.State([])
    last_message_state = gr.State("")
    def user_interaction(message, history, system_message, max_tokens, top_p, temperature):
        """Send the message, then return the rendered HTML, updated history, a cleared textbox, and the last message."""
        history, assistant_reply = respond(message, history, system_message, max_tokens, top_p, temperature)
        return render_message(history), history, "", message

    def regenerate_response(history, last_message, system_message, max_tokens, top_p, temperature):
        # Wired to the "Clear" button: empties the chat display and the stored history.
        # The remaining inputs are unused but kept to match the event binding below.
        return "", []

    msg_input.submit(
        user_interaction,
        inputs=[msg_input, history_state, system_message, max_tokens, top_p, temperature],
        outputs=[chatbot_output, history_state, msg_input, last_message_state],
    )
    send_btn.click(
        user_interaction,
        inputs=[msg_input, history_state, system_message, max_tokens, top_p, temperature],
        outputs=[chatbot_output, history_state, msg_input, last_message_state],
    )
    regen_btn.click(
        regenerate_response,
        inputs=[history_state, last_message_state, system_message, max_tokens, top_p, temperature],
        outputs=[chatbot_output, history_state],
    )
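    # Custom CSS and an auto-scroll script for the chat area. Note (assumption): whether
    # <script> tags injected through gr.HTML actually execute can vary by Gradio version.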
    gr.HTML("""
    <style>
        #chatbox {
            max-height: 400px;
            overflow-y: auto;
            border: 1px solid #ccc;
            background-color: #242424;
            padding: 10px;
        }
        #input-text {
            width: 100%;
            box-sizing: border-box;
        }
        .gr-button {
            margin: 5px;
            padding: 8px 16px;
            font-size: 14px;
        }
        .gr-row {
            justify-content: flex-end;
        }
    </style>
    <script>
        // Keep the chat view pinned to the newest message.
        const chatbox = document.getElementById('chatbox');
        function scrollToBottom() {
            chatbox.scrollTop = chatbox.scrollHeight;
        }
        function handleNewMessage() {
            setTimeout(scrollToBottom, 50);
        }
        window.addEventListener('message', handleNewMessage);
    </script>
    """)
if __name__ == "__main__":
    demo.launch()