import gradio as gr
from huggingface_hub import InferenceClient

# Hugging Face Inference API client for the Zephyr-7B chat model
# (an HF token may be required depending on how the Space / environment is configured)
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
def respond(
    message,
    history,
    system_message="You are a friendly, helpful assistant.",
    max_tokens=512,
    temperature=0.7,
    top_p=0.95,
):
    # Default values are used because the ChatInterface below passes no additional inputs.
    messages = [{"role": "system", "content": system_message}]

    # Replay the (user, assistant) turns accumulated by gr.ChatInterface.
    for user_msg, bot_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if bot_msg:
            messages.append({"role": "assistant", "content": bot_msg})

    messages.append({"role": "user", "content": message})

    # Stream the completion and yield the progressively growing reply.
    response = ""
    for msg in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = msg.choices[0].delta.content
        if token:  # the final chunk may carry an empty or None delta
            response += token
            yield response
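
# A quick, local sanity check for the streaming generator above (not wired into
# the UI). This assumes the serverless Inference API is reachable and that any
# required HF token is already configured in the environment:
#
#   for partial in respond("Hello!", history=[]):
#       print(partial)
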
# ChatGPT-style UI with modern styling
css = """
body {
    background-color: #121212;
    color: white;
    font-family: Arial, sans-serif;
}
.gradio-container {
    max-width: 900px;
    margin: auto;
    padding: 20px;
}
.chat-container {
    display: flex;
    flex-direction: column;
    height: 80vh;
    overflow-y: auto;
    padding: 20px;
    border-radius: 10px;
    background: #1E1E1E;
}
.chat-message {
    max-width: 75%;
    padding: 12px 16px;
    margin: 8px 0;
    border-radius: 10px;
    font-size: 16px;
    display: inline-block;
}
.user-message {
    background-color: #007AFF;
    align-self: flex-end;
    color: white;
}
.bot-message {
    background-color: #333;
    align-self: flex-start;
}
.input-box {
    background: #333;
    border: none;
    color: white;
    border-radius: 8px;
    padding: 12px;
    font-size: 16px;
    width: 100%;
}
.controls {
    display: flex;
    justify-content: center;
    gap: 10px;
    margin-top: 10px;
}
"""
with gr.Blocks(css=css) as demo:
    gr.Markdown(
        """
        <h1 style='text-align: center; color: #FFFFFF;'>💬 AI Chat Assistant</h1>
        <p style='text-align: center;'>A sleek, modern chatbot experience.</p>
        """
    )

    # ChatInterface wires respond() into a streaming chat UI. No additional
    # inputs are exposed, so respond() falls back to its default generation settings.
    chatbot = gr.ChatInterface(
        respond,
        additional_inputs=[],
        textbox=gr.Textbox(placeholder="Type a message...", lines=1, interactive=True, elem_classes="input-box"),
    )

demo.launch()