import gradio as gr
from huggingface_hub import InferenceClient
# Set up the InferenceClient for the chatbot
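# Note: the client calls the hosted Inference API for this model; if the endpoint
# requires authentication, an HF token can be supplied via the token=... argument.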
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    """Stream a chat completion for the latest user message and prior history."""
    # Rebuild the conversation in the chat-completion message format
    messages = [{"role": "system", "content": system_message}]
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})

    # Stream tokens from the model, yielding the partial response as it grows
    response = ""
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        if token:  # some stream chunks carry no content
            response += token
        yield response
# Define the interface with styling and customization
demo = gr.ChatInterface(
    respond,
    additional_inputs=[
        gr.Textbox(value="You are a friendly Chatbot.", label="System message", lines=1),
        gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
        gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
        gr.Slider(minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"),
    ],
    css="""
    /* Custom CSS to style the interface with a pink, cute aesthetic */
    .gradio-container {
        background-color: #f9e5e5; /* Soft pastel pink */
        color: #ff8fa3; /* Pink text */
        font-family: 'Comic Sans MS', sans-serif; /* Fun, playful font */
        border-radius: 20px;
        padding: 30px;
        box-shadow: 0 4px 10px rgba(0, 0, 0, 0.1);
    }
    .gradio-button {
        background-color: #ff7fa5; /* Light pink buttons */
        color: white;
        border-radius: 12px;
        font-size: 18px;
        padding: 16px 32px;
        font-weight: bold;
        transition: background-color 0.3s ease;
    }
    .gradio-button:hover {
        background-color: #ff5885; /* Deeper pink on hover */
    }
    .gradio-textbox {
        background-color: #ffd3d3; /* Soft pink background for textboxes */
        border-radius: 12px;
        color: #ff4d6d; /* Bold pink text */
        padding: 16px;
        border: 2px solid #ff8fa3;
    }
    .gradio-chat {
        background-color: #fff0f5; /* Very light pink for chat */
        border-radius: 20px;
        padding: 20px;
    }
    .gradio-chat .gradio-message-user {
        background-color: #ff7fa5; /* Light pink background for user messages */
        border-radius: 12px;
        color: white;
        padding: 12px;
    }
    .gradio-chat .gradio-message-assistant {
        background-color: #ffd3d3; /* Soft pink for assistant's messages */
        border-radius: 12px;
        color: #ff4d6d;
        padding: 12px;
    }
    .gradio-container h1 {
        color: #ff4d6d; /* Pink header */
        font-size: 40px;
        text-align: center;
        font-weight: bold;
        margin-bottom: 30px;
    }
    .gradio-container p {
        color: #ff8fa3; /* Soft pink text for descriptions */
        text-align: center;
        font-size: 16px;
        margin-top: 10px;
    }
    .gradio-container .gradio-textbox input {
        font-size: 18px;
    }
    """
)
if __name__ == "__main__":
    demo.launch()