demo_medical_QA / app.py
zerostratos's picture
Update app.py
17be31f verified
raw
history blame
2.08 kB
import gradio as gr
from llama_cpp import Llama
# Load the model
# Downloads (on first run) and memory-maps the GGUF checkpoint from the
# Hugging Face Hub, then initializes a llama.cpp inference session.
# NOTE(review): these are f16 weights for a 7B model (~14 GB on disk/RAM) —
# confirm the deployment host has enough memory.
llm = Llama.from_pretrained(
    repo_id="uonlp/Vistral-7B-Chat-gguf",
    filename="ggml-vistral-7B-chat-f16.gguf"
)
# Define the function to interact with the model
def chat_with_model(user_input):
    """Send one user message to the loaded model and return its reply text.

    Parameters
    ----------
    user_input : str
        The question typed by the user in the Gradio textbox.

    Returns
    -------
    str
        The assistant message content from the first completion choice.
    """
    # Single-turn chat: the model sees only this message, no prior history.
    messages = [{"role": "user", "content": user_input}]
    completion = llm.create_chat_completion(messages=messages)
    reply = completion["choices"][0]["message"]["content"]
    return reply
# Define CSS for patient-friendly appearance
# Custom stylesheet injected into the Gradio page via gr.Interface(css=...):
# soft blue palette, rounded inputs and full-width button, plus a greeting
# banner inserted with a :before pseudo-element on the container.
custom_css = """
body {
background-color: #f0f9ff; /* Softer light blue background */
font-family: 'Arial', sans-serif;
}
.gradio-container {
border: 2px solid #b3e0ff;
border-radius: 15px;
padding: 30px;
background-color: #ffffff;
box-shadow: 0px 4px 8px rgba(0, 0, 0, 0.1);
}
h1 {
color: #4a90e2;
text-align: center;
font-size: 28px;
}
p {
font-size: 18px;
color: #333333;
text-align: center;
}
input, textarea {
border: 2px solid #b3d9ff;
border-radius: 10px;
padding: 15px;
width: 100%;
font-size: 18px;
}
.gr-button {
background-color: #4da6ff;
border: none;
border-radius: 10px;
color: white;
padding: 15px 25px;
font-size: 18px;
cursor: pointer;
margin-top: 20px;
display: block;
width: 100%;
}
.gr-button:hover {
background-color: #3399ff;
}
.gradio-container:before {
content: "💬 How can we help you today?";
display: block;
text-align: center;
font-size: 24px;
color: #2c6693;
margin-bottom: 20px;
}
"""
# Create the Gradio interface
# Single textbox in, single textbox out, wired to chat_with_model; each
# submission is one independent request (no conversation memory).
iface = gr.Interface(
    fn=chat_with_model,
    inputs="text",
    outputs="text",
    title="Friendly Medical Chatbot",
    description="Feel free to ask any questions. We’re here to help!",
    theme="default",
    css=custom_css
)
# Launch the interface
# Start the web server only when run as a script, not when imported.
if __name__ == "__main__":
    iface.launch()