Update app.py
app.py CHANGED
@@ -1,27 +1,63 @@
 import gradio as gr
 from llama_cpp import Llama
-
+
 # Load the model
 llm = Llama.from_pretrained(
-    repo_id
+    repo_id="uonlp/Vistral-7B-Chat-gguf",
     filename="ggml-vistral-7B-chat-f16.gguf"
 )
+
 # Define the function to interact with the model
 def chat_with_model(user_input):
     response = llm.create_chat_completion(
-        messages=[
-            {"role": "user", "content": user_input}
-        ]
+        messages=[{"role": "user", "content": user_input}]
     )
     return response['choices'][0]['message']['content']
 
+# Define CSS for clinic-like appearance
+custom_css = """
+body {
+    background-color: #e6f7ff; /* Light clinic-blue background */
+    font-family: 'Arial', sans-serif;
+}
+.gradio-container {
+    border: 2px solid #cce7ff;
+    border-radius: 15px;
+    padding: 20px;
+    background-color: #ffffff;
+}
+h1 {
+    color: #2c6693;
+}
+input, textarea {
+    border: 1px solid #b3d9ff;
+    border-radius: 10px;
+    padding: 10px;
+    width: 100%;
+    font-size: 16px;
+}
+.gr-button {
+    background-color: #4da6ff;
+    border: none;
+    border-radius: 10px;
+    color: white;
+    padding: 10px 20px;
+    cursor: pointer;
+}
+.gr-button:hover {
+    background-color: #3399ff;
+}
+"""
+
 # Create the Gradio interface
 iface = gr.Interface(
     fn=chat_with_model,
     inputs="text",
     outputs="text",
-    title="QA-
-    description="Ask the model any medical question
+    title="QA-Medical Chatbot",
+    description="Ask the model any medical question!",
+    theme="default",
+    css=custom_css
 )
 
 # Launch the interface
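The hunk ends at the "# Launch the interface" comment, so the launch call itself falls outside the context shown in this commit view. A minimal sketch of how the updated app.py would typically be started, assuming the conventional Gradio entry point (the exact lines after the comment are not part of this diff):

# Launch the interface (sketch; assumes the file ends with the standard Gradio launch call)
if __name__ == "__main__":
    # Keeps the script importable for local testing of chat_with_model
    # without starting the web server.
    iface.launch()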