File size: 720 Bytes
941ffa1
41fa829
260e9ca
41fa829
 
b05a407
 
41fa829
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8d5282f
 
41fa829
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
import gradio as gr
from llama_cpp import Llama

# Load the model from the Hugging Face Hub via llama-cpp-python.
# NOTE(review): `filename` is commented out, so from_pretrained must resolve a
# model file in the repo on its own; llama-cpp-python raises when it cannot
# narrow the repo down to a single GGUF file — confirm this repo's layout
# actually supports omitting `filename` (the commented value also doesn't look
# like a Vistral artifact name; verify before re-enabling it).
llm = Llama.from_pretrained(
    repo_id="Viet-Mistral/Vistral-7B-Chat",
   # filename="models-7B-F16.gguf"
)

# Bridge function between the Gradio UI and the model.
def chat_with_model(user_input):
    """Send a single user message to the model and return its reply text.

    Each call is stateless: only the current message is sent, so no chat
    history accumulates between turns.
    """
    chat_messages = [{"role": "user", "content": user_input}]
    completion = llm.create_chat_completion(messages=chat_messages)
    # The API returns an OpenAI-style payload; take the first choice's text.
    first_choice = completion['choices'][0]
    return first_choice['message']['content']

# Create the Gradio interface: a single text box in, a single text box out.
# Submissions are independent — chat_with_model carries no history between turns.
iface = gr.Interface(
    fn=chat_with_model,
    inputs="text",
    outputs="text",
    title="QA-medical Chatbot",
    description="Ask the model any medical question !"
)

# Launch the Gradio web server only when executed as a script (not on import).
if __name__ == "__main__":
    iface.launch()