# NOTE(review): removed Hugging Face Spaces page residue that was scraped into
# this file (status lines, file size, commit hashes, and a line-number gutter).
# It was not part of the source and made the module unparseable.
import gradio as gr
# Leftover from an earlier llama.cpp-based variant of this app; kept for reference.
#from llama_cpp import Llama
from transformers import AutoModelForCausalLM
# Load the model
# NOTE: this downloads/loads the full 7B-parameter checkpoint at import time,
# which is slow and memory-hungry; trust_remote_code runs the repo's own code.
llm = AutoModelForCausalLM.from_pretrained("Viet-Mistral/Vistral-7B-Chat",trust_remote_code=True)
# GGUF filename from the abandoned llama.cpp variant — unused with transformers.
# filename="models-7B-F16.gguf"
# Define the function to interact with the model
# Tokenizer cache — loaded lazily on the first chat request.
_TOKENIZER = None


def _get_tokenizer():
    """Load (once) and return the tokenizer matching the chat model."""
    global _TOKENIZER
    if _TOKENIZER is None:
        from transformers import AutoTokenizer
        _TOKENIZER = AutoTokenizer.from_pretrained("Viet-Mistral/Vistral-7B-Chat")
    return _TOKENIZER


def chat_with_model(user_input):
    """Generate one chat reply for *user_input* with the module-level model.

    Fixes vs. the original: the old body was unindented (IndentationError) and
    called ``llm.generate(messages=[...])`` / indexed ``response['choices']`` —
    that is llama-cpp's chat-completion API, not transformers'.
    ``PreTrainedModel.generate`` takes token ids and returns token ids.

    Parameters: user_input (str) — the user's question, as typed in the UI.
    Returns: str — the model's decoded reply text.
    """
    tokenizer = _get_tokenizer()
    messages = [{"role": "user", "content": user_input}]
    # apply_chat_template formats the turn into the model's expected prompt.
    input_ids = tokenizer.apply_chat_template(messages, return_tensors="pt")
    output_ids = llm.generate(
        input_ids,
        max_new_tokens=512,
        pad_token_id=tokenizer.eos_token_id,
    )
    # Decode only the newly generated tokens, skipping the echoed prompt.
    reply_ids = output_ids[0][input_ids.shape[-1]:]
    return tokenizer.decode(reply_ids, skip_special_tokens=True)
# Wire the chat function into a simple text-in / text-out Gradio UI.
_ui_settings = {
    "fn": chat_with_model,
    "inputs": "text",
    "outputs": "text",
    "title": "QA-medical Chatbot",
    "description": "Ask the model any medical question !",
}
iface = gr.Interface(**_ui_settings)
# Launch the interface
if __name__ == "__main__":
    # Original call was unindented (IndentationError); launch only when run
    # as a script so importing this module does not start the server.
    iface.launch()