import gradio as gr
from llama_cpp import Llama
from huggingface_hub import hf_hub_download

# Download the GGUF model from the Hugging Face Hub (replace with your own repo/filename)
model_path = hf_hub_download(repo_id="YLX1965/medical-model", filename="unsloth.Q8_0.gguf")
llm = Llama(model_path=model_path)
def chat(prompt):
    # Generate a completion and return the text of the first choice
    output = llm(prompt, max_tokens=200)
    return output["choices"][0]["text"]
# Create the Gradio interface
interface = gr.Interface(fn=chat, inputs="text", outputs="text",
                         title="Medical Chatbot",
                         description="Medical text generation with a GGUF quantized model")
interface.launch()