# Bilingual QA Gradio Space (viewer metadata header removed during cleanup).
import gradio as gr
from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline
# 1. Choose a bilingual/multilingual extractive-QA model checkpoint.
MODEL_NAME = "mrm8488/xlm-roberta-large-finetuned-squadv2"

# 2. Load tokenizer and model weights (downloads from the Hub on first run).
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForQuestionAnswering.from_pretrained(MODEL_NAME)

# 3. Initialize the extractive question-answering pipeline.
qa_pipeline = pipeline("question-answering", model=model, tokenizer=tokenizer)

# 4. Load the custom knowledge base used as the QA context.
#    NOTE(review): fails fast with FileNotFoundError if knowledge.txt is
#    missing — presumably intentional for a Space; confirm before changing.
with open("knowledge.txt", "r", encoding="utf-8") as f:
    knowledge_text = f.read()
# 5. Answer a user question against the loaded knowledge base.
def answer_question(question: str) -> str:
    """Run extractive QA over the knowledge-base context.

    Args:
        question: Free-text question (English or Spanish).

    Returns:
        The extracted answer span from ``knowledge_text``; a prompt string
        when the input is blank/whitespace; or an ``"Error: ..."`` message
        if the pipeline raises.
    """
    # Guard clause: reject empty or whitespace-only input before invoking
    # the (expensive) model.
    if not question.strip():
        return "Please ask a valid question."
    try:
        result = qa_pipeline(question=question, context=knowledge_text)
        return result["answer"]
    except Exception as e:
        # UI boundary handler: surface any failure as text rather than
        # crashing the Gradio app.
        return f"Error: {str(e)}"
# 6. Build the Gradio interface: one textbox in, plain text out.
iface = gr.Interface(
    fn=answer_question,
    inputs=gr.Textbox(lines=2, placeholder="Enter your question here..."),
    outputs="text",
    title="Budtender LLM (Bilingual QA)",
    description=(
        "A bilingual Q&A model trained on Spanish and English data. "
        "Ask your cannabis-related questions here!"
    ),
)
# 7. Launch the app only when run as a script (not when imported).
if __name__ == "__main__":
    iface.launch()