import streamlit as st

from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline

# Load the model and tokenizer once and cache the QA pipeline across Streamlit reruns
@st.cache_resource
def load_qa_pipeline():
    model = AutoModelForQuestionAnswering.from_pretrained("spyrosbriakos/greek_legal_bert_v2")
    tokenizer = AutoTokenizer.from_pretrained("spyrosbriakos/greek_legal_bert_v2")
    return pipeline("question-answering", model=model, tokenizer=tokenizer)

qa_pipeline = load_qa_pipeline()

# Title of the app
st.title("Question Answering in Greek")

# Input fields for context and question
context = st.text_area("Context", "Provide the context here...")
question = st.text_input("Question", "Ask your question here...")

# Generate answer when the user presses the button
if st.button("Get Answer"):
    if context and question:
        result = qa_pipeline(question=question, context=context)
        st.write(f"Answer: {result['answer']}")
    else:
        st.write("Please provide both context and a question.")