abdulllah01 committed
Commit a560191 · verified · 1 Parent(s): d7ddd71

Update app.py

Files changed (1)
  1. app.py +12 -7
app.py CHANGED
@@ -1,13 +1,14 @@
 import streamlit as st
-from transformers import AutoTokenizer, AutoModelForQuestionAnswering, pipeline
+import torch
+from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
 
 # Load the model and tokenizer from your Hugging Face Hub repository
 model_checkpoint = "abdulllah01/checkpoints"  # Replace with your actual checkpoint
 tokenizer = AutoTokenizer.from_pretrained(model_checkpoint)
-model = AutoModelForQuestionAnswering.from_pretrained(model_checkpoint)
 
-# Create a pipeline for question answering
-qa_pipeline = pipeline("text-generation", model=model, tokenizer=tokenizer)
+# Use a GPU when one is available; fall back to CPU otherwise
+device = "cuda" if torch.cuda.is_available() else "cpu"
+model = AutoModelForSeq2SeqLM.from_pretrained(model_checkpoint).to(device)
 
 # Streamlit UI setup
 st.title("Question Answering App")
@@ -18,12 +19,16 @@ context = st.text_area("Context:", "")
 
 # Text input for the question
 question = st.text_input("Question:", "")
 
 if st.button("Get Answer"):
     if context and question:
-        # Generate the answer using the pipeline
-        result = qa_pipeline(question=question, context=context)
-        answer = result['answer']
+        # Prepare the input in the "question: ... context: ..." format
+        input_text = f"question: {question} context: {context}"
+        input_ids = tokenizer.encode(input_text, return_tensors="pt").to(device)
+
+        # Generate the answer with the seq2seq model
+        output_ids = model.generate(input_ids)
+        answer = tokenizer.decode(output_ids[0], skip_special_tokens=True)
         st.write("**Answer:**", answer)
     else:
         st.write("Please enter both context and question.")