kxx-kkk committed on
Commit
5154a59
·
verified ·
1 Parent(s): 8a81b94

Upload app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -6
app.py CHANGED
@@ -21,13 +21,15 @@ st.write("This project is to develop a web-based automated question-and-answer s
21
  st.write("πŸ‘ Click 'Input Text' or 'Upload File' to start experience the system. ")
22
 
23
  # store the model in cache resources to enhance efficiency (ref: https://docs.streamlit.io/library/advanced-features/caching)
24
- @st.cache_resource(show_spinner=True)
25
  def question_model():
26
  # call my model for question answering
27
- model_name = "kxx-kkk/FYP_qa_final"
28
- tokenizer = AutoTokenizer.from_pretrained(model_name)
29
- model = AutoModelForQuestionAnswering.from_pretrained(model_name)
30
- question_answerer = pipeline("question-answering", model=model, tokenizer=tokenizer, handle_impossible_answer=True)
 
 
31
  return question_answerer
32
 
33
  qamodel = question_model()
@@ -75,6 +77,7 @@ def extract_text(file_path):
75
  def question_answering(context, question):
76
  with st.spinner(text="Loading question model..."):
77
  question_answerer = qamodel
 
78
  with st.spinner(text="Getting answer..."):
79
  segment_size = 45000
80
  overlap_size = 50
@@ -117,7 +120,6 @@ def question_answering(context, question):
117
  unsafe_allow_html=True)
118
 
119
 
120
-
121
  #-------------------- Main Webpage --------------------
122
  # choose the source with different tabs
123
  tab1, tab2 = st.tabs(["Input Text", "Upload File"])
 
21
  st.write("πŸ‘ Click 'Input Text' or 'Upload File' to start experience the system. ")
22
 
23
  # store the model in cache resources to enhance efficiency (ref: https://docs.streamlit.io/library/advanced-features/caching)
24
+ @st.cache_resource(show_spinner=False)
25
  def question_model():
26
  # call my model for question answering
27
+ with st.spinner(text="Loading question model..."):
28
+ model_name = "kxx-kkk/FYP_qa_final"
29
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
30
+ model = AutoModelForQuestionAnswering.from_pretrained(model_name)
31
+ question_answerer = pipeline("question-answering", model=model, tokenizer=tokenizer, handle_impossible_answer=True)
32
+ print("QA model is dowloaded and ready to use")
33
  return question_answerer
34
 
35
  qamodel = question_model()
 
77
  def question_answering(context, question):
78
  with st.spinner(text="Loading question model..."):
79
  question_answerer = qamodel
80
+ print("loading QA model...")
81
  with st.spinner(text="Getting answer..."):
82
  segment_size = 45000
83
  overlap_size = 50
 
120
  unsafe_allow_html=True)
121
 
122
 
 
123
  #-------------------- Main Webpage --------------------
124
  # choose the source with different tabs
125
  tab1, tab2 = st.tabs(["Input Text", "Upload File"])