fabiochiusano committed
Commit 1b04eca · 1 Parent(s): 0ba9aa2

sidebar loads before model

Files changed (1): app.py (+15, -13)
app.py CHANGED
@@ -17,19 +17,6 @@ urls = {
 }
 
 st.header("Extracting a Knowledge Base from text")
-st_model_load = st.text('Loading NER model... It may take a while.')
-
-@st.cache(allow_output_mutation=True)
-def load_model():
-    print("Loading model...")
-    tokenizer = AutoTokenizer.from_pretrained("Babelscape/rebel-large")
-    model = AutoModelForSeq2SeqLM.from_pretrained("Babelscape/rebel-large")
-    print("Model loaded!")
-    return tokenizer, model
-
-tokenizer, model = load_model()
-st.success('Model loaded!')
-st_model_load.text("")
 
 # sidebar
 with st.sidebar:
@@ -46,6 +33,21 @@ with st.sidebar:
     st.header("Considerations")
     st.markdown("If you look closely at the extracted knowledge graphs, some extracted relations are false. Indeed, relation extraction models are still far from perfect and require further steps in the pipeline to build reliable knowledge graphs. Consider this demo as a starting step!")
 
+# Loading the model
+st_model_load = st.text('Loading NER model... It may take a while.')
+
+@st.cache(allow_output_mutation=True)
+def load_model():
+    print("Loading model...")
+    tokenizer = AutoTokenizer.from_pretrained("Babelscape/rebel-large")
+    model = AutoModelForSeq2SeqLM.from_pretrained("Babelscape/rebel-large")
+    print("Model loaded!")
+    return tokenizer, model
+
+tokenizer, model = load_model()
+st.success('Model loaded!')
+st_model_load.text("")
+
 # Choose from where to generate the KB
 options = [
     "Text",