Spaces:
Runtime error
Runtime error
Update app.py
Browse files
app.py
CHANGED
@@ -1,7 +1,6 @@
|
|
1 |
import streamlit as st
|
2 |
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
|
3 |
import nltk
|
4 |
-
import math
|
5 |
import torch
|
6 |
|
7 |
model_name = "afnanmmir/t5-base-abstract-to-plain-language-1"
|
@@ -13,15 +12,14 @@ st.header("Generate summaries")
|
|
13 |
|
14 |
st_model_load = st.text('Loading summary generator model...')
|
15 |
|
16 |
-
# @st.cache(allow_output_mutation=True)
|
17 |
-
@st.cache_data
|
18 |
-
def load_model():
|
19 |
-
|
20 |
-
|
21 |
-
|
22 |
-
|
23 |
-
|
24 |
-
return tokenizer, model
|
25 |
|
26 |
tokenizer, model = load_model()
|
27 |
st.success('Model loaded!')
|
|
|
1 |
import streamlit as st
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
import nltk
import torch

model_name = "afnanmmir/t5-base-abstract-to-plain-language-1"

st_model_load = st.text('Loading summary generator model...')

# Cache the tokenizer/model across Streamlit reruns.
# NOTE(review): use cache_resource, not cache_data — models/tokenizers are
# unserializable global resources, so @st.cache_data raises at runtime
# (the likely original "Runtime error"), and the legacy
# @st.cache(allow_output_mutation=True) API is deprecated.
@st.cache_resource
def load_model():
    """Load the T5 tokenizer and seq2seq model once per process.

    Also downloads the NLTK 'punkt' sentence tokenizer data, which the
    app presumably uses for sentence splitting downstream — confirm
    against the rest of app.py.

    Returns:
        tuple: (tokenizer, model) for `model_name`.
    """
    print("Loading model...")
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
    nltk.download('punkt')
    print("Model loaded!")
    return tokenizer, model

# Bug fix: the previous revision commented out def load_model() but left
# this call in place, so the app crashed at startup with
# NameError: name 'load_model' is not defined. Restoring the cached
# function above keeps this call site valid.
tokenizer, model = load_model()
st.success('Model loaded!')
|