Update summary.py
summary.py (+17 -5)
@@ -2,16 +2,28 @@ import torch
 import streamlit as st
 from transformers import PegasusForConditionalGeneration, AutoTokenizer
 
-model_name = 'google/pegasus-cnn_dailymail'
-device = 'cuda' if torch.cuda.is_available() else 'cpu'
-tokenizer = AutoTokenizer.from_pretrained(model_name)
-model = PegasusForConditionalGeneration.from_pretrained(model_name).to(device)
+@st.cache(allow_output_mutation=True)
+def do_summary(model_name):
+    model = PegasusForConditionalGeneration.from_pretrained(model_name)
+    return model
+
+@st.cache(allow_output_mutation=True)
+def do_tokenize(model_name):
+    tokenizer = AutoTokenizer.from_pretrained(model_name)
+    return tokenizer
+
+model = do_summary("google/pegasus-cnn_dailymail")
+tokenizer = do_tokenize("google/pegasus-cnn_dailymail")
 
-# @st.cache(allow_output_mutation=True)
 
 def summarize(passage):
     txt = " ".join(passage)
+    #model_name = 'google/pegasus-cnn_dailymail'
+    device = 'cuda' if torch.cuda.is_available() else 'cpu'
+    #tokenizer = AutoTokenizer.from_pretrained(model_name)
+    #model = PegasusForConditionalGeneration.from_pretrained(model_name).to(device)
     batch = tokenizer(txt, truncation=True, padding='longest', return_tensors="pt").to(device)
     translated = model.generate(**batch)
     summy = tokenizer.batch_decode(translated, skip_special_tokens=True)
+    print("summ end")
     return summy
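Net effect: the Pegasus model and tokenizer are now loaded through @st.cache(allow_output_mutation=True)-wrapped helpers, so Streamlit reuses the loaded weights across script reruns instead of reloading them on every interaction, and summarize() only picks a device and runs generation. One caveat visible in the diff: the cached model is loaded without .to(device) while the input batch is moved to device, so on a CUDA machine the inputs and the weights would end up on different devices.

Below is a minimal sketch of how an app might call this module. The file name app.py and the widget layout are assumptions for illustration, not part of this commit.

import streamlit as st

from summary import summarize  # assumes the diffed file is saved as summary.py

st.title("Pegasus summarizer")
text = st.text_area("Paste a passage to summarize")

if st.button("Summarize") and text:
    # summarize() does " ".join(passage), so wrap the text in a list;
    # a bare string would be joined character by character.
    result = summarize([text])
    # batch_decode() returns a list of strings; show the first one.
    st.write(result[0])

On current Streamlit releases st.cache is deprecated; st.cache_resource is the recommended replacement for caching model objects and keeps the same load-once-per-session behavior.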