Update app.py
app.py
CHANGED
@@ -4,7 +4,7 @@ import time
 from transformers import pipeline
 from transformers import T5Tokenizer, T5ForConditionalGeneration
 from transformers import BartTokenizer, BartForConditionalGeneration
-from transformers import AutoTokenizer, EncoderDecoderModel
+#from transformers import AutoTokenizer, EncoderDecoderModel
 #from transformers import AutoTokenizer, LEDForConditionalGeneration
 #from transformers import AutoTokenizer, FlaxLongT5ForConditionalGeneration
 
@@ -38,15 +38,15 @@ def bart_summarize(text):
     return pp
 
 #Encoder-Decoder
-def encoder_decoder(text):
-    model = EncoderDecoderModel.from_pretrained("patrickvonplaten/bert2bert_cnn_daily_mail")
-    tokenizer = AutoTokenizer.from_pretrained("patrickvonplaten/bert2bert_cnn_daily_mail")
-    # let's perform inference on a long piece of text
-    input_ids = tokenizer(text, return_tensors="pt").input_ids
-    # autoregressively generate summary (uses greedy decoding by default)
-    generated_ids = model.generate(input_ids)
-    generated_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
-    return generated_text
+# def encoder_decoder(text):
+#     model = EncoderDecoderModel.from_pretrained("patrickvonplaten/bert2bert_cnn_daily_mail")
+#     tokenizer = AutoTokenizer.from_pretrained("patrickvonplaten/bert2bert_cnn_daily_mail")
+#     # let's perform inference on a long piece of text
+#     input_ids = tokenizer(text, return_tensors="pt").input_ids
+#     # autoregressively generate summary (uses greedy decoding by default)
+#     generated_ids = model.generate(input_ids)
+#     generated_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
+#     return generated_text
 
 # Result
 def result(summary):
@@ -90,13 +90,13 @@ if button:
             result(summary)
         except Exception:
             st.warning("🚨 Your input text is quite lengthy. For better results, consider providing a shorter text or breaking it into smaller chunks.")
-    elif model == "Encoder-Decoder":
-        st.write("You have selected Encoder-Decoder model.")
-        try:
-            summary = encoder_decoder(text)
-            result(summary)
-        except Exception:
-            st.warning("🚨 Your input text is quite lengthy. For better results, consider providing a shorter text or breaking it into smaller chunks.")
+    # elif model == "Encoder-Decoder":
+    #     st.write("You have selected Encoder-Decoder model.")
+    #     try:
+    #         summary = encoder_decoder(text)
+    #         result(summary)
+    #     except Exception:
+    #         st.warning("🚨 Your input text is quite lengthy. For better results, consider providing a shorter text or breaking it into smaller chunks.")
 
 #st.toast("Please wait while we summarize your text.")
 #with st.spinner("Summarizing..."):
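For reference, the branch this commit disables relied on the bert2bert checkpoint shown in the diff. Below is a minimal standalone sketch of that summarizer; the truncation to the encoder's 512-token window is an assumption added here to sidestep the long-input failure the app's warning refers to, and is not part of the original function.

from transformers import AutoTokenizer, EncoderDecoderModel

model = EncoderDecoderModel.from_pretrained("patrickvonplaten/bert2bert_cnn_daily_mail")
tokenizer = AutoTokenizer.from_pretrained("patrickvonplaten/bert2bert_cnn_daily_mail")

def encoder_decoder(text):
    # Tokenize, truncating inputs beyond the 512-token encoder limit
    # (truncation is an addition in this sketch, not in the app's code)
    input_ids = tokenizer(text, return_tensors="pt", truncation=True, max_length=512).input_ids
    # Autoregressively generate the summary (greedy decoding by default)
    generated_ids = model.generate(input_ids)
    return tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]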
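Both warning messages suggest breaking a long input into smaller chunks. A minimal sketch of that idea, assuming a per-chunk summarizer such as the app's bart_summarize and a hypothetical 512-token chunk size; none of this is implemented in the app itself.

def summarize_in_chunks(text, summarize_fn, tokenizer, max_tokens=512):
    # Split the token ids into fixed-size chunks (chunk size is an assumption)
    ids = tokenizer(text).input_ids
    chunks = [ids[i:i + max_tokens] for i in range(0, len(ids), max_tokens)]
    # Summarize each chunk independently, then join the partial summaries
    partials = [summarize_fn(tokenizer.decode(c, skip_special_tokens=True)) for c in chunks]
    return " ".join(partials)

Calling summarize_in_chunks(text, bart_summarize, tokenizer) in place of the direct bart_summarize(text) call inside the try block would be one way to act on the warning rather than only displaying it.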
|