Nihanvi committed on
Commit
0b6d9ac
Β·
1 Parent(s): 97f1042

Create App.py

Browse files
Files changed (1) hide show
  1. App.py +110 -0
App.py ADDED
@@ -0,0 +1,110 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import time
3
+
4
+ from transformers import pipeline
5
+ from transformers import T5Tokenizer, T5ForConditionalGeneration
6
+ from transformers import BartTokenizer, BartForConditionalGeneration
7
+ #from transformers import AutoTokenizer, EncoderDecoderModel
8
+ #from transformers import AutoTokenizer, LEDForConditionalGeneration
9
+ #from transformers import AutoTokenizer, FlaxLongT5ForConditionalGeneration
10
+
11
+ ##initializing models
12
+
13
+ #Transformers Approach
14
def transform_summarize(text):
    """Summarize *text* with the default Hugging Face summarization pipeline.

    Parameters
    ----------
    text : str
        The text to summarize.

    Returns
    -------
    str
        The summary text. The pipeline itself returns a list like
        ``[{"summary_text": ...}]``; we unwrap it so ``result()`` renders
        plain text, consistent with ``t5_summarize`` / ``bart_summarize``.
    """
    pp = pipeline("summarization")
    # do_sample=False keeps the output deterministic (greedy/beam decoding).
    k = pp(text, max_length=100, do_sample=False)
    # BUG FIX: previously returned the raw [{"summary_text": ...}] list,
    # which st.write displayed as a list-of-dict instead of the summary.
    return k[0]["summary_text"]
18
+
19
+ #T5
20
def t5_summarize(text):
    """Summarize *text* with the pretrained ``t5-small`` model.

    Parameters
    ----------
    text : str
        The text to summarize (truncated to 1024 tokens).

    Returns
    -------
    str
        The decoded summary, 50-200 tokens long.
    """
    t5_tokenizer = T5Tokenizer.from_pretrained("t5-small")
    t5_model = T5ForConditionalGeneration.from_pretrained("t5-small")

    # T5 is a multi-task model: the "summarize:" prefix selects the task.
    encoded_input = t5_tokenizer.encode(
        "summarize: " + text,
        return_tensors="pt",
        max_length=1024,
        truncation=True,
    )
    generated_ids = t5_model.generate(
        encoded_input,
        max_length=200,
        min_length=50,
        length_penalty=2.0,  # favor longer summaries
        num_beams=4,
        early_stopping=True,
    )
    return t5_tokenizer.decode(generated_ids[0], skip_special_tokens=True)
29
+
30
+ #BART
31
def bart_summarize(text):
    """Summarize *text* with the pretrained ``facebook/bart-large-cnn`` model.

    Parameters
    ----------
    text : str
        The text to summarize (truncated to 1024 tokens).

    Returns
    -------
    str
        The decoded summary, at most 150 tokens long.
    """
    bart_tokenizer = BartTokenizer.from_pretrained("facebook/bart-large-cnn")
    bart_model = BartForConditionalGeneration.from_pretrained("facebook/bart-large-cnn")

    # The tokenizer takes a batch; we wrap the single document in a list.
    batch = bart_tokenizer([text], max_length=1024, return_tensors="pt", truncation=True)
    generated_ids = bart_model.generate(
        batch["input_ids"],
        num_beams=4,
        max_length=150,
        early_stopping=True,
    )
    return bart_tokenizer.decode(generated_ids[0], skip_special_tokens=True)
39
+
40
+ #Encoder-Decoder
41
+ # def encoder_decoder(text):
42
+ # model = EncoderDecoderModel.from_pretrained("patrickvonplaten/bert2bert_cnn_daily_mail")
43
+ # tokenizer = AutoTokenizer.from_pretrained("patrickvonplaten/bert2bert_cnn_daily_mail")
44
+ # # let's perform inference on a long piece of text
45
+ # input_ids = tokenizer(text, return_tensors="pt").input_ids
46
+ # # autoregressively generate summary (uses greedy decoding by default)
47
+ # generated_ids = model.generate(input_ids)
48
+ # generated_text = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]
49
+ # return generated_text
50
+
51
+ # Result
52
def result(summary):
    """Render *summary* in the Streamlit page after a short notice.

    Shows a success banner, pauses, then writes the summary under a
    styled subheader.
    """
    st.success('Please wait while we process and summarize')
    # Fixed delay so the banner is visible before the summary appears.
    time.sleep(12)
    st.subheader(":violet[Your summarized text is:]")
    st.write(summary)
57
+
58
# --- Page layout -----------------------------------------------------------

st.title("SummarizeEasy")
st.header(":violet[Summarize your text with ease!]")
st.divider()
st.write("Enter your text below and click on the button to summarize it.")
text = st.text_area("Enter your text here", height=200)
model = st.radio("Select the model you want to use", ("Transformer", "T5", "BART"))
st.write("Click on the button to summarize your text.")
button = st.button("Summarize")
st.divider()
st.info("Please note that this is a beta version and summarized content may not be accurate. To get an accurate content the models need to be fined tuned and trained on respective context which requires GPUS. Please feel free to share your feedback with us.")
st.divider()

# Map each radio choice to its summarizer so every branch behaves the same.
# (Previously the T5 branch computed its summary but never called result(),
# so the T5 summary was silently dropped — that is the bug fixed here.)
_SUMMARIZERS = {
    "Transformer": transform_summarize,
    "T5": t5_summarize,
    "BART": bart_summarize,
}

if button:
    if text:
        st.write(f"You have selected {model} model.")
        try:
            summary = _SUMMARIZERS[model](text)
            result(summary)
        except Exception:
            # Best-effort UX: model/tokenizer errors (typically over-long
            # input) are reported as a hint rather than a stack trace.
            st.warning("🚨 Your input text is quite lengthy. For better results, consider providing a shorter text or breaking it into smaller chunks.")
    else:
        st.warning("Please enter the text !!")