Niansuh committed on
Commit aec803b · verified · 1 Parent(s): 592776e

Create app.py

Files changed (1)
app.py +131 -0
app.py ADDED
@@ -0,0 +1,131 @@
+ # importing the necessary libraries
+ import re
+ import math
+
+ import spacy
+ import nltk
+ from nltk.tokenize import sent_tokenize
+ nltk.download('punkt')
+
+ import gradio as gr
+
+ from transformers import BartTokenizer, BartForConditionalGeneration
+
+
+ # initializing the summarization model, its tokenizer, and the spaCy sentence splitter
+ model = BartForConditionalGeneration.from_pretrained("sshleifer/distilbart-cnn-12-6")
+ tokenizer = BartTokenizer.from_pretrained("sshleifer/distilbart-cnn-12-6")
+ nlp = spacy.load("en_core_web_sm")
+
+
+ def clean_text(text):
+     # remove non-ASCII (e.g. Chinese) characters
+     text = text.encode("ascii", errors="ignore").decode("ascii")
+
+     # replace newlines and tabs with spaces, then collapse repeated spaces and trim
+     text = re.sub(r"\n", " ", text)
+     text = re.sub(r"\t", " ", text)
+     text = re.sub(" +", " ", text).strip()
+     return text
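+ # e.g. clean_text("Hello\n\n  world\t!") returns "Hello world !"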
+
+
+ # Defining a function to get the summary of the article
+ def final_summary(text):
+     # cleaning the input text before tokenizing it into sentences
+     text = clean_text(text)
+     bullet_points = 10
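+     # bullet_points starts at 10 so the summarization loop below runs at least once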
+
+     while bullet_points >= 10:
+
+         chunks = []
+         sentences = nlp(text)
+         for sentence in sentences.sents:
+             chunks.append(str(sentence))
+
+         output = []
+         sentences_remaining = len(chunks)
+         i = 0
+
+         # looping through the sentences in near-equal batches and summarizing each batch
+         while sentences_remaining > 0:
+             chunks_remaining = math.ceil(sentences_remaining / 10.0)
+             next_chunk_size = math.ceil(sentences_remaining / chunks_remaining)
+             sentence = " ".join(chunks[i:i + next_chunk_size])
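+             # the ceil() pair targets batches of about 10 sentences; e.g. 25 sentences
+             # remaining yields batches of 9, 8, and 8 rather than 10, 10, and 5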
+
+             i += next_chunk_size
+             sentences_remaining -= next_chunk_size
+
+             inputs = tokenizer(sentence, return_tensors="pt", padding="longest")
+             #inputs = inputs.to(DEVICE)
+             original_input_length = len(inputs["input_ids"][0])
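+             # BART's encoder accepts at most 1024 tokens, so each batch is routed by its token count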
+
+             # batches shorter than 100 tokens are passed through unsummarized, one sentence at a time
+             if original_input_length < 100:
+                 split_sentences = nlp(sentence)
+                 for split_sentence in split_sentences.sents:
+                     output.append(str(split_sentence).rstrip("."))
+
+             # batches over the 1024-token limit are split in half and summarized half by half
+             elif original_input_length > 1024:
+                 sent = sent_tokenize(sentence)
+                 length_sent = len(sent)
+
+                 j = 0
+                 sent_remaining = math.ceil(length_sent / 2)
+
+                 # summarizing the oversized batch one half at a time
+                 while length_sent > 0:
+                     halved_sentence = " ".join(sent[j:j + sent_remaining])
+                     # truncation guards against a half that still exceeds the 1024-token limit
+                     halved_inputs = tokenizer(halved_sentence, return_tensors="pt", truncation=True, max_length=1024)
+                     #halved_inputs = halved_inputs.to(DEVICE)
+                     halved_summary_ids = model.generate(halved_inputs["input_ids"])
+                     j += sent_remaining
+                     length_sent -= sent_remaining
+
+                     # keeping the summary only if it is shorter than its input
+                     if len(halved_summary_ids[0]) < len(halved_inputs["input_ids"][0]):
+                         halved_summary = tokenizer.batch_decode(halved_summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+                         output.append(halved_summary)
+
+             # batches between 100 and 1024 tokens are summarized directly
+             else:
+                 summary_ids = model.generate(inputs["input_ids"])
+
+                 # keeping the summary only if it is shorter than the original batch
+                 if len(summary_ids[0]) < original_input_length:
+                     summary = tokenizer.batch_decode(summary_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0]
+                     output.append(summary)
+
+         # splitting the decoded summaries back into individual sentences
+         final_output = []
+         for paragraph in output:
+             lines = paragraph.split(" . ")
+             for line in lines:
+                 final_output.append(line.replace(" .", "").strip())
+         # feeding the joined sentences back in, so the next pass summarizes the summaries
+         text = ". ".join(final_output)
+         bullet_points = len(final_output)
+
+
+     # formatting each sentence as a bullet point
+     for i in range(len(final_output)):
+         final_output[i] = "* " + final_output[i] + "."
+
+     # the final sentences are disjoint, so they are joined with newlines as a bulleted list
+     summary_bullet = "\n".join(final_output)
+
+     return summary_bullet
+
+ # creating an interface for the summarizer using gradio
+ demo = gr.Interface(final_summary,
+                     inputs=[gr.Textbox(label="Drop your article here")],
+                     outputs=[gr.Textbox(label="Summary")],
+                     title="chatgptbots.net | ARTICLE SUMMARIZER",
+                     )
+
+ # launching the app
+ if __name__ == "__main__":
+     demo.launch(debug=True)
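
Note: running this app locally implies roughly these dependencies (a minimal sketch; this commit pins no versions): torch, transformers, spacy, nltk, and gradio, plus the spaCy model installed with `python -m spacy download en_core_web_sm`.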