pragnakalp committed
Commit e2313eb
1 Parent(s): 235e230

Update app.py

Files changed (1)
  1. app.py +39 -37
app.py CHANGED
@@ -23,43 +23,45 @@ t5_model = AutoModelWithLMHead.from_pretrained(t5_model_path)
 t5_tokenizer = AutoTokenizer.from_pretrained("mrm8488/t5-base-finetuned-summarize-news")
 
 def generate_text_summarization(sum_type,article):
-
-    if sum_type == 'BART Extractive Text Summarization':
-        inputs = bart_extractive_tokenizer([article], max_length=1024, return_tensors='pt')
-        summary_ids = bart_extractive_model.generate(inputs['input_ids'], num_beams=4, min_length=60, max_length=300, early_stopping=True)
-
-        summary = [bart_extractive_tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in summary_ids]
-        print(type(summary))
-        print(summary)
-        summary= summary[0]
-        doc = readablility_nlp(summary)
-        summary_score = round(doc._.flesch_kincaid_reading_ease,2)
-        summarized_data = {
-            "summary" : summary,
-            "score" : summary_score
-        }
-
-    if sum_type == 'T5 Abstractive Text Summarization':
-        inputs = t5_tokenizer.encode(article, return_tensors="pt", max_length=2048)
-        summary_ids = t5_model.generate(inputs,
-                                        num_beams=2,
-                                        no_repeat_ngram_size=2,
-                                        min_length=100,
-                                        max_length=300,
-                                        early_stopping=True)
-
-        summary = t5_tokenizer.decode(summary_ids[0], skip_special_tokens=True)
-        print(type(summary))
-        print(summary)
-        doc = readablility_nlp(summary)
-        summary_score = round(doc._.flesch_kincaid_reading_ease,2)
-        summarized_data = {
-            "summary" : summary,
-            "score" : summary_score
-        }
-
-    save_data_and_sendmail(paragraph, sum_type, result_dic)
-    return summary
+    if article.strip():
+        if sum_type == 'BART Extractive Text Summarization':
+            inputs = bart_extractive_tokenizer([article], max_length=1024, return_tensors='pt')
+            summary_ids = bart_extractive_model.generate(inputs['input_ids'], num_beams=4, min_length=60, max_length=300, early_stopping=True)
+
+            summary = [bart_extractive_tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in summary_ids]
+            print(type(summary))
+            print(summary)
+            summary= summary[0]
+            doc = readablility_nlp(summary)
+            summary_score = round(doc._.flesch_kincaid_reading_ease,2)
+            summarized_data = {
+                "summary" : summary,
+                "score" : summary_score
+            }
+
+        if sum_type == 'T5 Abstractive Text Summarization':
+            inputs = t5_tokenizer.encode(article, return_tensors="pt", max_length=2048)
+            summary_ids = t5_model.generate(inputs,
+                                            num_beams=2,
+                                            no_repeat_ngram_size=2,
+                                            min_length=100,
+                                            max_length=300,
+                                            early_stopping=True)
+
+            summary = t5_tokenizer.decode(summary_ids[0], skip_special_tokens=True)
+            print(type(summary))
+            print(summary)
+            doc = readablility_nlp(summary)
+            summary_score = round(doc._.flesch_kincaid_reading_ease,2)
+            summarized_data = {
+                "summary" : summary,
+                "score" : summary_score
+            }
+
+        save_data_and_sendmail(paragraph, sum_type, result_dic)
+        return summary
+    else:
+        raise gr.Error("Please enter text in inputbox!!!!")
 
 input_text=gr.Textbox(lines=5, label="Paragraph")
 input_radio= gr.Radio(['BART Extractive Text Summarization','T5 Abstractive Text Summarization'],label='Select summarization',value='BART Extractive Text Summarization')
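
For context, a minimal, self-contained sketch (not part of the commit; the summarizer body is a placeholder) of the empty-input guard this update introduces. Raising gr.Error makes Gradio surface the message as an error popup in the UI instead of letting the BART/T5 call fail on empty text.

import gradio as gr

def summarize(text):
    # Same guard pattern as the updated generate_text_summarization():
    # reject empty or whitespace-only input with a visible error popup.
    if not text.strip():
        raise gr.Error("Please enter text in inputbox!!!!")
    # Placeholder "summary" for this sketch; app.py runs BART or T5 here.
    return text[:100]

demo = gr.Interface(fn=summarize, inputs=gr.Textbox(lines=5, label="Paragraph"), outputs="text")
demo.launch()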