ShreyaRao committed on
Commit
bb9e1c7
·
1 Parent(s): 401205e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +17 -18
app.py CHANGED
@@ -1,7 +1,6 @@
1
  import streamlit as st
2
- import torch
3
- from transformers import pipeline
4
- # from transformers import T5Tokenizer, T5ForConditionalGeneration
5
  # from transformers import BartTokenizer, BartForConditionalGeneration
6
  # from transformers import AutoTokenizer, EncoderDecoderModel
7
  #from transformers import AutoTokenizer, LEDForConditionalGeneration
@@ -17,21 +16,21 @@ Kathmandu, Nepal's capital, is set in a valley surrounded by the Himalayan mount
17
  ##initializing models
18
 
19
  #Transformers Approach
20
- def transform_summarize(text):
21
- summary = pipeline("summarization")
22
- k=summary(text,max_length=100,do_sample=False)
23
- return k
24
 
25
  #T5
26
- # #def t5_summarize(text):
27
- # tokenizer = T5Tokenizer.from_pretrained("t5-small")
28
- # model = T5ForConditionalGeneration.from_pretrained("t5-small")
29
-
30
- # input_text = "summarize: " + text
31
- # inputs = tokenizer.encode(input_text, return_tensors="pt", max_length=1024, truncation=True)
32
- # outputs = model.generate(inputs, max_length=200, min_length=50, length_penalty=2.0, num_beams=4, early_stopping=True)
33
- # summary = tokenizer.decode(outputs[0], skip_special_tokens=True)
34
- # return summary
35
 
36
  #BART
37
  # def bart_summarize(text):
@@ -55,8 +54,8 @@ def transform_summarize(text):
55
  # return generated_text
56
 
57
  #st.write("Generated Summaries are: ")
58
- l=transform_summarize(text)
 
59
  st.write(l)
60
- # print(t5_summarize(text))
61
  # print(bart_summarize(text))
62
  # print(encoder_decoder(text))
 
1
  import streamlit as st
2
+ #from transformers import pipeline
3
+ from transformers import T5Tokenizer, T5ForConditionalGeneration
 
4
  # from transformers import BartTokenizer, BartForConditionalGeneration
5
  # from transformers import AutoTokenizer, EncoderDecoderModel
6
  #from transformers import AutoTokenizer, LEDForConditionalGeneration
 
16
  ##initializing models
17
 
18
  #Transformers Approach
19
+ # def transform_summarize(text):
20
+ # summary = pipeline("summarization")
21
+ # k=summary(text,max_length=100,do_sample=False)
22
+ # return k
23
 
24
  #T5
25
+ def t5_summarize(text):
26
+ tokenizer = T5Tokenizer.from_pretrained("t5-small")
27
+ model = T5ForConditionalGeneration.from_pretrained("t5-small")
28
+
29
+ input_text = "summarize: " + text
30
+ inputs = tokenizer.encode(input_text, return_tensors="pt", max_length=1024, truncation=True)
31
+ outputs = model.generate(inputs, max_length=200, min_length=50, length_penalty=2.0, num_beams=4, early_stopping=True)
32
+ summary = tokenizer.decode(outputs[0], skip_special_tokens=True)
33
+ return summary
34
 
35
  #BART
36
  # def bart_summarize(text):
 
54
  # return generated_text
55
 
56
  #st.write("Generated Summaries are: ")
57
+ # l=transform_summarize(text)
58
+ l=t5_summarize(text)
59
  st.write(l)
 
60
  # print(bart_summarize(text))
61
  # print(encoder_decoder(text))