Soumen committed on
Commit 3d283a2 · 1 Parent(s): 004e256

Update app.py

Files changed (1)
  1. app.py +7 -4
app.py CHANGED
@@ -62,18 +62,21 @@ def read_pdf(file):
 # # Display the extracted text
 # #st.text(extracted_text)
 # return extracted_text
-
-@st.cache(suppress_st_warning=True)
-def engsum(text):
+@st.cache
+def l():
     tokenizer = AutoTokenizer.from_pretrained('t5-base')
     model = AutoModelWithLMHead.from_pretrained('t5-base', return_dict=True)
+    return tokenizer, model
+@st.cache(suppress_st_warning=True)
+def engsum(text):
+    tokenizer, model = l()
     #st.text("Using Google T5 Transformer ..")
     inputs = tokenizer.encode("summarize: " + text, return_tensors='pt',
                               max_length=512,
                               truncation=True)
     summary_ids = model.generate(inputs, max_length=150, min_length=80, length_penalty=5., num_beams=2)
     summary = tokenizer.decode(summary_ids[0])
-    st.success(summary)
+    st.success(summary[5:-2])
 @st.cache(suppress_st_warning=True)
 def bansum(text):
     def query(payload):
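
For reference, a minimal, self-contained sketch of the flow this commit produces: the expensive T5 load is wrapped in a cached loader so repeated calls to `engsum` reuse the same tokenizer and model, and the decoded summary is trimmed before being shown. The imports, the `load_t5` name (the commit calls it `l`), the `allow_output_mutation=True` flag, and the comments are assumptions added here to make the snippet stand alone; they are not part of the diff.

```python
import streamlit as st
from transformers import AutoTokenizer, AutoModelWithLMHead


# Assumption: allow_output_mutation=True keeps Streamlit from trying to hash the
# returned PyTorch model; the commit itself uses a bare @st.cache on this loader.
@st.cache(allow_output_mutation=True)
def load_t5():
    # Runs once per session; later calls return the cached tokenizer/model pair.
    tokenizer = AutoTokenizer.from_pretrained('t5-base')
    model = AutoModelWithLMHead.from_pretrained('t5-base', return_dict=True)
    return tokenizer, model


# suppress_st_warning=True silences the warning about calling st.success
# inside a cached function, mirroring the decorator kept by the commit.
@st.cache(suppress_st_warning=True)
def engsum(text):
    # Reuse the cached objects instead of reloading them on every summarization.
    tokenizer, model = load_t5()
    inputs = tokenizer.encode("summarize: " + text, return_tensors='pt',
                              max_length=512, truncation=True)
    summary_ids = model.generate(inputs, max_length=150, min_length=80,
                                 length_penalty=5., num_beams=2)
    summary = tokenizer.decode(summary_ids[0])
    # Drop the first five and last two characters of the decoded string, as the
    # commit does with summary[5:-2], to crudely strip special-token text.
    st.success(summary[5:-2])
```

Splitting the load into its own cached function means the two `from_pretrained` calls run once per Streamlit session instead of on every request, which appears to be the point of the refactor.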