Update app.py
app.py CHANGED
@@ -8,12 +8,12 @@ from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM, AutoModelForSequenceClassification
 import numpy as np
 
 loaded_model = AutoModelForSequenceClassification.from_pretrained("runaksh/financial_summary_T5_base")
-loaded_tokenizer = AutoTokenizer.from_pretrained("runaksh/financial_summary_T5_base")
+loaded_tokenizer = AutoTokenizer.from_pretrained("runaksh/financial_summary_T5_base",local_files_only=True)
 
 # Function for generating summary
 def generate_summary(text,min_length=55,max_length=80):
     text = "summarize: "+text
-    input = tokenizer(text,max_length=512,truncation=True,return_tensors='
+    input = tokenizer(text,max_length=512,truncation=True,return_tensors='tf').input_ids
     op=model.generate(input,min_length=min_length,max_length=max_length)
     decoded_op = tokenizer.batch_decode(op,skip_special_tokens=True)
     return decoded_op
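Note that even after this change, the function body refers to `tokenizer` and `model` while the objects loaded at module level are named `loaded_tokenizer` and `loaded_model`, and the summarization checkpoint is loaded through `AutoModelForSequenceClassification` rather than a seq2seq class. Below is a minimal, self-contained sketch of what the script appears intended to do, assuming the checkpoint runaksh/financial_summary_T5_base is a T5 seq2seq model with TensorFlow weights (so TFAutoModelForSeq2SeqLM, which app.py already imports, is used) and that the loaded names are used consistently; it is not the committed code.

# Minimal sketch under the assumptions stated above, not the committed app.py.
from transformers import AutoTokenizer, TFAutoModelForSeq2SeqLM

# Load the tokenizer and the seq2seq model once at module level.
loaded_tokenizer = AutoTokenizer.from_pretrained("runaksh/financial_summary_T5_base")
loaded_model = TFAutoModelForSeq2SeqLM.from_pretrained("runaksh/financial_summary_T5_base")

# Function for generating summary
def generate_summary(text, min_length=55, max_length=80):
    # T5 expects a task prefix in front of the input text.
    text = "summarize: " + text
    # Tokenize to TensorFlow tensors and keep only the token ids.
    input_ids = loaded_tokenizer(
        text, max_length=512, truncation=True, return_tensors="tf"
    ).input_ids
    # Generate summary token ids within the requested length bounds.
    output_ids = loaded_model.generate(
        input_ids, min_length=min_length, max_length=max_length
    )
    # Decode back to plain text; batch_decode returns a list with one summary.
    decoded_op = loaded_tokenizer.batch_decode(output_ids, skip_special_tokens=True)
    return decoded_op

If the checkpoint only ships PyTorch weights, passing from_pt=True to from_pretrained converts them when loading into the TF class; alternatively, the PyTorch AutoModelForSeq2SeqLM class could be used throughout.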