Shiiirley committed
Commit b0643d0 · verified · 1 Parent(s): 81b1b2e

Update app.py

Files changed (1)
  1. app.py +3 -6
app.py CHANGED
@@ -32,15 +32,12 @@ if predictions[0,0] >= 0.4:
 
 readout = "This news is probably a "+ judge + f" one. The fake probability is {100*predictions[0,0]:.4f}%."
 
-#from transformers import AutoModelWithLMHead, AutoTokenizer
+from transformers import AutoModelWithLMHead, AutoTokenizer
 
-#tokenizer = AutoTokenizer.from_pretrained("mrm8488/t5-base-finetuned-summarize-news",use_fast=False)
-#model = AutoModelWithLMHead.from_pretrained("mrm8488/t5-base-finetuned-summarize-news")
+tokenizer = AutoTokenizer.from_pretrained("mrm8488/t5-base-finetuned-summarize-news",use_fast=False)
+model = AutoModelWithLMHead.from_pretrained("mrm8488/t5-base-finetuned-summarize-news")
 
-from transformers import AutoTokenizer, AutoModelForCausalLM
 
-tokenizer = AutoTokenizer.from_pretrained("pvduy/pythia-1B-sft-summarize-tldr")
-model = AutoModelForCausalLM.from_pretrained("pvduy/pythia-1B-sft-summarize-tldr")
 
 def summarize(text, max_length=150):
     input_ids = tokenizer.encode(text, return_tensors="pt", add_special_tokens=True)
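For context, this hunk switches the summarizer from pvduy/pythia-1B-sft-summarize-tldr back to the T5 checkpoint mrm8488/t5-base-finetuned-summarize-news. The diff cuts off inside summarize(); the sketch below illustrates how such a function is typically completed for a seq2seq checkpoint like this one. It is an assumption-laden illustration, not the repository's actual code: the generate() settings are invented for the example, and it loads the model with AutoModelForSeq2SeqLM, the current replacement for the deprecated AutoModelWithLMHead class used in the commit.

# Minimal sketch only: NOT taken from app.py. Shows a typical summarize()
# implementation for a T5-style seq2seq model such as
# mrm8488/t5-base-finetuned-summarize-news.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "mrm8488/t5-base-finetuned-summarize-news", use_fast=False
)
model = AutoModelForSeq2SeqLM.from_pretrained(
    "mrm8488/t5-base-finetuned-summarize-news"
)

def summarize(text, max_length=150):
    # Tokenize the article text into model input IDs.
    input_ids = tokenizer.encode(text, return_tensors="pt", add_special_tokens=True)
    # Generate a summary; these decoding parameters are assumed defaults
    # for illustration, not the values used in the commit.
    generated_ids = model.generate(
        input_ids=input_ids,
        num_beams=2,
        max_length=max_length,
        repetition_penalty=2.5,
        length_penalty=1.0,
        early_stopping=True,
    )
    # Decode the generated token IDs back into a plain-text summary.
    return tokenizer.decode(generated_ids[0], skip_special_tokens=True)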