zman1x1 committed on
Commit
1814537
·
unverified ·
1 Parent(s): bfadc34
models/distilbart_cnn_12_6.py CHANGED
@@ -4,7 +4,7 @@ from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
4
  # loading the model outside of the function makes it faster
5
  SUMMARIZATION_MODEL = "sshleifer/distilbart-cnn-12-6"
6
  tokenizer = AutoTokenizer.from_pretrained(SUMMARIZATION_MODEL)
7
- model = AutoModelForSeq2SeqLM.from_pretrained(SUMMARIZATION_MODEL, device_map="cuda:0")
8
 
9
  def summarize(text, max_len=20):
10
  """
 
4
  # loading the model outside of the function makes it faster
5
  SUMMARIZATION_MODEL = "sshleifer/distilbart-cnn-12-6"
6
  tokenizer = AutoTokenizer.from_pretrained(SUMMARIZATION_MODEL)
7
+ model = AutoModelForSeq2SeqLM.from_pretrained(SUMMARIZATION_MODEL)
8
 
9
  def summarize(text, max_len=20):
10
  """
models/t5_small_medium_title_generation.py CHANGED
@@ -4,7 +4,7 @@ import torch
4
 
5
  def t5model(prompt: str) -> str:
6
  tokenizer = AutoTokenizer.from_pretrained("fabiochiu/t5-small-medium-title-generation")
7
- model = AutoModelForSeq2SeqLM.from_pretrained("fabiochiu/t5-small-medium-title-generation", device_map="cuda:0", torch_dtype=torch.float16)
8
  inputs = tokenizer(
9
  ["summarize:" + prompt],
10
  return_tensors="pt",
 
4
 
5
  def t5model(prompt: str) -> str:
6
  tokenizer = AutoTokenizer.from_pretrained("fabiochiu/t5-small-medium-title-generation")
7
+ model = AutoModelForSeq2SeqLM.from_pretrained("fabiochiu/t5-small-medium-title-generation", torch_dtype=torch.float16)
8
  inputs = tokenizer(
9
  ["summarize:" + prompt],
10
  return_tensors="pt",