# File size: 346 Bytes
# Commit: 17daafb
"""Load the MacLeanLuke/gemma-2b-tool-tuned causal LM and print a sample generation."""
from transformers import AutoModelForCausalLM, AutoTokenizer

# Hugging Face Hub repo holding both the fine-tuned weights and the tokenizer.
repo_id = "MacLeanLuke/gemma-2b-tool-tuned"

model = AutoModelForCausalLM.from_pretrained(repo_id)
tokenizer = AutoTokenizer.from_pretrained(repo_id)

inputs = tokenizer("Hello, how are you?", return_tensors="pt")
# Cap generation length explicitly: generate()'s default max length (~20 total
# tokens) truncates the reply abruptly.
outputs = model.generate(**inputs, max_new_tokens=64)
# skip_special_tokens drops <bos>/<eos> markers from the printed text.
print(tokenizer.decode(outputs[0], skip_special_tokens=True))