from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "MacLeanLuke/gemma-2b-tool-tuned"

# Load the fine-tuned model and its tokenizer from the Hugging Face Hub
model = AutoModelForCausalLM.from_pretrained(repo_id)
tokenizer = AutoTokenizer.from_pretrained(repo_id)

# Tokenize a prompt and generate a response
inputs = tokenizer("Hello, how are you?", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=50)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
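
# Optional: sample from the model instead of using greedy decoding.
# These generate() kwargs (do_sample, temperature, top_p) are standard
# transformers options; the values below are illustrative, not tuned
# for this particular model.
outputs = model.generate(
    **inputs,
    max_new_tokens=100,
    do_sample=True,
    temperature=0.7,
    top_p=0.9,
)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))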