from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the instruction-tuned Gemma 2B model and its tokenizer from the Hub.
model_name = "google/gemma-2b-it"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)


def generate_text(prompt):
    # Tokenize the prompt into a PyTorch tensor of input IDs.
    inputs = tokenizer.encode(prompt, return_tensors="pt")
    # Generate up to 50 new tokens beyond the prompt. Using max_new_tokens
    # instead of max_length avoids the prompt's own length eating into the
    # generation budget.
    outputs = model.generate(inputs, max_new_tokens=50)
    # Decode the generated IDs back into text, dropping special tokens.
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
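
# Minimal usage sketch (the prompt string below is illustrative, not from the
# original file). Note that google/gemma-2b-it is a gated model, so downloading
# it may require accepting the license and authenticating with a Hugging Face
# access token first.
if __name__ == "__main__":
    print(generate_text("Write a haiku about the ocean."))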