import torch
from transformers import AutoModelForCausalLM, AutoTokenizer


def generate_text(prompt, max_length=100):
    # Load the fine-tuned model and tokenizer from the local output directory
    model = AutoModelForCausalLM.from_pretrained("./results")
    tokenizer = AutoTokenizer.from_pretrained("./results")

    # Tokenize the prompt into PyTorch tensors
    inputs = tokenizer(prompt, return_tensors="pt")

    # Sample one continuation with nucleus sampling;
    # max_length counts prompt tokens plus generated tokens
    outputs = model.generate(
        **inputs,
        max_length=max_length,
        num_return_sequences=1,
        temperature=0.7,
        top_p=0.9,
        do_sample=True,
    )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
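A minimal usage sketch, assuming the fine-tuned checkpoint was saved to ./results as above; the prompt string here is purely illustrative:

if __name__ == "__main__":
    # Generate a short continuation from an example prompt
    sample = generate_text("Once upon a time", max_length=80)
    print(sample)

Note that this function reloads the model and tokenizer on every call; for repeated generation you would typically load them once and pass them in, but the single-call form keeps the example self-contained.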