import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

def generate_text(prompt, max_length=100):
    # Load the fine-tuned model and tokenizer saved in ./results.
    # (Note: this reloads from disk on every call; hoist the loading out of
    # the function if you generate repeatedly.)
    model = AutoModelForCausalLM.from_pretrained("./results")
    tokenizer = AutoTokenizer.from_pretrained("./results")
    model.eval()

    # Tokenize the prompt into PyTorch tensors.
    inputs = tokenizer(prompt, return_tensors="pt")

    # Sample one continuation; max_length counts the prompt tokens too.
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_length=max_length,
            num_return_sequences=1,
            temperature=0.7,  # soften the next-token distribution
            top_p=0.9,        # nucleus sampling
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id,  # avoids a warning for models with no pad token
        )

    # Decode the generated token IDs back into a string.
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
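
# Minimal usage sketch, assuming a causal LM fine-tuned and saved to
# ./results by an earlier training run; the prompt string is illustrative.
if __name__ == "__main__":
    sample = generate_text("Once upon a time", max_length=80)
    print(sample)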