from transformers import GPT2LMHeadModel, GPT2Tokenizer

MODEL_NAME = "gpt2"  # Change this as needed

# Load the tokenizer and model once at startup (downloads on first run).
tokenizer = GPT2Tokenizer.from_pretrained(MODEL_NAME)
model = GPT2LMHeadModel.from_pretrained(MODEL_NAME)

prompt = "Was ist künstliche Intelligenz?"  # Change this as needed
inputs = tokenizer.encode(prompt, return_tensors="pt")

# BUG FIX: num_return_sequences > 1 requires a stochastic decoding strategy;
# with the default greedy search, transformers raises a ValueError. Enable
# sampling so five distinct continuations can be produced.
# GPT-2 defines no pad token, so pass eos_token_id explicitly to silence the
# "Setting pad_token_id" warning and make padding behavior deterministic.
outputs = model.generate(
    inputs,
    max_length=200,
    num_return_sequences=5,
    do_sample=True,
    pad_token_id=tokenizer.eos_token_id,
)

# Print each generated continuation, stripping special tokens (e.g. EOS).
for i, output in enumerate(outputs):
    print(f"Output {i+1}: {tokenizer.decode(output, skip_special_tokens=True)}")