from transformers import AutoTokenizer, AutoModelForCausalLM
import torch  # required by return_tensors='pt'

# Initialize the tokenizer and model
tokenizer = AutoTokenizer.from_pretrained('gpt2-large')
model = AutoModelForCausalLM.from_pretrained('gpt2-large')

def generate_blog(topic, max_length=500, num_return_sequences=1):
    # Encode the topic as input IDs
    input_ids = tokenizer.encode(topic, return_tensors='pt')

    # Generate the blog text. no_repeat_ngram_size=2 blocks repeated bigrams,
    # and pad_token_id is set explicitly because GPT-2 has no pad token.
    # Note: num_return_sequences > 1 requires sampling or beam search;
    # with the default greedy decoding, keep it at 1.
    outputs = model.generate(
        input_ids,
        max_length=max_length,
        num_return_sequences=num_return_sequences,
        no_repeat_ngram_size=2,
        pad_token_id=tokenizer.eos_token_id
    )

    # Decode the generated IDs to text
    generated_texts = [tokenizer.decode(output, skip_special_tokens=True) for output in outputs]
    return generated_texts

# Example usage
topic = input("Enter the topic: ")
generated_blogs = generate_blog(topic)
for i, blog in enumerate(generated_blogs):
    print(f"Blog {i+1}:\n{blog}\n")
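Greedy decoding tends to produce flat, repetitive prose over a 500-token blog post. A minimal sketch of a sampling-based variant, reusing the tokenizer and model objects above: do_sample=True with temperature and top_p (the specific values here are illustrative assumptions, not tuned settings) gives more varied text and also makes num_return_sequences > 1 valid.

# Sampling variant: more varied output than greedy decoding.
# temperature/top_p values below are illustrative, not tuned.
def generate_blog_sampled(topic, max_length=500, num_return_sequences=2):
    input_ids = tokenizer.encode(topic, return_tensors='pt')
    outputs = model.generate(
        input_ids,
        max_length=max_length,
        num_return_sequences=num_return_sequences,  # allowed here because do_sample=True
        do_sample=True,
        temperature=0.9,
        top_p=0.95,
        no_repeat_ngram_size=2,
        pad_token_id=tokenizer.eos_token_id
    )
    return [tokenizer.decode(output, skip_special_tokens=True) for output in outputs]

Calling generate_blog_sampled("The future of renewable energy") returns two different drafts of the same topic, which is often more useful for blog writing than a single deterministic continuation.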