Spaces:
Sleeping
Sleeping
File size: 1,199 Bytes
4300a8c 0b6d7d2 4300a8c 97239f1 0b6d7d2 4300a8c |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 |
import streamlit as st
from transformers import GPT2LMHeadModel, AutoTokenizer, AutoModelForCausalLM
import torch
# Load the tokenizer and model once and cache them across Streamlit reruns.
# Streamlit re-executes this whole script on every widget interaction, so an
# uncached module-level from_pretrained() would reload the multi-GB
# gpt2-large weights on each click.
@st.cache_resource
def _load_generator():
    """Return the (tokenizer, model) pair for gpt2-large, cached process-wide."""
    tok = AutoTokenizer.from_pretrained('gpt2-large')
    mdl = AutoModelForCausalLM.from_pretrained('gpt2-large')
    mdl.eval()  # inference only; disable dropout
    return tok, mdl

tokenizer, model = _load_generator()
def generate_blog(topic, max_length=500, num_return_sequences=1):
    """Generate blog-style text continuations of *topic* with gpt2-large.

    Args:
        topic: Prompt string the generation is conditioned on.
        max_length: Maximum total length in tokens (prompt + generated text).
        num_return_sequences: Number of independent samples to return.

    Returns:
        A list of generated strings, one per requested sequence.
    """
    # Encode the topic as input IDs.
    input_ids = tokenizer.encode(topic, return_tensors='pt')
    # Inference only: skip autograd bookkeeping during generation.
    with torch.no_grad():
        outputs = model.generate(
            input_ids,
            max_length=max_length,
            num_return_sequences=num_return_sequences,
            no_repeat_ngram_size=2,
            # Sampling is required for num_return_sequences > 1: greedy
            # decoding can only yield one sequence and generate() rejects
            # the combination otherwise.
            do_sample=True,
            # GPT-2 defines no pad token; point padding at EOS explicitly
            # to silence the generate() warning and pad correctly.
            pad_token_id=tokenizer.eos_token_id,
            # NOTE(review): the original passed early_stopping=True, which
            # only applies to beam search and was a warning-producing no-op
            # here; dropped.
        )
    # Decode the generated IDs back to text, stripping special tokens.
    return [tokenizer.decode(output, skip_special_tokens=True)
            for output in outputs]
# --- Streamlit UI ---
# (A stray trailing "|" scrape artifact after the final st.write call was
# removed; it made the original capture a syntax error.)
st.title("Blog Generator")

topic = st.text_input("Enter the topic's name:")

if st.button("Generate Blog"):
    if not topic:
        # Nothing typed yet: prompt the user instead of generating.
        st.write("Please enter a topic to generate a blog.")
    else:
        # Render each generated sample under its own numbered heading.
        for idx, blog_text in enumerate(generate_blog(topic)):
            st.subheader(f"Blog {idx+1}")
            st.write(blog_text)