import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer
model_name = 'openai-community/gpt2-large'
model = AutoModelForCausalLM.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name, use_fast=True)
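# Optional tweak (an assumption, not part of the original Space): move the
# model to a GPU when one is available; gpt2-large (~774M parameters) is
# noticeably slow on CPU.
import torch
model = model.to("cuda" if torch.cuda.is_available() else "cpu")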
def generate_blogpost(topic, max_new_tokens=500, temperature=0.7):
    prompt = f"Write a blog post about {topic}:\n\n"
    # Encode the prompt; keep the tensors on the same device as the model
    inputs_encoded = tokenizer(prompt, return_tensors='pt').to(model.device)
    # Sample a continuation; pad_token_id is set explicitly because GPT-2
    # has no pad token, which otherwise triggers a warning on every call
    model_output = model.generate(
        inputs_encoded["input_ids"],
        attention_mask=inputs_encoded["attention_mask"],
        max_new_tokens=max_new_tokens,
        do_sample=True,
        temperature=temperature,
        pad_token_id=tokenizer.eos_token_id
    )[0]
    # Decode the generated token ids back into text
    output = tokenizer.decode(model_output, skip_special_tokens=True)
    # Remove the prompt from the front of the generated text
    blog_post = output[len(prompt):].strip()
    return blog_post
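# Quick smoke test without the UI (hypothetical topic string; uncomment to run
# once the model weights have downloaded):
# print(generate_blogpost("the history of coffee", max_new_tokens=60))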
# Create the Gradio interface
iface = gr.Interface(
    fn=generate_blogpost,
    inputs=[
        gr.Textbox(lines=1, placeholder="Enter the blog post topic here..."),
        gr.Slider(minimum=100, maximum=1000, step=50, label="Max New Tokens", value=500),
        gr.Slider(minimum=0.1, maximum=1.0, step=0.1, label="Temperature", value=0.7)
    ],
    outputs="text",
    title="GPT-2 Blog Post Generator",
    description="Enter a topic, and this app will generate a blog post using GPT-2."
)
# Launch the app
iface.launch()
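# Note: on a Hugging Face Space the plain launch() above is sufficient; when
# running locally or in a notebook, iface.launch(share=True) also serves a
# temporary public URL.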