# NOTE: removed a Hugging Face Spaces status banner ("Spaces: Running")
# that leaked into this file as a copy-paste artifact.
import streamlit as st
import torch
from transformers import GPT2LMHeadModel, GPT2Tokenizer
@st.cache_resource  # load once per server process, not on every Streamlit rerun
def load_model():
    """Load and cache the GPT-2 model and its tokenizer.

    Without caching, Streamlit re-executes the whole script on every
    widget interaction and would reload the model each time;
    ``st.cache_resource`` keeps one shared instance alive.

    Returns:
        tuple: ``(model, tokenizer)`` — a ``GPT2LMHeadModel`` in eval
        mode and the matching ``GPT2Tokenizer``.
    """
    MODEL_NAME = "gpt2"  # change this to use another checkpoint
    tokenizer = GPT2Tokenizer.from_pretrained(MODEL_NAME)
    model = GPT2LMHeadModel.from_pretrained(MODEL_NAME)
    model.eval()  # inference only; disables dropout
    return model, tokenizer
def generate_text(prompt, model, tokenizer):
    """Generate five sampled continuations of *prompt* with GPT-2.

    Args:
        prompt: Input text to continue.
        model: A ``GPT2LMHeadModel`` (or compatible causal LM).
        tokenizer: The tokenizer matching ``model``.

    Returns:
        list[str]: Five decoded continuations, special tokens stripped.
    """
    inputs = tokenizer.encode(prompt, return_tensors="pt")
    # do_sample=True is required: greedy decoding with
    # num_return_sequences > 1 raises a ValueError in transformers.
    # pad_token_id must be set explicitly because GPT-2 defines no pad
    # token; eos_token_id is the conventional substitute.
    with torch.no_grad():  # inference only; skip autograd bookkeeping
        outputs = model.generate(
            inputs,
            max_length=200,
            num_return_sequences=5,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id,
        )
    # skip_special_tokens drops <|endoftext|> markers from the output text.
    return [tokenizer.decode(output, skip_special_tokens=True) for output in outputs]
# --- Streamlit UI ------------------------------------------------------
# The script body reruns top-to-bottom on every interaction; load_model()
# is cheap after the first call thanks to its caching.
model, tokenizer = load_model()

st.title("Textgenerierung mit GPT-2")

prompt = st.text_input("Geben Sie einen Prompt ein:")
if prompt:
    # Generation takes several seconds on CPU; show progress feedback.
    with st.spinner("Generieren von Text..."):
        generated_text = generate_text(prompt, model, tokenizer)
    st.header("Generierter Text:")
    for i, text in enumerate(generated_text):
        st.subheader(f"Option {i+1}:")
        st.write(text)