# Loewolf-Chat / app.py
import gradio as gr
from transformers import GPT2LMHeadModel, GPT2Tokenizer
import torch

# Load the Loewolf/GPT_1 model and tokenizer from the Hugging Face Hub
tokenizer = GPT2Tokenizer.from_pretrained("Loewolf/GPT_1")
model = GPT2LMHeadModel.from_pretrained("Loewolf/GPT_1")

def generate_text(prompt):
    # Encode the prompt and build a matching attention mask
    input_ids = tokenizer.encode(prompt, return_tensors="pt")
    attention_mask = torch.ones_like(input_ids)
    # Generate up to 50 new tokens, but never exceed the model's context window
    max_length = min(len(input_ids[0]) + 50, model.config.n_positions)
    beam_output = model.generate(
        input_ids,
        attention_mask=attention_mask,
        max_length=max_length,
        min_length=4,
        num_beams=5,
        no_repeat_ngram_size=2,
        early_stopping=True,
        temperature=0.9,
        top_p=0.95,
        top_k=70,
        length_penalty=2.0,
        do_sample=True,
        eos_token_id=tokenizer.eos_token_id,
        pad_token_id=tokenizer.eos_token_id,
    )
    # Decode the best beam and strip special tokens
    text = tokenizer.decode(beam_output[0], skip_special_tokens=True)
    return text
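
# Example usage (illustrative; actual output depends on the Loewolf/GPT_1 weights
# and the sampling settings above):
#   generate_text("Hallo, wie geht es dir?")
#   -> returns the prompt followed by up to 50 newly generated tokens as one string
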
h1 = "Löwolf Chat"
css = """
body { font-family: 'Helvetica Neue', Helvetica, Arial, sans-serif; }
.gradio_app { max-width: 600px; margin: 50px auto; }
.gradio_interface { box-shadow: 0 0 20px rgba(0,0,0,0.1); }
.gradio_input_container { width: 100%; }
.gradio_output_container { width: 100%; margin-bottom: 25px; }
.gradio_input { border-radius: 20px; }
.gradio_output { border-radius: 20px; }
button { border-radius: 20px; }
"""

iface = gr.Interface(
    fn=generate_text,
    inputs=gr.Textbox(placeholder="Type a message..."),
    outputs="text",
    title=h1,
    css=css,
)
iface.launch()
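
# Note: for local testing, Gradio can also expose a temporary public link, e.g.
# iface.launch(share=True) (not used in the original Space).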