# Loewolf-Chat / app.py
import gradio as gr
from transformers import GPT2LMHeadModel, GPT2Tokenizer
import torch
# Initialize the model and the tokenizer
tokenizer = GPT2Tokenizer.from_pretrained("Loewolf/GPT_1")
model = GPT2LMHeadModel.from_pretrained("Loewolf/GPT_1")
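
# Optional sketch (not part of the original file): run inference on a GPU when
# one is available; input_ids inside generate_text would then need .to(device) too.
# device = "cuda" if torch.cuda.is_available() else "cpu"
# model.to(device)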
def generate_text(prompt):
    input_ids = tokenizer.encode(prompt, return_tensors="pt")
    # Attention mask of ones: every prompt token should be attended to
    attention_mask = torch.ones(input_ids.shape, dtype=torch.long)
    # Allow up to 20 new tokens, but never exceed the model's context window
    max_length = min(len(input_ids[0]) + 20, model.config.n_positions)
    beam_output = model.generate(
        input_ids,
        attention_mask=attention_mask,
        max_length=max_length,
        min_length=4,  # minimum length of the response
        num_beams=5,
        no_repeat_ngram_size=2,
        early_stopping=True,
        temperature=0.9,
        top_p=0.90,
        top_k=50,
        length_penalty=2.0,
        do_sample=True,
        eos_token_id=tokenizer.eos_token_id,  # stop generation at the EOS token
        pad_token_id=tokenizer.eos_token_id,  # GPT-2 has no pad token, so reuse EOS
    )
    text = tokenizer.decode(beam_output[0], skip_special_tokens=True)
    return text
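
# Illustrative local sanity check (not part of the original app); uncomment to
# test text generation from the command line before launching the Gradio UI:
# print(generate_text("Hallo, wie geht es dir?"))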
css = """
body { font-family: Arial, sans-serif; }
.gradio_container { max-width: 700px; margin: auto; padding-top: 50px; }
.gradio_header { display: none; }
.gradio_input_box { border-radius: 10px; }
.gradio_output_box { border-radius: 10px; }
button { background-color: #29B3FF; color: white; padding: 10px 20px; border: none; border-radius: 5px; cursor: pointer; }
button:hover { background-color: #106ba3; }
"""
iface = gr.Interface(
    fn=generate_text,
    inputs=gr.Textbox(label="Schreibe hier...", placeholder="Stelle deine Frage..."),
    outputs=gr.Textbox(label="Antwort"),
    title="Löwolf Chat",
    description="Willkommen beim Löwolf Chat. Stelle deine Fragen und erhalte Antworten vom KI-Chatbot.",
    css=css,
)
iface.launch()
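
# Note (assumption, not in the original): the default launch() is enough on
# Hugging Face Spaces; for a temporary public link when running locally, one
# could call iface.launch(share=True) instead.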