Loewolf committed on
Commit
d9a0ed7
·
1 Parent(s): f372d1e

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +9 -52
app.py CHANGED
@@ -3,57 +3,14 @@ from transformers import GPT2LMHeadModel, GPT2Tokenizer
3
  import torch
4
 
5
  # Initialisierung des Modells und des Tokenizers
6
- tokenizer = GPT2Tokenizer.from_pretrained("Loewolf/GPT1")
7
- model = GPT2LMHeadModel.from_pretrained("Loewolf/GPT1")
8
- model.to("cpu") # Stellen Sie sicher, dass das Modell auf der CPU läuft
9
 
10
- # Chat-Verlauf initialisieren
11
- chat_history = []
12
-
13
- def generate_text(input_text, history, temperature, top_k, top_p, max_length):
14
- global chat_history
15
- # Hinzufügen der neuen Eingabe zum Chat-Verlauf
16
- chat_history.append(f"Nutzer: {input_text}")
17
- new_input = tokenizer.encode(input_text + tokenizer.eos_token, return_tensors='pt')
18
- new_input = new_input.to("cpu") # Stellen Sie sicher, dass Tensoren auf der CPU sind
19
-
20
- # Generieren der Antwort des Modells
21
- chat_history_ids = tokenizer.encode(" ".join(chat_history) + tokenizer.eos_token, return_tensors='pt')
22
- chat_history_ids = chat_history_ids.to("cpu")
23
-
24
- # Achten Sie auf die Größe der Tokens für das Modell
25
- chat_history_ids = chat_history_ids[:, -tokenizer.model_max_length :]
26
-
27
- # Generieren der Antwort
28
- model_output = model.generate(chat_history_ids, max_length=max_length, pad_token_id=tokenizer.eos_token_id,
29
- temperature=temperature, top_k=top_k, top_p=top_p)
30
-
31
- # Antwort in Text umwandeln
32
- response = tokenizer.decode(model_output[:, chat_history_ids.shape[-1]:][0], skip_special_tokens=True)
33
- chat_history.append(f"Löwolf GPT: {response}")
34
-
35
- # Rückgabe des aktualisierten Chat-Verlaufs
36
- return " ".join(chat_history)
37
-
38
- # Erstellen der Gradio-Schnittstelle
39
- with gr.Blocks() as demo:
40
- with gr.Row():
41
- with gr.Column():
42
- history = gr.Textbox(label="Chatverlauf", value=" ".join(chat_history), lines=10, interactive=False)
43
- user_input = gr.Textbox(label="Deine Nachricht")
44
- submit_btn = gr.Button("Senden")
45
- with gr.Column():
46
- temperature = gr.Slider(minimum=0, maximum=1, step=0.01, label="Temperature", value=0.7)
47
- top_k = gr.Slider(minimum=0, maximum=100, step=1, label="Top K", value=50)
48
- top_p = gr.Slider(minimum=0, maximum=1, step=0.01, label="Top P", value=0.9)
49
- max_length = gr.Slider(minimum=1, maximum=100, step=1, label="Maximale Länge", value=60)
50
-
51
- submit_btn.click(
52
- generate_text,
53
- inputs=[user_input, history, temperature, top_k, top_p, max_length],
54
- outputs=[history]
55
- )
56
-
57
- # Starten der Gradio-App
58
- demo.launch()
59
 
 
 
 
3
  import torch
4
 
5
  # Initialisierung des Modells und des Tokenizers
6
# Load the pretrained GPT-2 tokenizer and language-model head from the
# Hugging Face hub. Done once at module import time so the Gradio handler
# below can reuse them across requests.
# NOTE(review): no .to(device) call is made, so the model stays on CPU —
# confirm that is intended for this Space.
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
model = GPT2LMHeadModel.from_pretrained("gpt2")
 
8
 
9
def generate_text(prompt, max_length=50):
    """Generate a GPT-2 continuation of *prompt*.

    Args:
        prompt: Input string fed to the model.
        max_length: Total token budget (prompt + continuation) forwarded to
            ``model.generate``. Defaults to 50, the value previously
            hard-coded, so existing callers are unaffected.

    Returns:
        The decoded output string. The prompt itself is included, because
        the full generated sequence is decoded, not just the new tokens.
    """
    inputs = tokenizer.encode(prompt, return_tensors="pt")
    # Inference only: disable autograd bookkeeping to save time and memory.
    with torch.no_grad():
        outputs = model.generate(
            inputs,
            max_length=max_length,
            num_return_sequences=1,
            # GPT-2 defines no pad token; using EOS as the pad id avoids the
            # transformers warning and undefined padding behaviour.
            pad_token_id=tokenizer.eos_token_id,
        )
    return tokenizer.decode(outputs[0], skip_special_tokens=True)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
14
 
15
# Expose generate_text as a minimal text-in/text-out Gradio web UI and
# start the server.
# NOTE(review): `gr` is not imported anywhere in the visible hunk (only
# transformers and torch are) — confirm `import gradio as gr` exists in the
# first two file lines outside this diff, otherwise this raises NameError.
iface = gr.Interface(fn=generate_text, inputs="text", outputs="text")
iface.launch()