Loewolf committed
Commit 1b6e0a4 · 1 Parent(s): ad42d55

Update app.py

Files changed (1)
  1. app.py +11 -36
app.py CHANGED
@@ -1,44 +1,17 @@
 import gradio as gr
-import torch
 from transformers import pipeline, set_seed
 
 # Set a seed for reproducibility
 set_seed(42)
 
-# Load the GPT model via the Hugging Face pipeline
-model = pipeline("text-generation", model="Loewolf/GPT_1")
+# Load the GPT model via the Hugging Face pipeline on the CPU
+model = pipeline("text-generation", model="Loewolf/GPT_1", device=-1)  # device=-1 selects the CPU
 tokenizer = model.tokenizer
 
 def generate_text(input_text, temp, top_k, top_p, length):
-    # Convert the input text to token IDs
-    input_ids = tokenizer.encode(input_text, return_tensors="pt")
-
-    # Create the attention mask
-    attention_mask = torch.ones(input_ids.shape, dtype=torch.bool)
-
-    # Cap the maximum length at the model's context window
-    max_length = model.model.config.n_positions if len(input_ids[0]) > model.model.config.n_positions else len(input_ids[0]) + length
-
     # Generate text with the specified parameters
-    beam_output = model.model.generate(
-        input_ids,
-        attention_mask=attention_mask,
-        max_length=max_length,
-        min_length=4,
-        num_beams=5,
-        no_repeat_ngram_size=2,
-        early_stopping=True,
-        temperature=temp,
-        top_p=top_p,
-        top_k=top_k,
-        length_penalty=2.0,
-        do_sample=True,
-        eos_token_id=tokenizer.eos_token_id,
-        pad_token_id=tokenizer.eos_token_id
-    )
-
-    # Convert the generated token IDs back into text
-    return tokenizer.decode(beam_output[0], skip_special_tokens=True)
+    generated_texts = model(input_text, max_length=length, temperature=temp, top_k=top_k, top_p=top_p, num_return_sequences=1)
+    return generated_texts[0]['generated_text']
 
 def chat_with_model(user_input, history, temperature, top_k, top_p, length, system_prompt):
     combined_input = f"{history}\nNutzer: {user_input}\n{system_prompt}:"
@@ -49,15 +22,17 @@ def chat_with_model(user_input, history, temperature, top_k, top_p, length, system_prompt):
 # Build the Gradio interface
 with gr.Blocks() as demo:
     with gr.Row():
-        history = gr.Textbox(label="Chatverlauf", lines=10, interactive=False)
-        user_input = gr.Textbox(label="Deine Nachricht")
-        system_prompt = gr.Textbox(label="System Prompt", value="Löwolf GPT")
-        with gr.Column(scale=1):
+        with gr.Column():
+            history = gr.Textbox(label="Chatverlauf", lines=10, interactive=False)
+            user_input = gr.Textbox(label="Deine Nachricht")
+            submit_btn = gr.Button("Senden")
+        with gr.Column():
+            system_prompt = gr.Textbox(label="System Prompt", value="Löwolf GPT")
             temperature = gr.Slider(minimum=0, maximum=1, step=0.01, label="Temperature", value=0.9)
             top_k = gr.Slider(minimum=0, maximum=100, step=1, label="Top K", value=50)
             top_p = gr.Slider(minimum=0, maximum=1, step=0.01, label="Top P", value=0.9)
             length = gr.Slider(minimum=1, maximum=100, step=1, label="Länge", value=20)
-    submit_btn = gr.Button("Senden")
+
     submit_btn.click(
         chat_with_model,
         inputs=[user_input, history, temperature, top_k, top_p, length, system_prompt],
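
Note on the new generation path: the pipeline call drops the do_sample=True flag that the removed model.model.generate(...) call passed, so temperature, top_k, and top_p may be ignored if the model's generation config defaults to greedy decoding. The sketch below runs the pipeline path standalone under that assumption; the prompt string is illustrative and do_sample=True is added here rather than taken from the commit.

# A minimal standalone sketch of the pipeline-based generation path.
# Assumptions: do_sample=True (not in the committed call) and an
# illustrative prompt; everything else mirrors app.py.
from transformers import pipeline, set_seed

set_seed(42)  # same seed as app.py for reproducible sampling
generator = pipeline("text-generation", model="Loewolf/GPT_1", device=-1)  # device=-1 selects the CPU

prompt = "Nutzer: Hallo!\nLöwolf GPT:"
outputs = generator(
    prompt,
    max_length=20,           # counts prompt tokens too, so long histories leave little room
    do_sample=True,          # assumption: enables the sampling parameters below
    temperature=0.9,
    top_k=50,
    top_p=0.9,
    num_return_sequences=1,
)
print(outputs[0]["generated_text"])  # the pipeline returns a list of dicts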