Spaces:
Runtime error
Update app.py
app.py CHANGED
@@ -3,14 +3,14 @@ from transformers import GPT2LMHeadModel, GPT2Tokenizer
 import torch
 
 # Initialisierung des Modells und des Tokenizers
-tokenizer = GPT2Tokenizer.from_pretrained("Loewolf/GPT_1.5")
-model = GPT2LMHeadModel.from_pretrained("Loewolf/GPT_1.5")
+tokenizer = GPT2Tokenizer.from_pretrained("Loewolf/GPT_1.5-medium")
+model = GPT2LMHeadModel.from_pretrained("Loewolf/GPT_1.5-medium")
 
 def generate_text(prompt):
     input_ids = tokenizer.encode(prompt, return_tensors="pt")
     attention_mask = torch.ones(input_ids.shape, dtype=torch.long)
 
-    max_length = model.config.n_positions if len(input_ids[0]) > model.config.n_positions else len(input_ids[0]) +
+    max_length = model.config.n_positions if len(input_ids[0]) > model.config.n_positions else len(input_ids[0]) + 70
     beam_output = model.generate(
         input_ids,
         attention_mask=attention_mask,
@@ -22,7 +22,7 @@ def generate_text(prompt):
         temperature=0.6,
         top_p=0.95,
         top_k=10,
-
+
         do_sample=True,
         eos_token_id=tokenizer.eos_token_id,
         pad_token_id=tokenizer.eos_token_id
@@ -31,13 +31,7 @@ def generate_text(prompt):
     text = tokenizer.decode(beam_output[0], skip_special_tokens=True)
     return text
 
-DESCRIPTION = """\
-#Löwolf GPT1 Chat
 
-<p>Es wird neues Löwolf GPT 1.5 verwendet.</p>
-
-Löwolf Chat verwendet immer das aktuelle GPT Modell von Löwolf Community!
-"""
 css = """
 h1 {
     text-align: center;