set fine-tuned model
app.py CHANGED
@@ -4,14 +4,20 @@ from transformers import AutoTokenizer, AutoModelForCausalLM
 import torch
 
 # Modell und Tokenizer laden
+HF_USER = "ai01firebird"
+MODEL_NAME = "emojinator-gpt2"
 
 # gpt2 outputs text!
 #tokenizer = AutoTokenizer.from_pretrained("openai-community/gpt2")
 #model = AutoModelForCausalLM.from_pretrained("openai-community/gpt2")
 
 # distilgpt2 is only 80MB -> NOK, no emojis
-tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
-model = AutoModelForCausalLM.from_pretrained("distilgpt2")
+#tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
+#model = AutoModelForCausalLM.from_pretrained("distilgpt2")
+
+# fine-tuned
+model = AutoModelForCausalLM.from_pretrained(f"{HF_USER}/{MODEL_NAME}")
+tokenizer = AutoTokenizer.from_pretrained(f"{HF_USER}/{MODEL_NAME}")
 
 # tiny-gpt2 is only 20MB -> NOK, no emojis
 #tokenizer = AutoTokenizer.from_pretrained("sshleifer/tiny-gpt2")
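The diff above only changes how the checkpoint is loaded; the generation step that turns the loaded model into emoji output lives elsewhere in app.py and is not shown here. Below is a minimal sketch of how the fine-tuned model and tokenizer could be used, assuming a plain generate() call; the text_to_emoji helper, the prompt format, and the sampling parameters are illustrative assumptions, not part of this commit.

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

HF_USER = "ai01firebird"
MODEL_NAME = "emojinator-gpt2"

# Load the fine-tuned checkpoint from the Hub, as in the diff above
tokenizer = AutoTokenizer.from_pretrained(f"{HF_USER}/{MODEL_NAME}")
model = AutoModelForCausalLM.from_pretrained(f"{HF_USER}/{MODEL_NAME}")

def text_to_emoji(text: str) -> str:
    # Prompt format is an assumption; it must match the one used during fine-tuning.
    prompt = f"Text: {text}\nEmoji:"
    inputs = tokenizer(prompt, return_tensors="pt")
    with torch.no_grad():
        output_ids = model.generate(
            **inputs,
            max_new_tokens=10,  # emoji sequences are short
            do_sample=True,
            top_p=0.9,
            pad_token_id=tokenizer.eos_token_id,
        )
    # Decode only the tokens generated after the prompt
    new_tokens = output_ids[0][inputs["input_ids"].shape[1]:]
    return tokenizer.decode(new_tokens, skip_special_tokens=True).strip()

print(text_to_emoji("Had a great day at the beach"))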