switched the GPT model to BetterTransformer for faster inference
Files changed:
- app.py: +6 -2
- requirements.txt: +1 -0
app.py
CHANGED
@@ -1,9 +1,13 @@
-from transformers import pipeline
+from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
 import gradio as gr
 
 model_id = "gpt2-large"
 
-pipe = pipeline("text-generation", model=model_id)
+tokenizer = AutoTokenizer.from_pretrained(model_id)
+model = AutoModelForCausalLM.from_pretrained(model_id)
+model.to_bettertransformer()
+
+pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
 
 def gpt(prompt, top_k, penalty_alpha):
     return pipe(prompt, top_k=top_k, penalty_alpha=penalty_alpha)[0]["generated_text"]
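For context: the import of gradio suggests app.py exposes gpt() as a web demo below this hunk, and passing both top_k and penalty_alpha enables contrastive-search decoding in transformers. A minimal sketch of how that wiring might look, with hypothetical input ranges that are not part of the commit:

import gradio as gr

# Hypothetical UI wiring for the gpt() function above; the actual
# interface code in app.py is not shown in this hunk.
demo = gr.Interface(
    fn=gpt,
    inputs=[
        gr.Textbox(label="Prompt"),
        gr.Slider(1, 50, value=4, step=1, label="top_k"),
        gr.Slider(0.0, 1.0, value=0.6, label="penalty_alpha"),
    ],
    outputs=gr.Textbox(label="Generated text"),
)
demo.launch()
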
requirements.txt
CHANGED
@@ -1,3 +1,4 @@
 transformers
 gradio
 torch
+optimum
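
optimum is added because model.to_bettertransformer() delegates to Optimum's BetterTransformer integration and raises an ImportError if the package is missing. A rough, hypothetical way to sanity-check the speedup once the Space is running (timings vary by hardware; this snippet is not part of the commit):

import time

# Hypothetical micro-benchmark against the pipe defined in app.py.
prompt = "The quick brown fox"
start = time.perf_counter()
pipe(prompt, top_k=4, penalty_alpha=0.6, max_new_tokens=64)
print(f"generation took {time.perf_counter() - start:.2f}s")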