plannist committed
Commit 1be9318 · Parent(s): 0031fcc
my first commit KoAlpaca API
- app.py +30 -0
- requirements.txt +4 -0
app.py
ADDED
@@ -0,0 +1,30 @@
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
import torch
import gradio as gr

model_name = "beomi/KoAlpaca-Polyglot-5.8B"

tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype=torch.float16,  # half precision so the 5.8B model fits in GPU memory
    device_map="auto",          # let accelerate place the weights on available devices
)

pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    # no explicit device here: a model loaded with device_map="auto" is already
    # placed by accelerate, and pipeline() rejects a device argument in that case
    max_new_tokens=256,
    do_sample=True,
    temperature=0.7,
    top_p=0.9,
)

def chat_fn(prompt):
    # the pipeline echoes the prompt inside "generated_text" (see the note below)
    output = pipe(prompt)[0]["generated_text"]
    return output

demo = gr.Interface(fn=chat_fn, inputs="text", outputs="text")
demo.launch()
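One quirk worth noting: the text-generation pipeline's "generated_text" field contains the prompt followed by the completion, so the Gradio output echoes the user's input. A minimal variant of chat_fn that returns only the completion, using the pipeline's return_full_text flag:

def chat_fn(prompt):
    # return_full_text=False drops the echoed prompt from the output,
    # so the UI shows only the model's continuation
    output = pipe(prompt, return_full_text=False)[0]["generated_text"]
    return output.strip()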
requirements.txt
ADDED
@@ -0,0 +1,4 @@
transformers
torch
gradio
accelerate
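accelerate is listed because device_map="auto" in app.py relies on it for weight placement. Once demo.launch() is serving (Gradio defaults to http://127.0.0.1:7860), the endpoint can also be called programmatically. A minimal client-side sketch, assuming the separate gradio_client package is installed and the default "/predict" endpoint that a single-function gr.Interface exposes:

from gradio_client import Client

# assumes app.py is already serving on Gradio's default local port
client = Client("http://127.0.0.1:7860/")
# "What is the capital of Korea?"
reply = client.predict("한국의 수도는 어디인가요?", api_name="/predict")
print(reply)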