# Streamlit demo: Korean sentence completion with KakaoBrain KoGPT (kakaobrain/kogpt, revision KoGPT6B-ryan1.5b).

import streamlit as st

import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

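# Load the KoGPT tokenizer; the special tokens below follow the kakaobrain/kogpt release.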
tokenizer = AutoTokenizer.from_pretrained(
    'kakaobrain/kogpt', revision='KoGPT6B-ryan1.5b', cache_dir='./model_dir/',
    bos_token='[BOS]', eos_token='[EOS]', unk_token='[UNK]', pad_token='[PAD]', mask_token='[MASK]'
)

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

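# Load the 6B-parameter KoGPT model in float16 and move it onto the selected device.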
model = AutoModelForCausalLM.from_pretrained(
    'kakaobrain/kogpt', revision='KoGPT6B-ryan1.5b', cache_dir='./model_dir/',
    pad_token_id=tokenizer.eos_token_id,
    torch_dtype=torch.float16, low_cpu_mem_usage=True
).to(device=device, non_blocking=True)
_ = model.eval()

print("Model loading done!")


def gpt(prompt):
    """Generate a sampled continuation of `prompt` (temperature 0.8; max_length 256 tokens, prompt included)."""
    with torch.no_grad():
        tokens = tokenizer.encode(prompt, return_tensors='pt').to(device=device, non_blocking=True)
        gen_tokens = model.generate(tokens, do_sample=True, temperature=0.8, max_length=256)
        generated = tokenizer.batch_decode(gen_tokens)[0]

    return generated


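# --- Streamlit UI: title, example prompts, and a free-text prompt box ---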
st.title("여러분들의 문장을 완성해줍니다. 🤖")        # "Completes your sentences. 🤖"
st.markdown("카카오 gpt 사용합니다.")                 # "Uses Kakao GPT."
st.subheader("몇가지 예제: ")                         # "A few examples:"
example_1_str = "오늘의 날씨는 너무 눈부시다. 내일은 "        # "Today's weather is dazzling. Tomorrow ..."
example_2_str = "우리는 행복을 언제나 갈망하지만 항상 "       # "We always long for happiness, but always ..."
example_1 = st.button(example_1_str)
example_2 = st.button(example_2_str)
textbox = st.text_area('오늘은 아름다움을 향해 달리고', '', height=100, max_chars=500)    # "Today, running toward beauty ..."
button = st.button('완성:')                           # "Complete:"

st.subheader("결과값: ")                              # "Result:"
if example_1:
    with st.spinner('In progress.......'):
        output_text = gpt(example_1_str)
    st.markdown("\n" + output_text)

if example_2:
    with st.spinner('In progress.......'):
        output_text = gpt(example_2_str)
    st.markdown("\n" + output_text)

if button:
    with st.spinner('In progress.......'):
        if textbox:
            output_text = gpt(textbox)
        else:
            output_text = " "
    st.markdown("\n" + output_text)