# -*- coding: utf-8 -*-
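# Streamlit demo for converting between Korean text and Korean Braille patterns
# using the snoop2head/KoBrailleT5-small-v1 seq2seq model.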
import streamlit as st
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM
st.set_page_config(
    page_title="", layout="wide", initial_sidebar_state="expanded"
)
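# Cache the model so it is downloaded and loaded only once, then reused across Streamlit reruns.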
@st.cache
def load_model(model_name):
    model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
    return model
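# The tokenizer and the model weights come from the same KoBrailleT5 checkpoint.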
tokenizer = AutoTokenizer.from_pretrained("snoop2head/KoBrailleT5-small-v1")
model = load_model("snoop2head/KoBrailleT5-small-v1")
st.title("한국어 점역과 역점역")  # "Korean Braille transcription and back-transcription"
st.write("Braille Pattern Conversion")
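# Default input shown in the text area: a Korean Braille pattern string.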
default_value = '⠍⠗⠠⠪⠋⠕⠀⠘⠪⠐⠗⠒⠊⠕⠐⠀⠘⠮⠐⠍⠨⠟⠀⠚⠣⠕⠚⠕⠂'
src_text = st.text_area(
"번역하고 싶은 문장을 입력하세요:",
default_value,
height=300,
max_chars=100,
)
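# Echo the raw input to the server log for debugging.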
print(src_text)
if src_text == "":
    st.warning("Please **enter text** for translation")
else:
    # append the EOS token and convert between Korean text and Braille patterns
    src_text += "</s>"
    translation_result = model.generate(
        tokenizer(
            src_text,
            return_tensors="pt",
            padding="max_length",
            truncation=True,
            max_length=64,
        ).input_ids,
    )
    translation_result = tokenizer.decode(
        translation_result[0],
        clean_up_tokenization_spaces=True,
        skip_special_tokens=True,
    )
    print(f"{src_text} -> {translation_result}")
    st.write(translation_result)
    print(translation_result)