snoop2head committed
Commit 2a9d3fa · 1 Parent(s): 869d2d6

initial value hotfix

Files changed (1)
  1. app.py +40 -19
app.py CHANGED
@@ -4,7 +4,9 @@ import streamlit as st
 from transformers import AutoTokenizer, AutoModelForSeq2SeqLM


-st.set_page_config(page_title='KoQuillBot', layout='wide', initial_sidebar_state='expanded')
+st.set_page_config(
+    page_title="KoQuillBot", layout="wide", initial_sidebar_state="expanded"
+)

 tokenizer = AutoTokenizer.from_pretrained("QuoQA-NLP/KE-T5-Ko2En-Base")
 ko2en_model = AutoModelForSeq2SeqLM.from_pretrained("QuoQA-NLP/KE-T5-Ko2En-Base")
@@ -13,31 +15,50 @@ en2ko_model = AutoModelForSeq2SeqLM.from_pretrained("QuoQA-NLP/KE-T5-En2Ko-Base"

 st.title("🤖 KoQuillBot")

-src_text = st.text_area("바꾸고 싶은 문장을 입력하세요:",height=None,max_chars=None,key=None,help="Enter your text here")

-backtranslated = ""
+default_value = "안녕하세요. 저는 문장을 다시 작성해주는 KoQuillBot입니다."
+src_text = st.text_area(
+    "바꾸고 싶은 문장을 입력하세요:",
+    default_value,
+    height=50,
+    max_chars=200,
+)

-if st.button('문장 변환'):
+
+if st.button("문장 변환"):
     if src_text == "":
-        st.warning('Please **enter text** for translation')
+        st.warning("Please **enter text** for translation")

     else:
         translated = ko2en_model.generate(
-            **tokenizer([src_text], return_tensors="pt", padding=True, max_length=64,),
-            max_length=64,
-            num_beams=5,
-            repetition_penalty=1.3,
-            no_repeat_ngram_size=3,
-            num_return_sequences=1,
-        )
-        list_translated = [tokenizer.decode(t, skip_special_tokens=True) for t in translated]
+            **tokenizer(
+                [src_text],
+                return_tensors="pt",
+                padding=True,
+                max_length=64,
+            ),
+            max_length=64,
+            num_beams=5,
+            repetition_penalty=1.3,
+            no_repeat_ngram_size=3,
+            num_return_sequences=1,
+        )
+
+        list_translated = [
+            tokenizer.decode(t, skip_special_tokens=True) for t in translated
+        ]
         backtranslated = en2ko_model.generate(
-            **tokenizer(list_translated, return_tensors="pt", padding=True, max_length=64,),
-            max_length=64,
-            num_beams=5,
-            repetition_penalty=1.3,
-            no_repeat_ngram_size=3,
-            num_return_sequences=1,
+            **tokenizer(
+                list_translated,
+                return_tensors="pt",
+                padding=True,
+                max_length=64,
+            ),
+            max_length=64,
+            num_beams=5,
+            repetition_penalty=1.3,
+            no_repeat_ngram_size=3,
+            num_return_sequences=1,
         )
 else:
     pass
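
For context, below is a minimal, self-contained sketch of the Ko → En → Ko back-translation round trip that app.py performs after this commit, runnable outside Streamlit. It mirrors the checkpoints and generation settings shown in the diff; the final decoding of backtranslated and the variable name paraphrase are assumptions for illustration, since the hunks above end before the result is decoded or displayed.

# Sketch of the back-translation round trip, mirroring the diff above.
# Assumption: run as a plain script, outside Streamlit.
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM

tokenizer = AutoTokenizer.from_pretrained("QuoQA-NLP/KE-T5-Ko2En-Base")
ko2en_model = AutoModelForSeq2SeqLM.from_pretrained("QuoQA-NLP/KE-T5-Ko2En-Base")
en2ko_model = AutoModelForSeq2SeqLM.from_pretrained("QuoQA-NLP/KE-T5-En2Ko-Base")

# The default sentence this commit adds to the text area:
# "Hello. I am KoQuillBot, which rewrites your sentences."
src_text = "안녕하세요. 저는 문장을 다시 작성해주는 KoQuillBot입니다."

# Korean -> English
translated = ko2en_model.generate(
    **tokenizer([src_text], return_tensors="pt", padding=True, max_length=64),
    max_length=64,
    num_beams=5,
    repetition_penalty=1.3,
    no_repeat_ngram_size=3,
    num_return_sequences=1,
)
list_translated = [tokenizer.decode(t, skip_special_tokens=True) for t in translated]

# English -> Korean: the back-translated text is the paraphrase.
backtranslated = en2ko_model.generate(
    **tokenizer(list_translated, return_tensors="pt", padding=True, max_length=64),
    max_length=64,
    num_beams=5,
    repetition_penalty=1.3,
    no_repeat_ngram_size=3,
    num_return_sequences=1,
)
# Decoding the result is not part of the shown hunks; this step is an assumption.
paraphrase = [tokenizer.decode(t, skip_special_tokens=True) for t in backtranslated]
print(paraphrase[0])

Note that the app loads a single tokenizer (from the Ko2En checkpoint) and reuses it for the En2Ko model, so the sketch does the same.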