Spaces:
Upload 2 files
- app.py +61 -0
- requirements.txt +3 -0
app.py
ADDED
@@ -0,0 +1,61 @@
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 25 20:10:26 2024

@author: CSU5KOR
"""

from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer
import streamlit as st
from torch import cuda

# Use the GPU when one is available, otherwise fall back to CPU.
if cuda.is_available():
    device = 'cuda'
else:
    device = 'cpu'

@st.cache_resource()
def load_model():
    # Cached so the model and tokenizer load once per process rather than
    # on every Streamlit rerun.
    model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
    model.to(device)
    tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M")
    return model, tokenizer

model, tokenizer = load_model()
st.title("Chinese to multilingual translation app")

st.write("This app demonstrates the translation capabilities of an LLM")

col1, col2 = st.columns(2)

with col1:
    source_language = st.radio("Select source language", ["zh", "de"])
    user_text = st.text_area("Enter text for translation")

with col2:
    target_language = st.radio("Select target language", ["en", "de", "bn", "hi"])
    if user_text:
        # Tell the tokenizer the input language, then force the decoder to
        # start generation with the target-language token.
        tokenizer.src_lang = source_language
        encoded_text = tokenizer(user_text, return_tensors="pt").to(device)
        generated_tokens = model.generate(**encoded_text, forced_bos_token_id=tokenizer.get_lang_id(target_language))
        m2m_translated = tokenizer.batch_decode(generated_tokens, skip_special_tokens=True)[0]
        st.write(m2m_translated)
        # st.snow()
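The translation pipeline at the heart of the app can also be exercised outside Streamlit for quick testing. A minimal sketch of the same M2M100 calls; the example sentence and the zh-to-en pair are illustrative assumptions, not part of the app:

from transformers import M2M100ForConditionalGeneration, M2M100Tokenizer

model = M2M100ForConditionalGeneration.from_pretrained("facebook/m2m100_418M")
tokenizer = M2M100Tokenizer.from_pretrained("facebook/m2m100_418M")

# The source language must be set before tokenizing; the target language
# is selected by forcing its token at the start of generation.
tokenizer.src_lang = "zh"
encoded = tokenizer("生活就像一盒巧克力", return_tensors="pt")
generated = model.generate(**encoded, forced_bos_token_id=tokenizer.get_lang_id("en"))
print(tokenizer.batch_decode(generated, skip_special_tokens=True)[0])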
requirements.txt
ADDED
@@ -0,0 +1,3 @@
transformers==4.37.2
torch==2.2.0
streamlit==1.31.1
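These three pins are the app's full dependency set and match the imports in app.py. A Hugging Face Space installs requirements.txt automatically at build time; to reproduce the app locally, install the same versions with "pip install -r requirements.txt" and launch it with "streamlit run app.py".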