Spaces:
Sleeping
Sleeping
old streamlit, let's try
Browse files
app.py
CHANGED
|
@@ -13,17 +13,17 @@ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
|
| 13 |
from transformers import MarianMTModel, MarianTokenizer
|
| 14 |
model_name = 'Helsinki-NLP/opus-mt-ROMANCE-en'
|
| 15 |
|
| 16 |
-@st.
|
| 17 |
def get_tokenizer(model_name):
|
| 18 |
return MarianTokenizer.from_pretrained(model_name)
|
| 19 |
|
| 20 |
-@st.
|
| 21 |
def get_model(model_name):
|
| 22 |
return MarianMTModel.from_pretrained(model_name).to(device)
|
| 23 |
|
| 24 |
tokenizer = get_tokenizer(model_name)
|
| 25 |
model = get_model(model_name)
|
| 26 |
-
|
| 27 |
print(f"The model has {model.num_parameters():,d} parameters.")
|
| 28 |
|
| 29 |
input_text = st.text_input("Enter text to translate", "Hola, mi nombre es Juan")
|
|
@@ -68,4 +68,3 @@ with tokenizer.as_target_tokenizer():
|
|
| 68 |
|
| 69 |
st.write(probs_table)
|
| 70 |
st.write(model.config.decoder_start_token_id)
|
| 71 |
-
"""
|
|
|
|
| 13 |
from transformers import MarianMTModel, MarianTokenizer
|
| 14 |
model_name = 'Helsinki-NLP/opus-mt-ROMANCE-en'
|
| 15 |
|
| 16 |
+@st.cache
|
| 17 |
def get_tokenizer(model_name):
|
| 18 |
return MarianTokenizer.from_pretrained(model_name)
|
| 19 |
|
| 20 |
+@st.cache
|
| 21 |
def get_model(model_name):
|
| 22 |
return MarianMTModel.from_pretrained(model_name).to(device)
|
| 23 |
|
| 24 |
tokenizer = get_tokenizer(model_name)
|
| 25 |
model = get_model(model_name)
|
| 26 |
+
|
| 27 |
print(f"The model has {model.num_parameters():,d} parameters.")
|
| 28 |
|
| 29 |
input_text = st.text_input("Enter text to translate", "Hola, mi nombre es Juan")
|
|
|
|
| 68 |
|
| 69 |
st.write(probs_table)
|
| 70 |
st.write(model.config.decoder_start_token_id)
|
|
|