Spaces:
Sleeping
Sleeping
Santhosh1325
committed on
Update app.py
Browse files
app.py
CHANGED
@@ -37,7 +37,7 @@ content_models = {
|
|
37 |
# Load the translation model and tokenizer locally
|
38 |
@st.cache_resource
|
39 |
def load_translation_model():
|
40 |
-
with st.spinner('Loading translation model... Please wait.'):
|
41 |
model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-50-many-to-one-mmt")
|
42 |
tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50-many-to-one-mmt")
|
43 |
return model, tokenizer
|
@@ -45,7 +45,7 @@ def load_translation_model():
|
|
45 |
# Function to perform translation locally
|
46 |
def translate_text_local(text):
|
47 |
model, tokenizer = load_translation_model()
|
48 |
-
    with st.spinner('Translation is in progress... Please wait.'):
|
49 |
inputs = tokenizer(text, return_tensors="pt", max_length=512, truncation=True)
|
50 |
translated_tokens = model.generate(**inputs, forced_bos_token_id=tokenizer.lang_code_to_id["en_XX"])
|
51 |
translated_text = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)[0]
|
|
|
37 |
# Load the translation model and tokenizer locally
|
38 |
@st.cache_resource
|
39 |
def load_translation_model():
|
40 |
+
    with st.spinner('Loading translation model... Please wait. If this is your first time here, it may take about 2 minutes.'):
|
41 |
model = MBartForConditionalGeneration.from_pretrained("facebook/mbart-large-50-many-to-one-mmt")
|
42 |
tokenizer = MBart50TokenizerFast.from_pretrained("facebook/mbart-large-50-many-to-one-mmt")
|
43 |
return model, tokenizer
|
|
|
45 |
# Function to perform translation locally
|
46 |
def translate_text_local(text):
|
47 |
model, tokenizer = load_translation_model()
|
48 |
+
    with st.spinner('Translation is in progress. If this is your first time here, it may take about 2 minutes. Please wait.'):
|
49 |
inputs = tokenizer(text, return_tensors="pt", max_length=512, truncation=True)
|
50 |
translated_tokens = model.generate(**inputs, forced_bos_token_id=tokenizer.lang_code_to_id["en_XX"])
|
51 |
translated_text = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)[0]
|