Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
@@ -11,22 +11,27 @@ df.columns = ['ISO 639-1', 'ISO 639-2', 'Language Name', 'Native Name']
# Normalize the ISO 639-1 codes: stray whitespace in the CSV would otherwise
# leak into the Helsinki-NLP model names built from these codes.
df['ISO 639-1'] = df['ISO 639-1'].str.strip()

# Prepare language options for the dropdown.
# BUG FIX: the original comprehension was truncated mid f-string
# (`f"{row['ISO 639-1']}` with no closing quote or `for` clause), which is a
# SyntaxError; reconstructed as a complete (value, label) pair per row.
language_options = [(row['ISO 639-1'], f"{row['ISO 639-1']}") for index, row in df.iterrows()]
def translate_text(text, source_language_code, target_language_code):
    """Translate `text` between the two given ISO 639-1 language codes.

    Loads the Helsinki-NLP MarianMT checkpoint for the language pair and
    returns the translated string. On failure (same-language pair, or no
    checkpoint for the pair) an explanatory message string is returned
    instead of raising.

    Args:
        text: The text to translate.
        source_language_code: Source language, either a bare code ("en")
            or a labelled dropdown value ("en - English").
        target_language_code: Target language, same accepted forms.
    """
    # BUG FIX: the original re-derived the codes from `source_language` /
    # `target_language`, names that are neither parameters nor defined
    # anywhere, so every call raised NameError. Parse the actual parameters
    # instead; splitting on ' - ' keeps labelled dropdown values working.
    source_language_code = source_language_code.split(' - ')[0].strip()
    target_language_code = target_language_code.split(' - ')[0].strip()

    # Construct model name using ISO 639-1 codes; opus-mt checkpoints are
    # published one per directed language pair.
    model_name = f"Helsinki-NLP/opus-mt-{source_language_code}-{target_language_code}"

    # No opus-mt checkpoint exists for identical source/target pairs.
    if source_language_code == target_language_code:
        return "Translation between the same languages is not supported."

    # Best-effort load: report a missing/unloadable checkpoint as text so the
    # UI shows a message rather than a traceback.
    try:
        tokenizer = MarianTokenizer.from_pretrained(model_name)
        model = MarianMTModel.from_pretrained(model_name)
    except Exception as e:
        return f"Failed to load model for {source_language_code} to {target_language_code}: {str(e)}"

    translated = model.generate(**tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=512))
    translated_text = tokenizer.decode(translated[0], skip_special_tokens=True)
    return translated_text
# Dropdown for choosing the translation source language.
source_language_dropdown = gr.Dropdown(label="Source Language", choices=language_options)
|
# Strip whitespace so the ISO 639-1 codes splice cleanly into model names.
df['ISO 639-1'] = df['ISO 639-1'].str.strip()

# Prepare language options for the dropdown: one (value, display) pair per
# row of the language table.
language_options = [
    (entry['ISO 639-1'], f"{entry['ISO 639-1']}")
    for _, entry in df.iterrows()
]
def translate_text(text, source_language_code, target_language_code):
    """Translate `text` from `source_language_code` to `target_language_code`.

    Fetches the pretrained Helsinki-NLP MarianMT checkpoint for the language
    pair and returns the decoded translation. When the pair is unsupported
    (identical codes) or the checkpoint cannot be loaded, an explanatory
    message string is returned instead of raising.
    """
    # Guard: no opus-mt checkpoint exists for identical source/target pairs.
    if source_language_code == target_language_code:
        return "Translation between the same languages is not supported."

    # opus-mt checkpoints are published per directed ISO 639-1 pair.
    model_name = f"Helsinki-NLP/opus-mt-{source_language_code}-{target_language_code}"

    # Best-effort load: surface a missing/unloadable checkpoint as a message
    # so the UI never shows a traceback.
    try:
        tokenizer = MarianTokenizer.from_pretrained(model_name)
        model = MarianMTModel.from_pretrained(model_name)
    except Exception as e:
        return f"Failed to load model for {source_language_code} to {target_language_code}: {str(e)}"

    # Tokenize (truncated to the model's window), generate, and decode.
    encoded = tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=512)
    output_ids = model.generate(**encoded)
    return tokenizer.decode(output_ids[0], skip_special_tokens=True)
# Source-language selector fed by the (value, display) pairs built above.
source_language_dropdown = gr.Dropdown(label="Source Language", choices=language_options)