Update app.py
app.py
CHANGED
@@ -7,9 +7,10 @@ import tempfile
 import shutil
 import re
 
-def translate_to_japanese(api_key, text):
+# Function to translate text to any language and provide pronunciation (Romaji or phonetic)
+def translate_to_language(api_key, text, language):
     """
-    Translates English text to
+    Translates English text to the target language using OpenAI's API and provides pronunciation.
     """
     # Validate input
     if not api_key:
@@ -23,11 +24,11 @@ def translate_to_japanese(api_key, text):
     # Define the messages for the chat model
     messages_translation = [
         {"role": "system", "content": "You are a helpful translator."},
-        {"role": "user", "content": f"Translate the following English text to
+        {"role": "user", "content": f"Translate the following English text to {language}:\n\n{text}"}
     ]
 
     try:
-        # Call the OpenAI API to get the
+        # Call the OpenAI API to get the translation
        response_translation = openai.ChatCompletion.create(
             model="gpt-4o",  # Use the correct endpoint for chat models
             messages=messages_translation,
@@ -35,13 +36,13 @@ def translate_to_japanese(api_key, text):
             temperature=0.5
         )
 
-        # Extract the
-
+        # Extract the translated text
+        translated_text = response_translation.choices[0].message['content'].strip()
 
-        # Define the messages for the pronunciation (
+        # Define the messages for the pronunciation (phonetic) request
         messages_pronunciation = [
-            {"role": "system", "content": "You are a helpful assistant who provides the
-            {"role": "user", "content": f"Provide the
+            {"role": "system", "content": f"You are a helpful assistant who provides the pronunciation in phonetic script of {language} text."},
+            {"role": "user", "content": f"Provide the pronunciation for the following {language} text:\n\n{translated_text}"}
         ]
 
         # Call the OpenAI API to get the pronunciation
@@ -52,10 +53,10 @@ def translate_to_japanese(api_key, text):
             temperature=0.5
         )
 
-        # Extract the pronunciation
+        # Extract the pronunciation from the response
         pronunciation = response_pronunciation.choices[0].message['content'].strip()
 
-        return
+        return translated_text, pronunciation
 
     except openai.error.OpenAIError as e:
         return f"OpenAI API error: {str(e)}", None
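Note that translate_to_language still targets the pre-1.0 openai SDK: openai.ChatCompletion.create and openai.error.OpenAIError only exist in openai<1.0, so the Space needs that version pinned. For reference only, not part of this commit, here is a minimal sketch of the same two-step flow (translate, then request a phonetic rendering) against the openai>=1.0 client; the model name and prompt wording are copied from the diff, everything else is an assumption.

# Sketch only: equivalent flow with the openai>=1.0 client (not what app.py uses).
from openai import OpenAI

def translate_with_new_sdk(api_key, text, language):
    client = OpenAI(api_key=api_key)  # assumption: key passed in, as in the diff

    # Step 1: translation (prompt wording taken from the diff)
    translation = client.chat.completions.create(
        model="gpt-4o",
        messages=[
            {"role": "system", "content": "You are a helpful translator."},
            {"role": "user", "content": f"Translate the following English text to {language}:\n\n{text}"},
        ],
        temperature=0.5,
    )
    translated_text = translation.choices[0].message.content.strip()

    # Step 2: phonetic pronunciation of the translated text
    pronunciation = client.chat.completions.create(
        model="gpt-4o",
        messages=[
            {"role": "system", "content": f"You are a helpful assistant who provides the pronunciation in phonetic script of {language} text."},
            {"role": "user", "content": f"Provide the pronunciation for the following {language} text:\n\n{translated_text}"},
        ],
        temperature=0.5,
    )
    return translated_text, pronunciation.choices[0].message.content.strip()

The committed function signals failure by returning the error string as the first tuple element and None as the second, which is what the st.error(translated_text) branch further down relies on.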
@@ -64,21 +65,21 @@ def translate_to_japanese(api_key, text):
 
 # Function to clean pronunciation text
 def clean_pronunciation(pronunciation_text):
-    # Remove introductory phrases like "Sure! The
-    pronunciation_cleaned = re.sub(r"^Sure! The
+    # Remove introductory phrases like "Sure! The pronunciation... is:"
+    pronunciation_cleaned = re.sub(r"^Sure! The pronunciation for the.*?text.*?is[:]*", "", pronunciation_text).strip()
     return pronunciation_cleaned
 
 # Function to generate audio file from text using gTTS
-def generate_audio_from_text(text):
-    tts = gTTS(text, lang=
+def generate_audio_from_text(text, language_code):
+    tts = gTTS(text, lang=language_code)  # Use the appropriate language code
     # Save audio to a temporary file
     temp_audio_file = tempfile.NamedTemporaryFile(delete=False, suffix=".mp3")
     tts.save(temp_audio_file.name)
     return temp_audio_file.name
 
 # Streamlit UI
-st.title("English to
-st.markdown("Translate English text into Japanese and get
+st.title("English to Multiple Language Translator with Pronunciation")
+st.markdown("Translate English text into Japanese, Spanish, Italian, and German and get their pronunciation (phonetic).")
 
 translateimg = Image.open("Untitled.png")  # Ensure the file is in the correct directory
 st.image(translateimg, use_container_width=True)  # Adjust the size as per preference
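A quick, self-contained check of the clean_pronunciation regex added above; the sample string is invented for illustration and no API call is involved.

import re

def clean_pronunciation(pronunciation_text):
    # Same regex as in the diff: strip a leading "Sure! The pronunciation ... is:" preamble
    return re.sub(r"^Sure! The pronunciation for the.*?text.*?is[:]*", "", pronunciation_text).strip()

sample = "Sure! The pronunciation for the Japanese text is: Konnichiwa, sekai"
print(clean_pronunciation(sample))  # -> "Konnichiwa, sekai"

If the model's reply uses a different preamble, the regex matches nothing and the text comes back unchanged, which is also how app.py behaves.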
@@ -89,24 +90,36 @@ api_key = os.getenv("OPENAI_API_KEY")
 # Input field for the text
 english_text = st.text_area("Enter the English text to translate")
 
+# Language selection dropdown
+languages = ["Japanese", "Spanish", "Italian", "German"]
+selected_language = st.selectbox("Select the target language", languages)
+
 # Initialize the progress bar and progress text above the translate button
 progress_bar = st.progress(0)
 progress_text = st.empty()  # To show the progress text
 
+# Mapping of languages to their corresponding language codes for gTTS
+language_codes = {
+    "Japanese": "ja",
+    "Spanish": "es",
+    "Italian": "it",
+    "German": "de"
+}
+
 # Button to trigger the translation
 if st.button("Translate"):
     if api_key and english_text:
-
         try:
             # Step 1: Request translation
-            progress_text.text("Translating text...")
+            progress_text.text(f"Translating text to {selected_language}...")
             progress_bar.progress(33)  # Update progress bar to 33%
 
-
+            # Translate based on the selected language
+            translated_text, pronunciation = translate_to_language(api_key, english_text, selected_language)
 
             # Step 2: Check if translation was successful
             if pronunciation:
-                progress_text.text("Generating
+                progress_text.text(f"Generating {selected_language} pronunciation...")
                 progress_bar.progress(66)  # Update progress bar to 66%
 
                 # Clean pronunciation (remove unnecessary parts)
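The selectbox and the language_codes mapping above are keyed on the same four names, so the later lookup language_codes[selected_language] cannot raise a KeyError as written. A hypothetical variant, not in this commit, that would tolerate adding a language to the dropdown without updating the mapping:

# Hypothetical helper, not in app.py: fall back to English audio if a language
# is added to the dropdown but missing from the gTTS mapping.
language_codes = {"Japanese": "ja", "Spanish": "es", "Italian": "it", "German": "de"}

def gtts_code_for(language, default="en"):
    return language_codes.get(language, default)

print(gtts_code_for("Spanish"))  # es
print(gtts_code_for("French"))   # en (fallback, since French is not mapped)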
@@ -114,11 +127,11 @@ if st.button("Translate"):
 
                 st.markdown("### Translation Result:")
                 st.write(f"**English Text:** {english_text}")
-                st.write(f"**
+                st.write(f"**{selected_language} Translation:** {translated_text}")
                 st.write(f"**Pronunciation:** {cleaned_pronunciation}")
 
                 # Save the result in a text file
-                result_text = f"English Text: {english_text}\n\
+                result_text = f"English Text: {english_text}\n\n{selected_language} Translation: {translated_text}\nPronunciation: {cleaned_pronunciation}"
 
                 # Write to a text file
                 with open("translation_result.txt", "w") as file:
@@ -134,10 +147,11 @@ if st.button("Translate"):
                 )
 
                 # Step 3: Generate audio for pronunciation
-                progress_text.text("Generating pronunciation audio...")
+                progress_text.text(f"Generating pronunciation audio for {selected_language}...")
                 progress_bar.progress(100)  # Update progress bar to 100%
 
-
+                # Generate audio for the cleaned pronunciation in the selected language
+                audio_file_path = generate_audio_from_text(cleaned_pronunciation, language_codes[selected_language])
 
                 # Provide a button to play the pronunciation audio
                 st.audio(audio_file_path, format="audio/mp3")
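The audio step hands the cleaned pronunciation text and the mapped gTTS code to generate_audio_from_text. A standalone sketch of that call, assuming gTTS is installed and network access is available (gTTS fetches the audio from Google's text-to-speech endpoint); the sample text is invented:

import tempfile
from gtts import gTTS

pronunciation_text = "Konnichiwa, sekai"   # invented sample; app.py passes cleaned_pronunciation
tts = gTTS(pronunciation_text, lang="ja")  # "ja" is language_codes["Japanese"]

tmp = tempfile.NamedTemporaryFile(delete=False, suffix=".mp3")
tmp.close()          # close the handle before gTTS writes to the path
tts.save(tmp.name)   # writes the MP3 that st.audio() plays in the app
print(tmp.name)

The NamedTemporaryFile(delete=False, ...) pattern mirrors app.py; the visible part of the diff never deletes the file afterwards, so temporary MP3s accumulate unless they are cleaned up elsewhere.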
@@ -146,7 +160,7 @@ if st.button("Translate"):
                 st.image(translateimg2, width=150)  # Adjust the size as per preference
 
             else:
-                st.error(
+                st.error(translated_text)  # Display error message if API call fails
 
         except Exception as e:
             st.error(f"An error occurred: {str(e)}")
@@ -154,4 +168,4 @@ if st.button("Translate"):
         if not api_key:
             st.error("API key is missing. Please add it as a secret in Hugging Face Settings.")
         else:
-            st.error("Please provide text to translate.")
+            st.error("Please provide text to translate.")