Update main.py
main.py CHANGED
@@ -5,10 +5,9 @@ drive.mount('/content/drive')
 
 pip install transformers librosa torch soundfile numba numpy TTS datasets gradio protobuf==3.20.3
 
-"""Emotion Detection (Using Text Dataset)
+"""Emotion Detection (Using Text Dataset)""""
 
 """
-
 !pip install --upgrade numpy tensorflow transformers TTS
 
 !pip freeze > requirements.txt"""
@@ -77,28 +76,6 @@ emotion = "happy"
 output_audio = generate_emotional_speech("Welcome to the smart library!", emotion)
 print(f"Generated Speech Saved At: {output_audio}")
 
-#
-import librosa
-import soundfile as sf
-
-def adjust_pitch(audio_path, pitch_factor):
-    # Load audio
-    y, sr = librosa.load(audio_path)
-    # Adjust pitch
-    y_shifted = librosa.effects.pitch_shift(y, sr, n_steps=pitch_factor)
-    # Save adjusted audio
-    sf.write(audio_path, y_shifted, sr)
-
-def adjust_speed(audio_path, speed_factor):
-    # Load the audio file
-    y, sr = librosa.load(audio_path)
-
-    # Adjust the speed (this alters the duration of the audio)
-    y_speeded = librosa.effects.time_stretch(y, speed_factor)
-
-    # Save the adjusted audio
-    sf.write(audio_path, y_speeded, sr)
-
 
 """Integrating the Workflow"""
 
@@ -274,6 +251,28 @@ save_path = "/content/drive/My Drive/fine_tuned_tacotron2.pth"
 torch.save(model.state_dict(), save_path)
 
 
+import librosa
+import soundfile as sf
+
+def adjust_pitch(audio_path, pitch_factor):
+    # Load audio
+    y, sr = librosa.load(audio_path)
+    # Adjust pitch
+    y_shifted = librosa.effects.pitch_shift(y, sr, n_steps=pitch_factor)
+    # Save adjusted audio
+    sf.write(audio_path, y_shifted, sr)
+
+def adjust_speed(audio_path, speed_factor):
+    # Load the audio file
+    y, sr = librosa.load(audio_path)
+
+    # Adjust the speed (this alters the duration of the audio)
+    y_speeded = librosa.effects.time_stretch(y, speed_factor)
+
+    # Save the adjusted audio
+    sf.write(audio_path, y_speeded, sr)
+
+
 """Set up the Gradio interface"""
 
 import gradio as gr
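Note on the relocated helpers: the committed code calls librosa.effects.pitch_shift(y, sr, n_steps=...) and librosa.effects.time_stretch(y, speed_factor) with positional arguments. In librosa 0.10 and later, sr, n_steps, and rate are keyword-only, so those calls raise a TypeError; on librosa 0.9.x the positional form still runs with a deprecation warning. Below is a minimal sketch of the same two helpers using keyword arguments, assuming the Space installs a recent librosa; the example file path at the end is hypothetical.

import librosa
import soundfile as sf

def adjust_pitch(audio_path, pitch_factor):
    # Load audio (librosa resamples to 22.05 kHz by default)
    y, sr = librosa.load(audio_path)
    # Shift pitch by pitch_factor semitones; sr and n_steps are keyword-only in librosa >= 0.10
    y_shifted = librosa.effects.pitch_shift(y, sr=sr, n_steps=pitch_factor)
    # Overwrite the file with the pitch-shifted audio
    sf.write(audio_path, y_shifted, sr)

def adjust_speed(audio_path, speed_factor):
    # Load the audio file
    y, sr = librosa.load(audio_path)
    # Stretch time by speed_factor (>1 speeds up, <1 slows down); rate is keyword-only in librosa >= 0.10
    y_speeded = librosa.effects.time_stretch(y, rate=speed_factor)
    # Save the speed-adjusted audio
    sf.write(audio_path, y_speeded, sr)

# Example usage on the file produced by generate_emotional_speech (hypothetical path):
# adjust_pitch("output.wav", pitch_factor=-2)   # lower the voice by two semitones
# adjust_speed("output.wav", speed_factor=1.1)  # play back 10% faster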