Anita-19 committed on
Commit
200d4a2
·
verified ·
1 Parent(s): ccd6a27

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -11
app.py CHANGED
@@ -12,12 +12,14 @@ tts_model = TTS(model_name="tts_models/en/ljspeech/tacotron2-DDC")
12
 
13
  # Emotion-specific settings for pitch and speed
14
  emotion_settings = {
15
- "joy": {"pitch": 1.2, "speed": 1.1},
16
- "sadness": {"pitch": 0.8, "speed": 0.9},
17
- "anger": {"pitch": 1.0, "speed": 1.2},
18
- "fear": {"pitch": 0.9, "speed": 1.0},
19
- "surprise": {"pitch": 1.3, "speed": 1.2},
20
  "neutral": {"pitch": 1.0, "speed": 1.0},
 
 
 
 
 
 
 
21
  }
22
 
23
 
@@ -43,11 +45,10 @@ def emotion_aware_tts_pipeline(input_text=None, file_input=None):
43
  audio_path = "output.wav"
44
  tts_model.tts_to_file(text=input_text, file_path=audio_path)
45
 
46
- # Adjust pitch and speed using librosa
47
- if pitch != 1.0:
48
- adjust_audio_pitch(audio_path, pitch)
49
- if speed != 1.0:
50
- adjust_audio_speed(audio_path, speed)
51
 
52
  return f"Detected Emotion: {emotion} (Confidence: {confidence:.2f})", audio_path
53
  else:
@@ -56,7 +57,6 @@ def emotion_aware_tts_pipeline(input_text=None, file_input=None):
56
  return f"Error: {str(e)}", None
57
 
58
 
59
-
60
  # Define Gradio interface
61
  interface = gr.Interface(
62
  fn=emotion_aware_tts_pipeline,
 
12
 
13
  # Emotion-specific settings for pitch and speed
14
  emotion_settings = {
 
 
 
 
 
15
  "neutral": {"pitch": 1.0, "speed": 1.0},
16
+ "joy": {"pitch": 1.3, "speed": 1.2},
17
+ "sadness": {"pitch": 0.8, "speed": 0.9},
18
+ "anger": {"pitch": 1.6, "speed": 1.4},
19
+ "fear": {"pitch": 1.2, "speed": 0.95},
20
+ "surprise": {"pitch": 1.5, "speed": 1.3},
21
+ "disgust": {"pitch": 0.9, "speed": 0.95},
22
+ "shame": {"pitch": 0.8, "speed": 0.85},
23
  }
24
 
25
 
 
45
  audio_path = "output.wav"
46
  tts_model.tts_to_file(text=input_text, file_path=audio_path)
47
 
48
+ # Adjust pitch
49
+ pitch_factor = (pitch - 1.0) * 12 # Convert to semitones for librosa
50
+ adjust_pitch(audio_path, pitch_factor)
51
+
 
52
 
53
  return f"Detected Emotion: {emotion} (Confidence: {confidence:.2f})", audio_path
54
  else:
 
57
  return f"Error: {str(e)}", None
58
 
59
 
 
60
  # Define Gradio interface
61
  interface = gr.Interface(
62
  fn=emotion_aware_tts_pipeline,