tykiww committed
Commit 486cbec · verified · 1 Parent(s): d49cd75

Update app.py

Files changed (1)
  1. app.py +26 -4
app.py CHANGED
@@ -25,31 +25,53 @@ def init_TTS(config):
     tts = TTS(config['inference']['model']).to(device)
     return tts
 
+dataset_choice = gr.Radio(label="Choose Dataset",
+                          choices=["Hugging Face Hub Dataset", "Upload Your Own"],
+                          value="Hugging Face Hub Dataset")
+dataset_predefined = gr.Textbox(label="Hugging Face Hub Training Dataset",
+                                value='yahma/alpaca-cleaned',
+                                visible=True)
+
 
 @spaces.GPU
-def generate_speech(microphone, text):
+def generate_speech(voice_choice, microphone, text):
     # Generate speech using the provided text, speaker voice, and language
+
+    if voice_choice=="Record":
+        speaker = microphone
+
+    else:
+        speaker = config['inference']['speaker_wav']
 
     tts.tts_to_file(text=text,
                     file_path=config['inference']['file_path'],
-                    speaker_wav=microphone,  # config['inference']['speaker_wav']
+                    speaker_wav=microphone,
                     language=config['inference']['language'])
     return config['inference']['file_path']
 
 
+
+
+
 ###################################### main ######################################
 def main(config):
+
+    voice_choice = gr.Radio(label="Record or use predefined voice.",
+                            choices=["Record", "Predefined (Nancy)"],
+                            value="Record")
 
     # Create the Gradio interface
     demo = gr.Interface(
         fn=generate_speech,
         inputs=[
+            voice_choice,
+
             gr.Audio(label="Audio", sources="microphone", type="filepath", elem_id='audio'),
             gr.Textbox(label="Enter your text")
         ],
         outputs="audio",
-        title="Voice Synthesis with Coqui-XTTS",
-        description="Synthesize speech using predefined target voice and language."
+        title="Voice cloning and Synthesis with Coqui-XTTS",
+        description="Clone your voice and Synthesize speech using predefined target voice and language."
     )
 
     # Launch the interface
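
For orientation, the updated generate_speech would read roughly as below once the new speaker selection is actually passed through to tts_to_file. This is a minimal sketch, not the committed code: in the commit above, speaker_wav is still set to microphone, so the speaker chosen from voice_choice goes unused; the sketch assumes the intent is to use it. The import of spaces and the module-level tts and config objects come from the rest of app.py.

import spaces  # Hugging Face Spaces GPU decorator, as used in app.py

@spaces.GPU
def generate_speech(voice_choice, microphone, text):
    # Pick the reference voice: the live recording or the predefined speaker wav.
    if voice_choice == "Record":
        speaker = microphone
    else:
        speaker = config['inference']['speaker_wav']

    # Synthesize the text with the chosen reference voice and configured language.
    tts.tts_to_file(text=text,
                    file_path=config['inference']['file_path'],
                    speaker_wav=speaker,  # assumption: use the selected speaker, not microphone directly
                    language=config['inference']['language'])
    return config['inference']['file_path']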