Staticaliza committed on
Commit 33d5f3b · verified · 1 Parent(s): 690106b

Update app.py

Files changed (1)
  1. app.py +36 -149
app.py CHANGED
@@ -1,170 +1,57 @@
  import spaces
  from kokoro import KModel, KPipeline
  import gradio as gr
- import os
- import torch
  
- IS_DUPLICATE = not os.getenv('SPACE_ID', '').startswith('hexgrad/')
- CHAR_LIMIT = None if IS_DUPLICATE else 5000
+ CHAR_LIMIT = 5000
  
- CUDA_AVAILABLE = torch.cuda.is_available()
- models = {gpu: KModel().to('cuda' if gpu else 'cpu').eval() for gpu in [False] + ([True] if CUDA_AVAILABLE else [])}
- pipelines = {lang_code: KPipeline(lang_code=lang_code, model=False) for lang_code in 'ab'}
- pipelines['a'].g2p.lexicon.golds['kokoro'] = 'kˈOkəɹO'
- pipelines['b'].g2p.lexicon.golds['kokoro'] = 'kˈQkəɹQ'
+ MODEL = KModel().eval() # always cpu
+ PIPELINES = {lang: KPipeline(lang_code=lang, model=False) for lang in "ab"}
+ PIPELINES["a"].g2p.lexicon.golds["kokoro"] = "kˈOkəɹO"
+ PIPELINES["b"].g2p.lexicon.golds["kokoro"] = "kˈQkəɹQ"
  
- @spaces.GPU(duration=10)
- def forward_gpu(ps, ref_s, speed):
-     return models[True](ps, ref_s, speed)
- 
- def generate_first(text, voice='af_heart', speed=1, use_gpu=CUDA_AVAILABLE):
-     text = text if CHAR_LIMIT is None else text.strip()[:CHAR_LIMIT]
-     pipeline = pipelines[voice[0]]
+ def generate_first(text, voice="af_heart", speed=1):
+     text = text.strip()[:CHAR_LIMIT]
+     pipeline = PIPELINES[voice[0]]
      pack = pipeline.load_voice(voice)
-     use_gpu = use_gpu and CUDA_AVAILABLE
      for _, ps, _ in pipeline(text, voice, speed):
-         ref_s = pack[len(ps)-1]
-         try:
-             if use_gpu:
-                 audio = forward_gpu(ps, ref_s, speed)
-             else:
-                 audio = models[False](ps, ref_s, speed)
-         except gr.exceptions.Error as e:
-             if use_gpu:
-                 gr.Warning(str(e))
-                 gr.Info('Retrying with CPU. To avoid this error, change Hardware to CPU.')
-                 audio = models[False](ps, ref_s, speed)
-             else:
-                 raise gr.Error(e)
+         ref_s = pack[len(ps) - 1]
+         audio = MODEL(ps, ref_s, speed)
          return (24000, audio.numpy()), ps
-     return None, ''
+     return None, ""
  
- # Arena API
- def predict(text, voice='af_heart', speed=1):
-     return generate_first(text, voice, speed, use_gpu=False)[0]
+ def predict(text, voice="af_heart", speed=1):
+     return generate_first(text, voice, speed)[0]
  
- def tokenize_first(text, voice='af_heart'):
-     pipeline = pipelines[voice[0]]
+ def tokenize_first(text, voice="af_heart"):
+     pipeline = PIPELINES[voice[0]]
      for _, ps, _ in pipeline(text, voice):
          return ps
-     return ''
+     return ""
  
- def generate_all(text, voice='af_heart', speed=1, use_gpu=CUDA_AVAILABLE):
-     text = text if CHAR_LIMIT is None else text.strip()[:CHAR_LIMIT]
-     pipeline = pipelines[voice[0]]
+ def generate_all(text, voice="af_heart", speed=1):
+     text = text.strip()[:CHAR_LIMIT]
+     pipeline = PIPELINES[voice[0]]
      pack = pipeline.load_voice(voice)
-     use_gpu = use_gpu and CUDA_AVAILABLE
      for _, ps, _ in pipeline(text, voice, speed):
-         ref_s = pack[len(ps)-1]
-         try:
-             if use_gpu:
-                 audio = forward_gpu(ps, ref_s, speed)
-             else:
-                 audio = models[False](ps, ref_s, speed)
-         except gr.exceptions.Error as e:
-             if use_gpu:
-                 gr.Warning(str(e))
-                 gr.Info('Switching to CPU')
-                 audio = models[False](ps, ref_s, speed)
-             else:
-                 raise gr.Error(e)
+         ref_s = pack[len(ps) - 1]
+         audio = MODEL(ps, ref_s, speed)
          yield 24000, audio.numpy()
  
- CHOICES = {
-     '🇺🇸 🚺 Heart ❤️': 'af_heart',
-     '🇺🇸 🚺 Bella 🔥': 'af_bella',
-     '🇺🇸 🚺 Nicole 🎧': 'af_nicole',
-     '🇺🇸 🚺 Aoede': 'af_aoede',
-     '🇺🇸 🚺 Kore': 'af_kore',
-     '🇺🇸 🚺 Sarah': 'af_sarah',
-     '🇺🇸 🚺 Nova': 'af_nova',
-     '🇺🇸 🚺 Sky': 'af_sky',
-     '🇺🇸 🚺 Alloy': 'af_alloy',
-     '🇺🇸 🚺 Jessica': 'af_jessica',
-     '🇺🇸 🚺 River': 'af_river',
-     '🇺🇸 🚹 Michael': 'am_michael',
-     '🇺🇸 🚹 Fenrir': 'am_fenrir',
-     '🇺🇸 🚹 Puck': 'am_puck',
-     '🇺🇸 🚹 Echo': 'am_echo',
-     '🇺🇸 🚹 Eric': 'am_eric',
-     '🇺🇸 🚹 Liam': 'am_liam',
-     '🇺🇸 🚹 Onyx': 'am_onyx',
-     '🇺🇸 🚹 Santa': 'am_santa',
-     '🇺🇸 🚹 Adam': 'am_adam',
-     '🇬🇧 🚺 Emma': 'bf_emma',
-     '🇬🇧 🚺 Isabella': 'bf_isabella',
-     '🇬🇧 🚺 Alice': 'bf_alice',
-     '🇬🇧 🚺 Lily': 'bf_lily',
-     '🇬🇧 🚹 George': 'bm_george',
-     '🇬🇧 🚹 Fable': 'bm_fable',
-     '🇬🇧 🚹 Lewis': 'bm_lewis',
-     '🇬🇧 🚹 Daniel': 'bm_daniel',
- }
- 
- for v in CHOICES.values():
-     pipelines[v[0]].load_voice(v)
- 
- TOKEN_NOTE = '''
- 💡 Customize pronunciation with Markdown link syntax and /slashes/ like `[Kokoro](/kˈOkəɹO/)`
- 💬 To adjust intonation, try punctuation `;:,.!?—…"()“”` or stress `ˈ` and `ˌ`
- ⬇️ Lower stress `[1 level](-1)` or `[2 levels](-2)`
- ⬆️ Raise stress 1 level `[or](+2)` 2 levels (only works on less stressed, usually short words)
- '''
- 
- with gr.Blocks() as generate_tab:
-     out_audio = gr.Audio(label='Output Audio', interactive=False, streaming=False, autoplay=True)
-     generate_btn = gr.Button('Generate', variant='primary')
-     with gr.Accordion('Output Tokens', open=True):
-         out_ps = gr.Textbox(interactive=False, show_label=False, info='Tokens used to generate the audio, up to 510 context length.')
-         tokenize_btn = gr.Button('Tokenize', variant='secondary')
-         gr.Markdown(TOKEN_NOTE)
-         predict_btn = gr.Button('Predict', variant='secondary', visible=False)
- 
- STREAM_NOTE = ['⚠️ There is an unknown Gradio bug that might yield no audio the first time you click `Stream`.']
- if CHAR_LIMIT is not None:
-     STREAM_NOTE.append(f'✂️ Each stream is capped at {CHAR_LIMIT} characters.')
-     STREAM_NOTE.append('🚀 Want more characters? You can [use Kokoro directly](https://huggingface.co/hexgrad/Kokoro-82M#usage) or duplicate this space:')
- STREAM_NOTE = '\n\n'.join(STREAM_NOTE)
- 
- with gr.Blocks() as stream_tab:
-     out_stream = gr.Audio(label='Output Audio Stream', interactive=False, streaming=True, autoplay=True)
-     with gr.Row():
-         stream_btn = gr.Button('Stream', variant='primary')
-         stop_btn = gr.Button('Stop', variant='stop')
-     with gr.Accordion('Note', open=True):
-         gr.Markdown(STREAM_NOTE)
-         gr.DuplicateButton()
- 
- BANNER_TEXT = '''
- [***Kokoro*** **is an open-weight TTS model with 82 million parameters.**](https://huggingface.co/hexgrad/Kokoro-82M)
- As of January 31st, 2025, Kokoro was the most-liked [**TTS model**](https://huggingface.co/models?pipeline_tag=text-to-speech&sort=likes) and the most-liked [**TTS space**](https://huggingface.co/spaces?sort=likes&search=tts) on Hugging Face.
- This demo only showcases English, but you can directly use the model to access other languages.
- '''
- API_OPEN = os.getenv('SPACE_ID') != 'hexgrad/Kokoro-TTS'
- API_NAME = None if API_OPEN else False
+ @spaces.GPU()
+ def gpu():
+     return
+ 
  with gr.Blocks() as app:
      with gr.Row():
-         gr.Markdown(BANNER_TEXT, container=True)
-     with gr.Row():
-         with gr.Column():
-             text = gr.Textbox(label='Input Text', info=f"Up to ~500 characters per Generate, or {'∞' if CHAR_LIMIT is None else CHAR_LIMIT} characters per Stream")
-             with gr.Row():
-                 voice = gr.Dropdown(list(CHOICES.items()), value='af_heart', label='Voice', info='Quality and availability vary by language')
-                 use_gpu = gr.Dropdown(
-                     [('ZeroGPU 🚀', True), ('CPU 🐌', False)],
-                     value=CUDA_AVAILABLE,
-                     label='Hardware',
-                     info='GPU is usually faster, but has a usage quota',
-                     interactive=CUDA_AVAILABLE
-                 )
-             speed = gr.Slider(minimum=0.5, maximum=2, value=1, step=0.1, label='Speed')
-         with gr.Column():
-             gr.TabbedInterface([generate_tab, stream_tab], ['Generate', 'Stream'])
-     generate_btn.click(fn=generate_first, inputs=[text, voice, speed, use_gpu], outputs=[out_audio, out_ps], api_name=API_NAME)
-     tokenize_btn.click(fn=tokenize_first, inputs=[text, voice], outputs=[out_ps], api_name=API_NAME)
-     stream_event = stream_btn.click(fn=generate_all, inputs=[text, voice, speed, use_gpu], outputs=[out_stream], api_name=API_NAME)
-     stop_btn.click(fn=None, cancels=stream_event)
-     predict_btn.click(fn=predict, inputs=[text, voice, speed], outputs=[out_audio], api_name=API_NAME)
- 
- if __name__ == '__main__':
-     app.queue(api_open=API_OPEN).launch(show_api=API_OPEN, ssr_mode=True)
+         text_input = gr.Textbox(label="input text")
+         voice_input = gr.Textbox(label="voice", value="af_heart")
+         speed_input = gr.Slider(minimum=0.5, maximum=2, value=1, step=0.1, label="speed")
+     out_audio = gr.Audio(label="output audio", interactive=False, autoplay=True)
+     out_tokens = gr.Textbox(label="tokens", interactive=False)
+     gen_btn = gr.Button("generate")
+     token_btn = gr.Button("tokenize")
+     gen_btn.click(fn=generate_first, inputs=[text_input, voice_input, speed_input], outputs=[out_audio, out_tokens])
+     token_btn.click(fn=tokenize_first, inputs=[text_input, voice_input], outputs=out_tokens)
+ 
+ if __name__ == "__main__":
+     app.launch()
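
For reference, a minimal sketch of how the simplified module can be exercised from Python after this change. It only uses names defined in the new app.py (generate_first, tokenize_first); the sample text and the assumption that the kokoro package and the af_heart voice assets are installed locally are illustrative, not part of the commit.

    # Hypothetical local smoke test for the new app.py (assumes `kokoro` and the
    # af_heart voice are available). Importing app builds the Blocks UI but does
    # not launch it, since launch() is guarded by __main__.
    from app import generate_first, tokenize_first

    text = "Hello from Kokoro."  # illustrative input
    print(tokenize_first(text, voice="af_heart"))  # phoneme tokens only

    # Full CPU synthesis: returns ((sample_rate, numpy_audio), tokens).
    (sample_rate, audio), tokens = generate_first(text, voice="af_heart", speed=1)
    print(sample_rate, audio.shape)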