import gradio as gr
import langcodes
import torch
import uuid
import json
import librosa
import os
import tempfile
import soundfile as sf
import scipy.io.wavfile as wav

from transformers import pipeline, VitsModel, AutoTokenizer, set_seed
from huggingface_hub import InferenceClient
from langdetect import detect, DetectorFactory
from nemo.collections.asr.models import EncDecMultiTaskModel

# Constants
SAMPLE_RATE = 16000  # Hz

# load ASR model
canary_model = EncDecMultiTaskModel.from_pretrained('nvidia/canary-1b')
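# Use greedy decoding (beam size 1) to keep inference fast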
decode_cfg = canary_model.cfg.decoding
decode_cfg.beam.beam_size = 1
canary_model.change_decoding_strategy(decode_cfg)


client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
image_pipe = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
summary_pipe = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6")
ner_pipe = pipeline("ner", model="dslim/bert-base-NER")
tts_model = VitsModel.from_pretrained("facebook/mms-tts-eng")
tts_tokenizer = AutoTokenizer.from_pretrained("facebook/mms-tts-eng")

# Transcribe audio (ASR) or translate speech to text using the Canary model
def gen_text(audio_filepath, action, source_lang, target_lang):
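    """Run the Canary model on one audio file.

    action: "asr" to transcribe in source_lang, or "s2t_translation"
    to translate the speech into target_lang.
    Returns the predicted text.
    """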
    if audio_filepath is None:
        raise gr.Error("Please provide some input audio.")
    
    utt_id = uuid.uuid4()
    with tempfile.TemporaryDirectory() as tmpdir:
        # Convert to 16 kHz
        data, sr = librosa.load(audio_filepath, sr=None, mono=True)
        if sr != SAMPLE_RATE:
            data = librosa.resample(data, orig_sr=sr, target_sr=SAMPLE_RATE)
        converted_audio_filepath = os.path.join(tmpdir, f"{utt_id}.wav")
        sf.write(converted_audio_filepath, data, SAMPLE_RATE)

        # Transcribe audio
        duration = len(data) / SAMPLE_RATE
        manifest_data = {
            "audio_filepath": converted_audio_filepath,
            "taskname": action,
            "source_lang": source_lang,
            "target_lang": source_lang if action=="asr" else target_lang,
            "pnc": "no",
            "answer": "predict",
            "duration": str(duration),
        }
        manifest_filepath = os.path.join(tmpdir, f"{utt_id}.json")
        with open(manifest_filepath, 'w') as fout:
            fout.write(json.dumps(manifest_data))

        predicted_text = canary_model.transcribe(manifest_filepath)[0]
        # NOTE: audio longer than ~40 s would need NeMo's buffered/chunked
        # inference; this demo uses a single-pass transcribe call.
    
    return predicted_text

# Function to convert text to speech using TTS
def gen_translated_speech(text, lang):
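    """Convert text to speech using the MMS-TTS model for the given language code."""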
    set_seed(555)  # Make it deterministic
    match lang:
        case "en":
            model = "facebook/mms-tts-eng"
        case "fr":
            model = "facebook/mms-tts-fra"
        case "de":
            model = "facebook/mms-tts-deu"
        case "es":
            model = "facebook/mms-tts-spa"
        case _:
            # Default to English; "facebook/mms-tts" is a model collection,
            # not a loadable checkpoint
            model = "facebook/mms-tts-eng"
    
    # Load the language-specific TTS model (reloaded on each call for simplicity)
    tts_model = VitsModel.from_pretrained(model)
    tts_tokenizer = AutoTokenizer.from_pretrained(model)

    input_text = tts_tokenizer(text, return_tensors="pt")
    with torch.no_grad():
        outputs = tts_model(**input_text)
    waveform_np = outputs.waveform[0].cpu().numpy()
    output_file = f"{str(uuid.uuid4())}.wav"
    wav.write(output_file, rate=tts_model.config.sampling_rate, data=waveform_np)
    return output_file

# Root function for Gradio interface
def start_process(audio_filepath, source_lang, target_lang):
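    """Pipeline for the Speech Translator tab: transcribe, translate, then speak."""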
    transcription = gen_text(audio_filepath, "asr", source_lang, target_lang)
    print("Done transcribing")
    translation = gen_text(audio_filepath, "s2t_translation", source_lang, target_lang) 
    print("Done translation")
    audio_output_filepath = gen_translated_speech(translation, target_lang)
    print("Done speaking")   
    return transcription, translation, audio_output_filepath


def gen_speech(text):
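    """Convert English text to speech with the preloaded MMS-TTS model."""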
    set_seed(555)  # Make it deterministic
    input_text = tts_tokenizer(text, return_tensors="pt")
    with torch.no_grad():
        outputs = tts_model(**input_text)
    waveform_np = outputs.waveform[0].cpu().numpy()
    output_file = f"{str(uuid.uuid4())}.wav"
    wav.write(output_file, rate=tts_model.config.sampling_rate, data=waveform_np)
    return output_file
    
def detect_language(text):
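    """Detect the language code of the given text (seeded for consistent results)."""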
    DetectorFactory.seed = 0  # Ensure consistent results
    return detect(text)
    
def language_name_to_code(language_name):
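    """Map a language name such as "Chinese" to its code such as "zh"."""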
    try:
        language = langcodes.find(language_name)
        return language.language
    except LookupError:
        # langcodes.find raises LookupError when the name is not recognized
        return None

def respond(message, history: list[tuple[str, str]], system_message, max_tokens, temperature, top_p,):
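    """Stream a chat completion from the Zephyr-7B endpoint, yielding partial replies."""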
    messages = [{"role": "system", "content": system_message}]

    for val in history:
        if val[0]:
            messages.append({"role": "user", "content": val[0]})
        if val[1]:
            messages.append({"role": "assistant", "content": val[1]})

    messages.append({"role": "user", "content": message})

    response = ""

    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content

        if token:  # the final streamed chunk may carry no content
            response += token
        yield response


def launch_image_pipe(input):
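    """Caption the image with BLIP, then speak the caption via TTS."""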
    out = image_pipe(input)
    text = out[0]['generated_text']
    audio_output_filepath = gen_speech(text)
    return text, audio_output_filepath

def translate(input_text, source, target):
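    """Translate text with the matching Helsinki-NLP OPUS-MT model.

    Returns (translated_text, error_message); one of the two is empty.
    """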
    try:
        model = f"Helsinki-NLP/opus-mt-{source}-{target}"
        pipe = pipeline("translation", model=model)
        translation = pipe(input_text)
        return translation[0]['translation_text'], ""
    except Exception:
        # Model loading raises (e.g. OSError) when no OPUS-MT checkpoint
        # exists for this language pair
        return "", f"Error: translation from {source} to {target} is not supported by the Helsinki-NLP OPUS-MT models"

def summarize_translate(input_text, target_lang):
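    """Summarize the input text, then translate the summary into target_lang."""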
    output = summary_pipe(input_text)
    input_text_summary = output[0]['summary_text']
    source = detect_language(input_text_summary)
    target = language_name_to_code(target_lang)
    print(f"source_detect:{source}, target_lang:{target_lang}, target_code:{target}")
    summary_translated = translate(input_text_summary, source, target)
    return input_text_summary, summary_translated[0]

def merge_tokens(tokens):
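    """Merge BIO-tagged sub-tokens from the NER pipeline into whole entities."""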
    merged_tokens = []
    for token in tokens:
        if merged_tokens and token['entity'].startswith('I-') and merged_tokens[-1]['entity'].endswith(token['entity'][2:]):
            # If current token continues the entity of the last one, merge them
            last_token = merged_tokens[-1]
            if token['word'].startswith('##'):
                # WordPiece continuation: glue onto the previous sub-token
                last_token['word'] += token['word'][2:]
            else:
                # New word within the same entity (e.g. "New" + "York")
                last_token['word'] += ' ' + token['word']
            last_token['end'] = token['end']
            last_token['score'] = (last_token['score'] + token['score']) / 2
        else:
            # Otherwise, add the token to the list
            merged_tokens.append(token)
    return merged_tokens

def ner(input):
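    """Run NER on the input and return entities in HighlightedText format."""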
    output = ner_pipe(input)
    merged_tokens = merge_tokens(output)
    return {"text": input, "entities": merged_tokens}
    
def create_playground_header():
    gr.Markdown("""
                # 🤗 Hugging Face Labs
                **Explore different LLMs on the Hugging Face platform. Just play and enjoy!**
                """)

def create_playground_footer():
    gr.Markdown("""
                **To Learn More about 🤗 Hugging Face, [Click Here](https://huggingface.co/docs)**
                """)

# Create Gradio interface
playground = gr.Blocks()

with playground:
    create_playground_header()
    
    with gr.Tabs():
        ## ================================================================================================================================
        ## Speech Translator
        ## ================================================================================================================================
        with gr.TabItem("Speech Translator"):
            with gr.Row():
                gr.Markdown("""
                        ## Your AI Translate Assistant
                        ### Takes audio input from the user, transcribes and translates it, then converts the translation back to speech.
                        - category: [Automatic Speech Recognition](https://huggingface.co/models?pipeline_tag=automatic-speech-recognition), model: [nvidia/canary-1b](https://huggingface.co/nvidia/canary-1b)
                        - category: [Text-to-Speech](https://huggingface.co/models?pipeline_tag=text-to-speech), model: [facebook/mms-tts](https://huggingface.co/facebook/mms-tts)
                        """)
            
            with gr.Row():
                with gr.Column():
                    source_lang = gr.Dropdown(
                        choices=["en", "de", "es", "fr"], value="en", label="Source Language"
                    )
                with gr.Column():
                    target_lang = gr.Dropdown(
                        choices=["en", "de", "es", "fr"], value="fr", label="Target Language"
                    )            
        
            with gr.Row():
                with gr.Column():
                    input_audio = gr.Audio(sources=["microphone"], type="filepath", label="Input Audio")            
                with gr.Column():
                    translated_speech = gr.Audio(type="filepath", label="Generated Speech")            
        
            with gr.Row():
                with gr.Column():            
                    transcribed_text = gr.Textbox(label="Transcription")
                with gr.Column():            
                    translated_text = gr.Textbox(label="Translation")
        
            with gr.Row():
                with gr.Column():
                    submit_button = gr.Button(value="Start Process", variant="primary")
                with gr.Column():
                    clear_button = gr.ClearButton(components=[input_audio, source_lang, target_lang, transcribed_text, translated_text, translated_speech], value="Clear")
        
            with gr.Row():
                gr.Examples(
                    examples=[
                        ["audio/sample_en.wav","en","fr"],
                        ["audio/sample_fr.wav","fr","de"],
                        ["audio/sample_de.wav","de","es"],
                        ["audio/sample_es.wav","es","en"]
                    ], 
                    inputs=[input_audio, source_lang, target_lang], 
                    outputs=[transcribed_text, translated_text, translated_speech], 
                    run_on_click=True, cache_examples=True, fn=start_process
                )
                    
            submit_button.click(start_process, inputs=[input_audio, source_lang, target_lang], outputs=[transcribed_text, translated_text, translated_speech])
        
        ## ================================================================================================================================
        ## Image Captioning
        ## ================================================================================================================================
        with gr.TabItem("Image"):
            with gr.Row():
                with gr.Column(scale=4):
                    gr.Markdown("""
                                ## Image Captioning
                                ### Upload an image and see how the AI describes what it sees.
                                - category: Image-to-Text, model: [Salesforce/blip-image-captioning-base](https://huggingface.co/Salesforce/blip-image-captioning-base)
                                - category: Text-to-Speech, model: [facebook/mms-tts-eng](https://huggingface.co/facebook/mms-tts-eng)
                                """)
                with gr.Column(scale=1):
                    ITT_button = gr.Button(value="Start Process", variant="primary")

            with gr.Row():
                with gr.Column():
                    img = gr.Image(type='pil')
                with gr.Column():
                    generated_textbox = gr.Textbox(lines=2, placeholder="", label="Generated Text")
                    audio_output = gr.Audio(type="filepath", label="Generated Speech")
                    ITT_Clear_button = gr.ClearButton(components=[img, generated_textbox, audio_output], value="Clear")
                    
                    gr.Examples(
                        examples=[
                            ["image/lion-dog-costume.jpg"], 
                            ["image/dog-halloween.jpeg"]
                        ], 
                        inputs=[img], 
                        outputs=[generated_textbox, audio_output], 
                        run_on_click=True, cache_examples=True, fn=launch_image_pipe)
                        
            ITT_button.click(launch_image_pipe, inputs=[img], outputs=[generated_textbox, audio_output])
        
        ## ================================================================================================================================
        ## Text Summarization and Translation
        ## ================================================================================================================================
        with gr.TabItem("Text"):
            with gr.Row():
                with gr.Column(scale=4):
                    gr.Markdown("""
                                ## Text Summarization and Translation
                                ### Summarize a paragraph and translate the summary into another language.
                                - pipeline: summarization, model: [sshleifer/distilbart-cnn-12-6](https://huggingface.co/sshleifer/distilbart-cnn-12-6)
                                - pipeline: translation, model: [Helsinki-NLP/opus-mt-{source}-{target}](https://huggingface.co/Helsinki-NLP/opus-mt-en-fr)
                                """)
                    
                with gr.Column(scale=1):
                    text_pipeline_button = gr.Button(value="Start Process", variant="primary")
                    

            with gr.Row():
                with gr.Column():
                    source_text = gr.Textbox(label="Text to summarize", lines=18)                    
                with gr.Column():
                    summary_textoutput = gr.Textbox(lines=3, placeholder="", label="Text Summarization")
                    target_language_dropdown = gr.Dropdown( choices=["Chinese", "French", "Spanish"],
                                                            value="Chinese",
                                                            label="Translate to Language")
                    translated_textbox = gr.Textbox(lines=3, placeholder="", label="Translated Result")
                    Text_Clear_button = gr.ClearButton(components=[source_text, summary_textoutput, translated_textbox], value="Clear")
            
            with gr.Row():
                with gr.Column():
                    gr.Examples(
                        examples=[
                            ["The tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey building, and the tallest structure in Paris. Its base is square, measuring 125 metres (410 ft) on each side. During its construction, the Eiffel Tower surpassed the Washington Monument to become the tallest man-made structure in the world, a title it held for 41 years until the Chrysler Building in New York City was finished in 1930. It was the first structure to reach a height of 300 metres. Due to the addition of a broadcasting aerial at the top of the tower in 1957, it is now taller than the Chrysler Building by 5.2 metres (17 ft). Excluding transmitters, the Eiffel Tower is the second tallest free-standing structure in France after the Millau Viaduct.","French"], 
                            ["Tower Bridge is a Grade I listed combined bascule, suspension, and, until 1960, cantilever bridge in London, built between 1886 and 1894, designed by Horace Jones and engineered by John Wolfe Barry with the help of Henry Marc Brunel. It crosses the River Thames close to the Tower of London and is one of five London bridges owned and maintained by the City Bridge Foundation, a charitable trust founded in 1282. The bridge was constructed to connect the 39 per cent of London's population that lived east of London Bridge, while allowing shipping to access the Pool of London between the Tower of London and London Bridge. The bridge was opened by Edward, Prince of Wales and Alexandra, Princess of Wales on 30 June 1894.","Chinese"]
                        ], 
                        inputs=[source_text, target_language_dropdown], 
                        outputs=[summary_textoutput, translated_textbox], 
                        run_on_click=True, cache_examples=True, fn=summarize_translate)
 
            text_pipeline_button.click(summarize_translate, inputs=[source_text, target_language_dropdown], outputs=[summary_textoutput, translated_textbox])
            
        ## ================================================================================================================================
        ## Find entities
        ## ================================================================================================================================
        with gr.TabItem("Name Entity"):
            with gr.Row():
                with gr.Column(scale=4):
                    gr.Markdown("""
                                ## Find entities
                                ### Entities include Person, Organization, and Location.
                                > pipeline: ner, model: [dslim/bert-base-NER](https://huggingface.co/dslim/bert-base-NER)
                                """)
                    
                with gr.Column(scale=1):
                    ner_pipeline_button = gr.Button(value="Start Process", variant="primary")
                    
            with gr.Row():
                with gr.Column():
                    ner_text_input = gr.Textbox(label="Text to find entities", lines=5)
                    
                with gr.Column():
                    ner_text_output = gr.HighlightedText(label="Text with entities")
                    Ner_Clear_button = gr.ClearButton(components=[ner_text_input, ner_text_output], value="Clear")
 
            with gr.Row():
                with gr.Column():
                    gr.Examples(examples=[
                        "My name is Ray, I'm learning through Hugging Face and DeepLearning.AI and I live in Caversham, Reading", 
                        "My name is Raymond, I work at A&O IT Group"
                    ], inputs=[ner_text_input], outputs=[ner_text_output], run_on_click=True, cache_examples=True, fn=ner)
 
            ner_pipeline_button.click(ner, inputs=[ner_text_input], outputs=[ner_text_output])

        ## ================================================================================================================================
        ## Chatbot
        ## ================================================================================================================================
        with gr.TabItem("Chatbot"):
            gr.ChatInterface(
                respond,
                additional_inputs=[
                    gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
                    gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
                    gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
                    gr.Slider(
                        minimum=0.1,
                        maximum=1.0,
                        value=0.95,
                        step=0.05,
                        label="Top-p (nucleus sampling)",
                    ),
                ],
            )

            
    create_playground_footer()

playground.launch()