import gradio as gr
import langcodes
import torch
import uuid
import json
import librosa
import os
import tempfile
import soundfile as sf
import scipy.io.wavfile as wav
from transformers import pipeline, VitsModel, AutoTokenizer, set_seed
from huggingface_hub import InferenceClient
from langdetect import detect, DetectorFactory
from nemo.collections.asr.models import EncDecMultiTaskModel
# Constants
SAMPLE_RATE = 16000 # Hz
# Load the ASR model (NVIDIA Canary) and switch to greedy decoding (beam size 1)
canary_model = EncDecMultiTaskModel.from_pretrained('nvidia/canary-1b')
decode_cfg = canary_model.cfg.decoding
decode_cfg.beam.beam_size = 1
canary_model.change_decoding_strategy(decode_cfg)
# Chat client and task pipelines
client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
image_pipe = pipeline("image-to-text", model="Salesforce/blip-image-captioning-base")
summary_pipe = pipeline("summarization", model="sshleifer/distilbart-cnn-12-6")
ner_pipe = pipeline("ner", model="dslim/bert-base-NER")
# Default English TTS model and tokenizer (used by gen_speech)
tts_model = VitsModel.from_pretrained("facebook/mms-tts-eng")
tts_tokenizer = AutoTokenizer.from_pretrained("facebook/mms-tts-eng")
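
# A rough sketch of the pipeline outputs this app relies on (values are
# illustrative, not actual model output):
#   image_pipe(img)    -> [{'generated_text': 'a dog wearing a costume'}]
#   summary_pipe(text) -> [{'summary_text': '...'}]
#   ner_pipe(text)     -> [{'entity': 'B-PER', 'word': 'Ray', 'score': 0.99,
#                           'start': 11, 'end': 14, 'index': 4}, ...]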
# Function to convert audio to text (ASR) or translate speech to text using Canary
def gen_text(audio_filepath, action, source_lang, target_lang):
    if audio_filepath is None:
        raise gr.Error("Please provide some input audio.")
    utt_id = uuid.uuid4()
    with tempfile.TemporaryDirectory() as tmpdir:
        # Convert to 16 kHz mono
        data, sr = librosa.load(audio_filepath, sr=None, mono=True)
        if sr != SAMPLE_RATE:
            data = librosa.resample(data, orig_sr=sr, target_sr=SAMPLE_RATE)
        converted_audio_filepath = os.path.join(tmpdir, f"{utt_id}.wav")
        sf.write(converted_audio_filepath, data, SAMPLE_RATE)
        # Build a one-line manifest describing the transcription/translation task
        duration = len(data) / SAMPLE_RATE
        manifest_data = {
            "audio_filepath": converted_audio_filepath,
            "taskname": action,
            "source_lang": source_lang,
            "target_lang": source_lang if action == "asr" else target_lang,
            "pnc": "no",
            "answer": "predict",
            "duration": str(duration),
        }
        manifest_filepath = os.path.join(tmpdir, f"{utt_id}.json")
        with open(manifest_filepath, 'w') as fout:
            fout.write(json.dumps(manifest_data))
        # Transcribe (or translate) the audio
        predicted_text = canary_model.transcribe(manifest_filepath)[0]
        # For long audio (e.g. > 40 s), buffered inference could be used instead:
        # predicted_text = get_buffered_pred_feat_multitaskAED(
        #     frame_asr,
        #     canary_model.cfg.preprocessor,
        #     model_stride_in_secs,
        #     canary_model.device,
        #     manifest=manifest_filepath,
        # )[0].text
    return predicted_text
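
# Hypothetical usage sketch (paths and outputs are illustrative):
#   gen_text("audio/sample_en.wav", "asr", "en", "fr")
#     -> "the quick brown fox ..."    (transcription in the source language)
#   gen_text("audio/sample_en.wav", "s2t_translation", "en", "fr")
#     -> "le renard brun rapide ..."  (translation into the target language)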
# Function to convert text to speech (TTS) in the requested language
def gen_translated_speech(text, lang):
    set_seed(555)  # Make it deterministic
    # Pick the MMS-TTS checkpoint that matches the target language
    match lang:
        case "en":
            model = "facebook/mms-tts-eng"
        case "fr":
            model = "facebook/mms-tts-fra"
        case "de":
            model = "facebook/mms-tts-deu"
        case "es":
            model = "facebook/mms-tts-spa"
        case _:
            model = "facebook/mms-tts"
    # Load the TTS model and tokenizer for that language
    tts_model = VitsModel.from_pretrained(model)
    tts_tokenizer = AutoTokenizer.from_pretrained(model)
    input_text = tts_tokenizer(text, return_tensors="pt")
    with torch.no_grad():
        outputs = tts_model(**input_text)
    waveform_np = outputs.waveform[0].cpu().numpy()
    output_file = f"{uuid.uuid4()}.wav"
    wav.write(output_file, rate=tts_model.config.sampling_rate, data=waveform_np)
    return output_file
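
# E.g. gen_translated_speech("Bonjour le monde", "fr") writes a wav file with a
# fresh UUID name (such as "3f2a....wav") and returns its path for gr.Audio.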
# Root function for the Gradio interface: transcribe, translate, then speak
def start_process(audio_filepath, source_lang, target_lang):
    transcription = gen_text(audio_filepath, "asr", source_lang, target_lang)
    print("Done transcribing")
    translation = gen_text(audio_filepath, "s2t_translation", source_lang, target_lang)
    print("Done translating")
    audio_output_filepath = gen_translated_speech(translation, target_lang)
    print("Done speaking")
    return transcription, translation, audio_output_filepath
# English-only TTS using the preloaded default model
def gen_speech(text):
    set_seed(555)  # Make it deterministic
    input_text = tts_tokenizer(text, return_tensors="pt")
    with torch.no_grad():
        outputs = tts_model(**input_text)
    waveform_np = outputs.waveform[0].cpu().numpy()
    output_file = f"{uuid.uuid4()}.wav"
    wav.write(output_file, rate=tts_model.config.sampling_rate, data=waveform_np)
    return output_file
# Detect the language code (e.g. "en") of a piece of text
def detect_language(text):
    DetectorFactory.seed = 0  # Ensure consistent results
    return detect(text)
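
# E.g. detect_language("Bonjour tout le monde") should return "fr"; langdetect
# is probabilistic, hence the fixed DetectorFactory seed above.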
# Map a human-readable language name (e.g. "French") to its ISO code (e.g. "fr")
def language_name_to_code(language_name):
    try:
        language = langcodes.find(language_name)
        return language.language
    except (LookupError, langcodes.LanguageTagError):
        # langcodes.find raises LookupError for unknown names
        return None
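
# E.g. language_name_to_code("Chinese") -> "zh" and
# language_name_to_code("French") -> "fr"; these codes feed the
# Helsinki-NLP/opus-mt-{source}-{target} model names used in translate() below.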
# Stream a chat reply from the Zephyr model via the Hugging Face Inference API
def respond(message, history: list[tuple[str, str]], system_message, max_tokens, temperature, top_p):
    messages = [{"role": "system", "content": system_message}]
    for user_msg, assistant_msg in history:
        if user_msg:
            messages.append({"role": "user", "content": user_msg})
        if assistant_msg:
            messages.append({"role": "assistant", "content": assistant_msg})
    messages.append({"role": "user", "content": message})
    response = ""
    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content
        response += token or ""
        yield response
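
# Sketch of the streaming protocol (field names match the chat_completion call
# above; values illustrative):
#   chunk.choices[0].delta.content -> "Hel", "lo", " there", ...
# Accumulating the deltas and yielding the running string gives Gradio's
# ChatInterface a token-by-token typing effect.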
# Caption an image, then read the caption aloud
def launch_image_pipe(image):
    out = image_pipe(image)
    text = out[0]['generated_text']
    audio_output_filepath = gen_speech(text)
    return text, audio_output_filepath
# Translate text with the Helsinki-NLP opus-mt model for the given language pair
def translate(input_text, source, target):
    try:
        model = f"Helsinki-NLP/opus-mt-{source}-{target}"
        pipe = pipeline("translation", model=model)
        translation = pipe(input_text)
        return translation[0]['translation_text'], ""
    except (OSError, KeyError):
        # No opus-mt checkpoint exists for this direction
        return "", f"Error: translation from {source} to {target} is not supported by the Helsinki-NLP models"
# Summarize a paragraph, then translate the summary into the target language
def summarize_translate(input_text, target_lang):
    output = summary_pipe(input_text)
    input_text_summary = output[0]['summary_text']
    source = detect_language(input_text_summary)
    target = language_name_to_code(target_lang)
    print(f"source_detect:{source}, target_lang:{target_lang}, target_code:{target}")
    summary_translated = translate(input_text_summary, source, target)
    return input_text_summary, summary_translated[0]
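
# Note: distilbart produces English summaries, so detect_language should
# normally yield "en", and the dropdown choices map to opus-mt-en-zh,
# opus-mt-en-fr, and opus-mt-en-es respectively.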
# Merge wordpiece NER tokens: an "I-" token continues the entity started by the
# previous token, so fold it into that token's span
def merge_tokens(tokens):
    merged_tokens = []
    for token in tokens:
        if merged_tokens and token['entity'].startswith('I-') and merged_tokens[-1]['entity'].endswith(token['entity'][2:]):
            # If the current token continues the last entity, merge them
            last_token = merged_tokens[-1]
            last_token['word'] += token['word'].replace('##', '')
            last_token['end'] = token['end']
            last_token['score'] = (last_token['score'] + token['score']) / 2
        else:
            # Otherwise, start a new entity span
            merged_tokens.append(token)
    return merged_tokens
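
# Worked example (scores and offsets illustrative): "Caversham" tokenized as
#   {'entity': 'B-LOC', 'word': 'Ca',     'score': 0.99, 'start': 63, 'end': 65}
#   {'entity': 'I-LOC', 'word': '##vers', 'score': 0.98, 'start': 65, 'end': 69}
#   {'entity': 'I-LOC', 'word': '##ham',  'score': 0.97, 'start': 69, 'end': 72}
# merges into one span {'entity': 'B-LOC', 'word': 'Caversham', 'start': 63,
# 'end': 72, ...}, with scores averaged pairwise at each merge step.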
# Run NER and return the format expected by gr.HighlightedText
def ner(text):
    output = ner_pipe(text)
    merged_tokens = merge_tokens(output)
    return {"text": text, "entities": merged_tokens}
# Page header for the playground
def create_playground_header():
    gr.Markdown("""
    # 🤗 Hugging Face Labs
    **Explore different LLMs on the Hugging Face platform. Just play and enjoy!**
    """)

# Page footer with a docs link
def create_playground_footer():
    gr.Markdown("""
    **To learn more about 🤗 Hugging Face, [click here](https://huggingface.co/docs)**
    """)
# Create the Gradio interface
playground = gr.Blocks()

with playground:
    create_playground_header()
    with gr.Tabs():
        ## ================================================================================================================================
        ## Speech Translator
        ## ================================================================================================================================
        with gr.TabItem("Speech Translator"):
            with gr.Row():
                gr.Markdown("""
                ## Your AI Translate Assistant
                ### Takes input audio from the user, transcribes and translates it, then converts the translation back to speech.
                - category: [Automatic Speech Recognition](https://huggingface.co/models?pipeline_tag=automatic-speech-recognition), model: [nvidia/canary-1b](https://huggingface.co/nvidia/canary-1b)
                - category: [Text-to-Speech](https://huggingface.co/models?pipeline_tag=text-to-speech), model: [facebook/mms-tts](https://huggingface.co/facebook/mms-tts)
                """)
            with gr.Row():
                with gr.Column():
                    source_lang = gr.Dropdown(
                        choices=["en", "de", "es", "fr"], value="en", label="Source Language"
                    )
                with gr.Column():
                    target_lang = gr.Dropdown(
                        choices=["en", "de", "es", "fr"], value="fr", label="Target Language"
                    )
            with gr.Row():
                with gr.Column():
                    input_audio = gr.Audio(sources=["microphone"], type="filepath", label="Input Audio")
                with gr.Column():
                    translated_speech = gr.Audio(type="filepath", label="Generated Speech")
            with gr.Row():
                with gr.Column():
                    transcribed_text = gr.Textbox(label="Transcription")
                with gr.Column():
                    translated_text = gr.Textbox(label="Translation")
            with gr.Row():
                with gr.Column():
                    submit_button = gr.Button(value="Start Process", variant="primary")
                with gr.Column():
                    clear_button = gr.ClearButton(components=[input_audio, source_lang, target_lang, transcribed_text, translated_text, translated_speech], value="Clear")
            with gr.Row():
                gr.Examples(
                    examples=[
                        ["audio/sample_en.wav", "en", "fr"],
                        ["audio/sample_fr.wav", "fr", "de"],
                        ["audio/sample_de.wav", "de", "es"],
                        ["audio/sample_es.wav", "es", "en"]
                    ],
                    inputs=[input_audio, source_lang, target_lang],
                    outputs=[transcribed_text, translated_text, translated_speech],
                    run_on_click=True, cache_examples=True, fn=start_process
                )
            submit_button.click(start_process, inputs=[input_audio, source_lang, target_lang], outputs=[transcribed_text, translated_text, translated_speech])
        ## ================================================================================================================================
        ## Image Captioning
        ## ================================================================================================================================
        with gr.TabItem("Image"):
            with gr.Row():
                with gr.Column(scale=4):
                    gr.Markdown("""
                    ## Image Captioning
                    ### Upload an image and see what the AI understands about it.
                    - category: Image-to-Text, model: [Salesforce/blip-image-captioning-base](https://huggingface.co/Salesforce/blip-image-captioning-base)
                    - category: Text-to-Speech, model: [facebook/mms-tts-eng](https://huggingface.co/facebook/mms-tts-eng)
                    """)
                with gr.Column(scale=1):
                    ITT_button = gr.Button(value="Start Process", variant="primary")
            with gr.Row():
                with gr.Column():
                    img = gr.Image(type='pil')
                with gr.Column():
                    generated_textbox = gr.Textbox(lines=2, placeholder="", label="Generated Text")
                    audio_output = gr.Audio(type="filepath", label="Generated Speech")
                    ITT_Clear_button = gr.ClearButton(components=[img, generated_textbox, audio_output], value="Clear")
            gr.Examples(
                examples=[
                    ["image/lion-dog-costume.jpg"],
                    ["image/dog-halloween.jpeg"]
                ],
                inputs=[img],
                outputs=[generated_textbox, audio_output],
                run_on_click=True, cache_examples=True, fn=launch_image_pipe)
            ITT_button.click(launch_image_pipe, inputs=[img], outputs=[generated_textbox, audio_output])
        ## ================================================================================================================================
        ## Text Summarization and Translation
        ## ================================================================================================================================
        with gr.TabItem("Text"):
            with gr.Row():
                with gr.Column(scale=4):
                    gr.Markdown("""
                    ## Text Summarization and Translation
                    ### Summarize a paragraph and translate the summary into another language.
                    - pipeline: summarization, model: [sshleifer/distilbart-cnn-12-6](https://huggingface.co/sshleifer/distilbart-cnn-12-6)
                    - pipeline: translation, model: [Helsinki-NLP/opus-mt-en-{target}](https://huggingface.co/Helsinki-NLP/opus-mt-en-fr)
                    """)
                with gr.Column(scale=1):
                    text_pipeline_button = gr.Button(value="Start Process", variant="primary")
            with gr.Row():
                with gr.Column():
                    source_text = gr.Textbox(label="Text to summarize", lines=18)
                with gr.Column():
                    summary_textoutput = gr.Textbox(lines=3, placeholder="", label="Text Summarization")
                    target_language_dropdown = gr.Dropdown(choices=["Chinese", "French", "Spanish"],
                                                           value="Chinese",
                                                           label="Translate to Language")
                    translated_textbox = gr.Textbox(lines=3, placeholder="", label="Translated Result")
                    Text_Clear_button = gr.ClearButton(components=[source_text, summary_textoutput, translated_textbox], value="Clear")
            with gr.Row():
                with gr.Column():
                    gr.Examples(
                        examples=[
                            ["The tower is 324 metres (1,063 ft) tall, about the same height as an 81-storey building, and the tallest structure in Paris. Its base is square, measuring 125 metres (410 ft) on each side. During its construction, the Eiffel Tower surpassed the Washington Monument to become the tallest man-made structure in the world, a title it held for 41 years until the Chrysler Building in New York City was finished in 1930. It was the first structure to reach a height of 300 metres. Due to the addition of a broadcasting aerial at the top of the tower in 1957, it is now taller than the Chrysler Building by 5.2 metres (17 ft). Excluding transmitters, the Eiffel Tower is the second tallest free-standing structure in France after the Millau Viaduct.", "French"],
                            ["Tower Bridge is a Grade I listed combined bascule, suspension, and, until 1960, cantilever bridge in London, built between 1886 and 1894, designed by Horace Jones and engineered by John Wolfe Barry with the help of Henry Marc Brunel. It crosses the River Thames close to the Tower of London and is one of five London bridges owned and maintained by the City Bridge Foundation, a charitable trust founded in 1282. The bridge was constructed to connect the 39 per cent of London's population that lived east of London Bridge, while allowing shipping to access the Pool of London between the Tower of London and London Bridge. The bridge was opened by Edward, Prince of Wales and Alexandra, Princess of Wales on 30 June 1894.", "Chinese"]
                        ],
                        inputs=[source_text, target_language_dropdown],
                        outputs=[summary_textoutput, translated_textbox],
                        run_on_click=True, cache_examples=True, fn=summarize_translate)
            text_pipeline_button.click(summarize_translate, inputs=[source_text, target_language_dropdown], outputs=[summary_textoutput, translated_textbox])
        ## ================================================================================================================================
        ## Named Entity Recognition
        ## ================================================================================================================================
        with gr.TabItem("Named Entities"):
            with gr.Row():
                with gr.Column(scale=4):
                    gr.Markdown("""
                    ## Find named entities
                    ### Entities include people, organizations, and locations.
                    > pipeline: ner, model: [dslim/bert-base-NER](https://huggingface.co/dslim/bert-base-NER)
                    """)
                with gr.Column(scale=1):
                    ner_pipeline_button = gr.Button(value="Start Process", variant="primary")
            with gr.Row():
                with gr.Column():
                    ner_text_input = gr.Textbox(label="Text to find entities", lines=5)
                with gr.Column():
                    ner_text_output = gr.HighlightedText(label="Text with entities")
                    Ner_Clear_button = gr.ClearButton(components=[ner_text_input, ner_text_output], value="Clear")
            with gr.Row():
                with gr.Column():
                    gr.Examples(examples=[
                        "My name is Ray, I'm learning through Hugging Face and DeepLearning.AI and I live in Caversham, Reading",
                        "My name is Raymond, I work at A&O IT Group"
                    ], inputs=[ner_text_input], outputs=[ner_text_output], run_on_click=True, cache_examples=True, fn=ner)
            ner_pipeline_button.click(ner, inputs=[ner_text_input], outputs=[ner_text_output])
        ## ================================================================================================================================
        ## Chatbot
        ## ================================================================================================================================
        with gr.TabItem("Chatbot"):
            gr.ChatInterface(
                respond,
                additional_inputs=[
                    gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
                    gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
                    gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
                    gr.Slider(
                        minimum=0.1,
                        maximum=1.0,
                        value=0.95,
                        step=0.05,
                        label="Top-p (nucleus sampling)",
                    ),
                ],
            )
    create_playground_footer()

playground.launch()