import nemo
from nemo.collections.asr.models.msdd_models import NeuralDiarizer
import gradio as gr
import pandas as pd
import torch
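
# Select a device and load the pretrained telephonic MSDD diarization model.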
device = "cuda" if torch.cuda.is_available() else "cpu"
model = NeuralDiarizer.from_pretrained("diar_msdd_telephonic").to(device)
def run_diarization(path1):
    """Diarize an audio file and return the speaker segments as a DataFrame."""
    annotation = model(path1)
    rttm = annotation.to_rttm()
    df = pd.DataFrame(columns=['start_time', 'end_time', 'speaker'])
    # RTTM fields: column 4 is the segment onset, column 5 the duration, column 8 the speaker label.
    for idx, line in enumerate(rttm.splitlines()):
        split = line.split()
        start_time, duration, speaker = split[3], split[4], split[7]
        end_time = float(start_time) + float(duration)
        df.loc[idx] = float(start_time), end_time, speaker
    return df
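
# Gradio components: a microphone recording passed to the model as a file path,
# and a table output with one diarized segment per row.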
inputs = [
    gr.components.Audio(source="microphone", type="filepath", optional=True, label="Input Audio"),
]
output = gr.components.Dataframe()
description = (
    "This demo performs offline speaker diarization on an audio file using NVIDIA NeMo."
)
article = (
    "<p style='text-align: center'>"
    "<a href='https://catalog.ngc.nvidia.com/orgs/nvidia/teams/nemo/models/diar_msdd_telephonic' target='_blank'>🎙️ Learn more about the MSDD model</a> | "
    "<a href='https://arxiv.org/abs/2203.15974' target='_blank'>📚 MSDD paper</a> | "
    "<a href='https://github.com/NVIDIA/NeMo' target='_blank'>🧑‍💻 Repository</a>"
    "</p>"
)
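
# Example audio clips shipped with the demo.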
examples = [
    ["data/sample_interview_conversation.wav"],
    ["data/id10270_5r0dWxy17C8-00001.wav"],
]
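
# Assemble the Gradio interface; it is launched below with request queueing enabled.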
interface = gr.Interface(
    fn=run_diarization,
    inputs=inputs,
    outputs=output,
    title="Offline Speaker Diarization with NeMo",
    description=description,
    article=article,
    layout="horizontal",
    theme="huggingface",
    allow_flagging=False,
    live=False,
    examples=examples,
)
interface.launch(enable_queue=True)