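"""Soorani (Central Kurdish) audio transcription app.

Segments an uploaded recording into speech regions, transcribes each region
with a CTC speech recognition model, and serves the result through a Gradio
interface.
"""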
import io

import gradio as gr
import soundfile as sf
import torch
from transformers import AutoProcessor, AutoModelForCTC

# Speech activity detection utilities; Segmenter and filter_output are used below.
from iman.sad_tfpy10 import Segmenter, filter_output

# Load the Central Kurdish (Soorani) CTC model and its processor.
processor = AutoProcessor.from_pretrained("Akashpb13/Central_kurdish_xlsr")
model = AutoModelForCTC.from_pretrained("Akashpb13/Central_kurdish_xlsr")

SAMPLE_RATE = 16000  # the model expects 16 kHz mono audio
css = """
textarea { direction: rtl; text-align: right; font-family: Calibri, sans-serif; font-size: 16px;}
"""
# Speech/music/noise segmenter used to find speech regions before ASR.
seg = Segmenter(
    ffmpeg_path="ffmpeg",
    model_path="keras_speech_music_noise_cnn.hdf5",
    device="cpu",
    vad_type="vad",
)
def process_segment(args):
    """Transcribe one speech segment; returns (start, stop, transcription)."""
    segment, wav = args
    start, stop = segment
    # Slice the segment out of the full 16 kHz waveform.
    pp = wav[int(start * SAMPLE_RATE):int(stop * SAMPLE_RATE)]
    input_values = processor(pp, sampling_rate=SAMPLE_RATE, return_tensors="pt").input_values
    with torch.no_grad():
        logits = model(input_values).logits
    # Greedy CTC decoding: take the most likely token at each frame.
    predicted_ids = torch.argmax(logits, dim=-1)
    transcription = processor.batch_decode(predicted_ids)[0]
    return start, stop, transcription
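# Hypothetical example: given a 16 kHz waveform `wav`,
#     process_segment(((0.0, 4.2), wav)) -> (0.0, 4.2, "<transcribed text>")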
def pcm_to_flac(pcm_data, sample_rate=SAMPLE_RATE):
    """Encode a PCM array as FLAC bytes in memory (currently unused helper)."""
    buffer = io.BytesIO()
    sf.write(buffer, pcm_data, sample_rate, format='FLAC')
    return buffer.getvalue()
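# A minimal sketch of how pcm_to_flac could be used to export a detected
# segment (assuming `wav` is the 16 kHz array returned by the segmenter and
# `start`/`stop` are in seconds):
#
#     flac_bytes = pcm_to_flac(wav[int(start * SAMPLE_RATE):int(stop * SAMPLE_RATE)])
#     with open("segment.flac", "wb") as f:
#         f.write(flac_bytes)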
def transcribe_audio(audio_file):
    """Segment an audio file into speech regions and transcribe each one."""
    # Run speech activity detection; returns labelled segments and the waveform.
    isig, wav = seg(audio_file)
    # Clean up the segmentation: bridge short silences, drop tiny speech blips,
    # and split overly long segments so each fits the model comfortably.
    isig = filter_output(
        isig,
        max_silence=0.5,
        ignore_small_speech_segments=0.1,
        max_speech_len=15,
        split_speech_bigger_than=20,
    )
    # Keep only the (start, stop) times of each segment.
    isig = [(a, b) for x, a, b, _, _ in isig]
    print(isig)
    results = [process_segment((segment, wav)) for segment in isig]
    text = ""
    for start, stop, transcription in results:
        text += ' ' + transcription + '\r\n'
        print(start, stop)
    return text
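# Hypothetical direct use of the pipeline without the UI:
#     print(transcribe_audio("sample.wav"))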
# Define the Gradio interface.
interface = gr.Interface(
    fn=transcribe_audio,
    inputs=gr.Audio(type="filepath"),
    outputs=gr.Textbox(label="Transcription", elem_id="output-text", interactive=True),
    title="Soorani Audio Transcription",
    description="Upload an audio file or record audio to get the transcription.",
    css=css,
)
# Launch the Gradio app.
if __name__ == "__main__":
    interface.launch()