# conformer-asr / app.py
# Hugging Face Space (by RamAnanth1) demonstrating AssemblyAI's Conformer-1
# speech-recognition API: transcription, summarization, sentiment analysis,
# PII redaction, and other audio-intelligence features.
import gradio as gr
import os
import json
import requests
import time
# AssemblyAI transcript endpoint (where we submit the file)
transcript_endpoint = "https://api.assemblyai.com/v2/transcript"
# Upload endpoint: local audio files are streamed here first; the response
# contains a temporary URL that can be used as "audio_url" in a transcript job.
upload_endpoint = "https://api.assemblyai.com/v2/upload"
# Shared request headers for every API call. The API key is read once at
# import time from the ASSEMBLYAI_KEY environment variable (raises KeyError
# if it is not set, which fails fast at startup).
headers={
"Authorization": os.environ["ASSEMBLYAI_KEY"],
"Content-Type": "application/json"
}
# Helper function to upload data
def _read_file(filename, chunk_size=5242880):
with open(filename, "rb") as f:
while True:
data = f.read(chunk_size)
if not data:
break
yield data
def get_transcript_url(url, audio_intelligence_options=None):
    """Submit a remote audio URL to AssemblyAI and poll until the job finishes.

    Parameters
    ----------
    url : str
        Publicly reachable URL of the audio file to transcribe.
    audio_intelligence_options : list[str] | None, optional
        Reserved for toggling audio-intelligence features from the UI
        (currently a fixed feature set is always enabled). Defaults to
        None so the Gradio callbacks — which pass only the URL — work.

    Returns
    -------
    tuple[str, str, str]
        (transcript text, summary, formatted sentiment-analysis report).

    Raises
    ------
    requests.HTTPError
        If the job-submission request is rejected by the API.
    RuntimeError
        If the transcription job finishes with status "error".
    """
    # Request payload: which file to transcribe and which features to enable.
    # (Named "payload" rather than "json" to avoid shadowing the json module.)
    payload = {
        # URL of the audio file to process
        "audio_url": url,
        # Speaker labels: identify distinct speakers in the audio
        "speaker_labels": True,
        # Custom vocabulary boost
        "word_boost": ["assembly ai"],
        # Custom spelling corrections applied to the transcript
        "custom_spelling": [
            {"from": ["assembly AI"], "to": "AssemblyAI"},
            {"from": ["assembly AI's"], "to": "AssemblyAI's"},
        ],
        # PII redaction with explicit policies, also redacted in the audio
        "redact_pii": True,
        "redact_pii_policies": ["drug", "injury", "person_name"],
        "redact_pii_audio": True,
        # Auto highlights (key phrases)
        "auto_highlights": True,
        # Content moderation
        "content_safety": True,
        # Topic detection
        "iab_categories": True,
        # Sentiment analysis (consumed below for the report)
        "sentiment_analysis": True,
        # Summarization configuration (consumed below)
        "summarization": True,
        "summary_model": "informative",
        "summary_type": "bullets",
        # Entity detection
        "entity_detection": True,
    }
    # Submit the job; headers carry the account's API key.
    response = requests.post(transcript_endpoint, json=payload, headers=headers)
    # Fail loudly on a rejected submission instead of KeyError'ing on 'id' below.
    response.raise_for_status()
    polling_endpoint = f"https://api.assemblyai.com/v2/transcript/{response.json()['id']}"
    # Poll every 3 seconds until the job completes or errors out.
    while True:
        transcription_result = requests.get(polling_endpoint, headers=headers).json()
        if transcription_result['status'] == 'completed':
            break
        elif transcription_result['status'] == 'error':
            raise RuntimeError(f"Transcription failed: {transcription_result['error']}")
        time.sleep(3)
    # Format one TEXT / SENTIMENT / CONFIDENCE triple per analyzed sentence.
    # Built with join() rather than repeated += (avoids quadratic behavior).
    report_lines = []
    for elt in transcription_result['sentiment_analysis_results']:
        report_lines.append("TEXT: " + elt['text'] + "\n")
        report_lines.append("SENTIMENT: " + elt['sentiment'] + "\n")
        report_lines.append("CONFIDENCE: " + str(round(float(elt['confidence']), 2)) + "\n")
    sentiment_analysis_result = "".join(report_lines)
    return transcription_result['text'], transcription_result['summary'], sentiment_analysis_result
def get_transcript_file(filename):
    """Upload a local audio file to AssemblyAI, transcribe it, and poll until done.

    Parameters
    ----------
    filename : str
        Path to a local audio file (Gradio supplies a temp-file path).

    Returns
    -------
    str
        The completed transcript text.

    Raises
    ------
    requests.HTTPError
        If the upload or job-submission request is rejected by the API.
    RuntimeError
        If the transcription job finishes with status "error".
    """
    # Stream the file to the upload endpoint; the response contains a
    # temporary URL usable as "audio_url" in the transcript request below.
    upload_response = requests.post(
        upload_endpoint,
        headers=headers,
        data=_read_file(filename))
    upload_response.raise_for_status()
    # Request payload: which file to transcribe and which features to enable.
    # (Named "payload" rather than "json" to avoid shadowing the json module.)
    payload = {
        # Temporary URL of the uploaded audio
        "audio_url": upload_response.json()['upload_url'],
        # Speaker labels: identify distinct speakers in the audio
        "speaker_labels": True,
        # Custom vocabulary boost
        "word_boost": ["assembly ai"],
        # Custom spelling corrections applied to the transcript
        "custom_spelling": [
            {"from": ["assembly AI"], "to": "AssemblyAI"},
            {"from": ["assembly AI's"], "to": "AssemblyAI's"},
        ],
        # PII redaction with explicit policies, also redacted in the audio
        "redact_pii": True,
        "redact_pii_policies": ["drug", "injury", "person_name"],
        "redact_pii_audio": True,
        # Auto highlights (key phrases)
        "auto_highlights": True,
        # Content moderation
        "content_safety": True,
        # Topic detection
        "iab_categories": True,
        # Sentiment analysis
        "sentiment_analysis": True,
        # Summarization configuration
        "summarization": True,
        "summary_model": "informative",
        "summary_type": "bullets",
        # Entity detection
        "entity_detection": True,
    }
    # Submit the job; headers carry the account's API key.
    response = requests.post(transcript_endpoint, json=payload, headers=headers)
    # Fail loudly on a rejected submission instead of KeyError'ing on 'id' below.
    response.raise_for_status()
    polling_endpoint = f"https://api.assemblyai.com/v2/transcript/{response.json()['id']}"
    # Poll every 3 seconds until the job completes or errors out.
    while True:
        transcription_result = requests.get(polling_endpoint, headers=headers).json()
        if transcription_result['status'] == 'completed':
            break
        elif transcription_result['status'] == 'error':
            raise RuntimeError(f"Transcription failed: {transcription_result['error']}")
        time.sleep(3)
    return transcription_result['text']
# Audio-intelligence feature names for a UI CheckboxGroup.
# NOTE(review): currently unused — the CheckboxGroup that would consume this
# list is commented out in the UI below.
audio_intelligence_list = [
"Summarization",
"Sentiment Analysis"
]
# Page header (rendered via gr.HTML) and intro text (rendered via gr.Markdown).
title = """<h1 align="center">🔥Conformer-1 API </h1>"""
description = """
### In this demo, you can explore the outputs of a Conformer-1 Speech Recognition Model from AssemblyAI.
"""
# Build the Gradio interface: two tabs (remote URL vs. local file upload)
# that share the same output textboxes.
with gr.Blocks() as demo:
    gr.HTML(title)
    gr.Markdown(description)
    with gr.Column(elem_id = "col_container"):
        with gr.Tab("Audio URL file"):
            inputs = gr.Textbox(label = "Enter the url for the audio file")
            #audio_intelligence_options = gr.CheckboxGroup(audio_intelligence_list, label="Audio Intelligence Options")
            b1 = gr.Button('Transcribe')
        with gr.Tab("Upload Audio as File"):
            audio_input_u = gr.Audio(label = 'Upload Audio',source="upload",type="filepath")
            transcribe_audio_u = gr.Button('Transcribe')
        # Outputs shared by both tabs (summary/sentiment are only filled by the URL path).
        transcript = gr.Textbox(label = "Transcript Result" )
        summary = gr.Textbox(label = "Summary Result")
        sentiment_analysis = gr.Textbox(label = "Sentiment Analysis Result" )
    # Wire events: pressing Enter in the URL box or clicking either button
    # runs the matching transcription function.
    # NOTE(review): these callbacks pass a single input component, but
    # get_transcript_url is declared with two positional parameters — confirm
    # the second parameter has a default value, otherwise each invocation
    # raises TypeError.
    inputs.submit(get_transcript_url, [inputs], [transcript, summary, sentiment_analysis])
    b1.click(get_transcript_url, [inputs], [transcript, summary, sentiment_analysis])
    transcribe_audio_u.click(get_transcript_file, [audio_input_u], [transcript])
    # Cached example: a public MP3 from AssemblyAI's tutorial repository.
    examples = gr.Examples(examples = [["https://github.com/AssemblyAI-Examples/assemblyai-and-python-in-5-minutes/blob/main/audio.mp3?raw=true"]], inputs = inputs, outputs=[transcript, summary, sentiment_analysis], cache_examples = True, fn = get_transcript_url)

# Queue requests so long-running transcription jobs don't hit request timeouts.
demo.queue().launch(debug=True)