import os
import gradio as gr
import whisper
from gtts import gTTS
import almlapi  # Assumed Python client for the AL/ML API; swap in the provider's actual SDK

# Set your AL/ML API key for authentication (placeholder shown; do not commit real keys to source control)
os.environ.setdefault("ALML_API_KEY", "your_alml_api_key_here")

# Load the Whisper model for audio transcription
model = whisper.load_model("base")

# Function to process audio and interact with the AL/ML API
def process_audio(file_path):
    try:
        # Load and transcribe audio using Whisper
        audio = whisper.load_audio(file_path)
        result = model.transcribe(audio)
        text = result["text"]

        # Call OpenAI o1 model via AL/ML API for problem-solving
        response = almlapi.call_o1(
            api_key=os.environ.get("ALML_API_KEY"),
            prompt=text,
            model="o1"  # Model name, adjust according to AL/ML documentation
        )

        # Extract the response message
        response_message = response["generated_text"].strip()

        # Convert the response message to speech with gTTS and save it as an MP3 file
        tts = gTTS(response_message)
        tts.save("response.mp3")

        # Return the response text and audio file path
        return response_message, "response.mp3"

    except Exception as e:
        # Handle exceptions
        return f"An error occurred: {e}", None

# Interface configurations (UI)
title = "Voice-to-Voice AI Chatbot with AL/ML API"
description = "Developed by [Adnan Tariq](https://www.linkedin.com/in/adnaantariq/) with ❤️"
article = "### Instructions\n1. Upload an audio file.\n2. Wait for the transcription.\n3. Listen to the chatbot's response."

# Gradio interface setup
iface = gr.Interface(
    fn=process_audio,
    inputs=gr.Audio(type="filepath"),  # Upload audio via file path
    outputs=[gr.Textbox(label="Response Text"), gr.Audio(label="Response Audio")],
    live=True,
    title=title,
    description=description,
    theme="dark",
    article=article
)

# Launch the Gradio app
iface.launch()
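
# Note: when running outside Hugging Face Spaces, a temporary public URL can be
# requested by launching with sharing enabled instead:
# iface.launch(share=True)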