adnaan05 committed
Commit 225463a · verified · 1 Parent(s): e7f2891

Update app.py

Files changed (1): app.py +32 -16
app.py CHANGED
@@ -3,31 +3,47 @@ import gradio as gr
 import whisper
 from gtts import gTTS
 import io
-import almlapi  # Assuming AL/ML has a Python API library
-
-# Set your AL/ML API key for authentication
-os.environ["ALML_API_KEY"] = "701b35863e6d4a7b81bdcad2e6f3c880"
+from openai import OpenAI  # Import OpenAI client for AI/ML API calls
+
+# Set the base URL and API key for the AI/ML API
+base_url = "https://api.aimlapi.com/v1"
+api_key = os.getenv("AIML_API_KEY")  # Make sure to set this environment variable
+
+# Initialize the OpenAI client with the custom base URL
+api = OpenAI(api_key=api_key, base_url=base_url)
 
 # Load the Whisper model for audio transcription
 model = whisper.load_model("base")
 
-# Function to process audio and interact with the AL/ML API
+# Function to make a chat completion call to the AI/ML API
+def call_aiml_api(user_prompt, system_prompt="You are a helpful assistant."):
+    try:
+        completion = api.chat.completions.create(
+            model="mistralai/Mistral-7B-Instruct-v0.2",  # Specify the model from AI/ML
+            messages=[
+                {"role": "system", "content": system_prompt},
+                {"role": "user", "content": user_prompt},
+            ],
+            temperature=0.7,
+            max_tokens=256,
+        )
+
+        # Return the response from the AI model
+        return completion.choices[0].message.content.strip()
+
+    except Exception as e:
+        raise Exception(f"API request failed with error: {e}")
+
+# Function to process audio and interact with the AI/ML API
 def process_audio(file_path):
     try:
         # Load and transcribe audio using Whisper
         audio = whisper.load_audio(file_path)
         result = model.transcribe(audio)
-        text = result["text"]
-
-        # Call the OpenAI o1 model via the AL/ML API for problem-solving
-        response = almlapi.call_o1(
-            api_key=os.environ.get("ALML_API_KEY"),
-            prompt=text,
-            model="o1"  # Model name; adjust according to the AL/ML documentation
-        )
-
-        # Extract the response message
-        response_message = response["generated_text"].strip()
+        user_prompt = result["text"]
+
+        # Call the AI/ML API to get a response
+        response_message = call_aiml_api(user_prompt)
 
         # Convert the response message to speech using gTTS
         tts = gTTS(response_message)
@@ -43,11 +59,11 @@ def process_audio(file_path):
         return response_message, "response.mp3"
 
     except Exception as e:
-        # Handle exceptions
+        # Handle any errors
        return f"An error occurred: {e}", None
 
 # Interface configurations (UI)
-title = "Voice-to-Voice AI Chatbot with AL/ML API"
+title = "Voice-to-Voice AI Chatbot with AI/ML API"
 description = "Developed by [Adnan Tariq](https://www.linkedin.com/in/adnaantariq/) with ❤️"
 article = "### Instructions\n1. Upload an audio file.\n2. Wait for the transcription.\n3. Listen to the chatbot's response."
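
For reviewers who want to exercise the new API path outside the Gradio app, here is a minimal standalone sketch of the chat-completion call introduced in this commit. It assumes the `openai` Python package (v1+) is installed and that a valid key is exported as the `AIML_API_KEY` environment variable; the base URL and model name are taken from the diff above, and the test prompt is illustrative only.

```python
import os
from openai import OpenAI

# Client pointed at the AI/ML API's OpenAI-compatible endpoint (from the diff).
client = OpenAI(
    api_key=os.getenv("AIML_API_KEY"),  # export AIML_API_KEY=... beforehand
    base_url="https://api.aimlapi.com/v1",
)

# One-off chat completion mirroring call_aiml_api() in app.py.
completion = client.chat.completions.create(
    model="mistralai/Mistral-7B-Instruct-v0.2",
    messages=[
        {"role": "system", "content": "You are a helpful assistant."},
        {"role": "user", "content": "Say hello in one sentence."},  # illustrative prompt
    ],
    temperature=0.7,
    max_tokens=256,
)
print(completion.choices[0].message.content.strip())
```

Reading the key with `os.getenv("AIML_API_KEY")` rather than hardcoding it, as the removed lines did, keeps the secret out of the repository and matches how Hugging Face Spaces exposes secrets to the app as environment variables.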