shukdevdatta123 committed
Commit e0a6679 · verified
1 Parent(s): 5c41df1

Update app.py

Files changed (1)
  1. app.py +32 -21
app.py CHANGED
@@ -1,29 +1,38 @@
 import streamlit as st
 from audio_recorder_streamlit import audio_recorder
-import openai
-API_KEY = 'enter-openai-api-key-here'
+import openai  # Corrected import
+
+API_KEY = 'enter-openai-api-key-here'
+
+# Set the API key
+openai.api_key = API_KEY
-
-
 
 def transcribe_text_to_voice(audio_location):
-    client = openai(api_key=API_KEY)
-    audio_file= open(audio_location, "rb")
-    transcript = client.audio.transcriptions.create(model="whisper-1", file=audio_file)
-    return transcript.text
+    # Transcribe audio to text using Whisper API
+    with open(audio_location, "rb") as audio_file:
+        transcript = openai.Audio.transcriptions.create(
+            model="whisper-1", file=audio_file
+        )
+    return transcript['text']
 
 def chat_completion_call(text):
-    client = openai(api_key=API_KEY)
-    messages = [{"role": "user", "content": text}]
-    response = client.chat.completions.create(model="gpt-3.5-turbo-1106", messages=messages)
-    return response.choices[0].message.content
-
+    # Send the text to GPT-3.5 for chat completion
+    response = openai.ChatCompletion.create(
+        model="gpt-3.5-turbo-1106",
+        messages=[{"role": "user", "content": text}]
+    )
+    return response['choices'][0]['message']['content']
 
 def text_to_speech_ai(speech_file_path, api_response):
-    client = openai(api_key=API_KEY)
-    response = client.audio.speech.create(model="tts-1",voice="nova",input=api_response)
-    response.stream_to_file(speech_file_path)
-
-
+    # Convert the text to speech using OpenAI's TTS API
+    response = openai.Audio.speech.create(
+        model="text-to-speech-1",  # TTS model name may differ
+        voice="nova",  # Specify a voice (choose one that is available)
+        input=api_response
+    )
+    # Save the speech response to a file
+    with open(speech_file_path, "wb") as f:
+        f.write(response['audio'])
 
 st.title("🧑‍💻 Skolo Online 💬 Talking Assistant")
 
@@ -33,20 +42,22 @@ Hi🤖 just click on the voice recorder and let me know how I can help you today
 
 audio_bytes = audio_recorder()
 if audio_bytes:
-    ##Save the Recorded File
+    # Save the Recorded Audio File
     audio_location = "audio_file.wav"
     with open(audio_location, "wb") as f:
         f.write(audio_bytes)
 
-    #Transcribe the saved file to text
+    # Transcribe the saved file to text
     text = transcribe_text_to_voice(audio_location)
     st.write(text)
 
-    #Use API to get an AI response
+    # Get AI response from GPT-3.5
     api_response = chat_completion_call(text)
     st.write(api_response)
 
-    # Read out the text response using tts
+    # Convert the response to speech and save it as a file
     speech_file_path = 'audio_response.mp3'
     text_to_speech_ai(speech_file_path, api_response)
-    st.audio(speech_file_path)
+
+    # Play the audio response in the app
+    st.audio(speech_file_path)
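
Note on the SDK calls used above: the updated helpers go through module-level names (openai.Audio.transcriptions.create, openai.ChatCompletion.create, openai.Audio.speech.create), and the in-code comments already flag that the TTS model name may differ. Purely as a hedged reference, and not the code committed here, the same three helpers written against the openai>=1.0 Python client would look roughly like the sketch below; the model names "whisper-1" and "gpt-3.5-turbo-1106" come from this diff, while "tts-1" and the voice "nova" are carried over from the pre-commit version and should be checked against the installed SDK and account.

# Sketch only, assuming the openai>=1.0 Python SDK; verify model/voice names before use.
from openai import OpenAI

client = OpenAI(api_key=API_KEY)  # reuses the API_KEY defined at the top of app.py

def transcribe_text_to_voice(audio_location):
    # Transcribe the recorded audio with Whisper
    with open(audio_location, "rb") as audio_file:
        transcript = client.audio.transcriptions.create(model="whisper-1", file=audio_file)
    return transcript.text

def chat_completion_call(text):
    # Send the transcribed text to the chat model and return its reply
    response = client.chat.completions.create(
        model="gpt-3.5-turbo-1106",
        messages=[{"role": "user", "content": text}],
    )
    return response.choices[0].message.content

def text_to_speech_ai(speech_file_path, api_response):
    # Convert the reply to speech ("tts-1"/"nova" as in the pre-commit version) and save it
    speech = client.audio.speech.create(model="tts-1", voice="nova", input=api_response)
    speech.stream_to_file(speech_file_path)

The Streamlit flow in the diff (record, save, transcribe, chat, synthesize, st.audio) stays the same with either client style; only the helper bodies differ.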