shukdevdatta123 committed on
Commit
afe4a62
·
verified ·
1 Parent(s): 22697aa

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +48 -70
app.py CHANGED
@@ -1,70 +1,48 @@
1
- import streamlit as st
2
- import pandas as pd
3
- import openai
4
- from text_speech_utils import * # Assuming this module exists for your audio functionality
5
-
6
- # Define filenames for audio and conversation output
7
- input_audio_filename = 'input.wav'
8
- output_audio_filename = 'chatgpt_response.wav'
9
- output_conversation_filename = 'ChatGPT_conversation.txt'
10
-
11
- # Initialize app
12
- if 'messages' not in st.session_state:
13
- st.session_state['messages'] = [{"role": "system", "content": "You are a helpful assistant."}]
14
-
15
- # Allow user to input OpenAI API Key via Streamlit text input
16
- openai.api_key = st.text_input("Enter your OpenAI API Key", type="password")
17
-
18
- # Display a warning if API key is not provided
19
- if not openai.api_key:
20
- st.warning("Please enter your OpenAI API key to proceed.")
21
-
22
- # UI components
23
- st.title("My awesome personal assistant")
24
- sec = st.slider("Select number of seconds of recording", min_value=2, max_value=8, value=5)
25
-
26
- # Record audio + transcribe with Whisper + get GPT-3 response
27
- if st.button('Record audio'):
28
- if openai.api_key: # Proceed only if API key is provided
29
- st.write("Recording...")
30
- record_audio(input_audio_filename, sec)
31
-
32
- transcription = transcribe_audio(input_audio_filename)
33
- st.write(f"Me: {transcription['text']}")
34
- st.session_state['messages'].append({"role": "user", "content": transcription['text']})
35
-
36
- bot = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=st.session_state['messages'])
37
- response = bot.choices[0].message.content
38
- st.write(f"GPT: {response}")
39
-
40
- save_text_as_audio(response, output_audio_filename)
41
- play_audio(output_audio_filename)
42
-
43
- st.session_state['messages'].append({"role": "assistant", "content": response})
44
- else:
45
- st.error("API key is required to interact with GPT.")
46
-
47
- # # Download conversation button
48
- # st.download_button(label="Download conversation",
49
- # data=pd.DataFrame(st.session_state['messages']).to_csv(index=False).encode('utf-8'),
50
- # file_name=output_conversation_filename)
51
-
52
- # Function to generate conversation as plain text
53
- def generate_conversation_text(messages):
54
- conversation_text = ""
55
- for message in messages:
56
- if message["role"] == "user":
57
- conversation_text += f"Me: {message['content']}\n"
58
- elif message["role"] == "assistant":
59
- conversation_text += f"GPT: {message['content']}\n"
60
- elif message["role"] == "system":
61
- conversation_text += f"System: {message['content']}\n"
62
- return conversation_text
63
-
64
- # Download conversation button
65
- st.download_button(
66
- label="Download conversation",
67
- data=generate_conversation_text(st.session_state['messages']).encode('utf-8'),
68
- file_name=output_conversation_filename,
69
- mime="text/plain"
70
- )
 
1
+ import streamlit as st
2
+ from tempfile import NamedTemporaryFile
3
+ from audiorecorder import audiorecorder
4
+ from whispercpp import Whisper
5
+
6
+ # Download whisper.cpp
7
+ w = Whisper('tiny')
8
+
9
+ def inference(audio):
10
+ # Save audio to a file:
11
+ with NamedTemporaryFile(suffix=".mp3") as temp:
12
+ with open(f"{temp.name}", "wb") as f:
13
+ f.write(audio.tobytes())
14
+ result = w.transcribe(f"{temp.name}")
15
+ text = w.extract_text(result)
16
+ return text[0]
17
+
18
+ # Streamlit
19
+ with st.sidebar:
20
+ audio = audiorecorder("Click to send voice message", "Recording... Click when you're done", key="recorder")
21
+ st.title("Echo Bot with Whisper")
22
+
23
+ # Initialize chat history
24
+ if "messages" not in st.session_state:
25
+ st.session_state.messages = []
26
+
27
+ # Display chat messages from history on app rerun
28
+ for message in st.session_state.messages:
29
+ with st.chat_message(message["role"]):
30
+ st.markdown(message["content"])
31
+
32
+ # React to user input
33
+ if (prompt := st.chat_input("Your message")) or len(audio):
34
+ # If it's coming from the audio recorder transcribe the message with whisper.cpp
35
+ if len(audio)>0:
36
+ prompt = inference(audio)
37
+
38
+ # Display user message in chat message container
39
+ st.chat_message("user").markdown(prompt)
40
+ # Add user message to chat history
41
+ st.session_state.messages.append({"role": "user", "content": prompt})
42
+
43
+ response = f"Echo: {prompt}"
44
+ # Display assistant response in chat message container
45
+ with st.chat_message("assistant"):
46
+ st.markdown(response)
47
+ # Add assistant response to chat history
48
+ st.session_state.messages.append({"role": "assistant", "content": response})