Update app.py
app.py
CHANGED
@@ -1,6 +1,8 @@
 from flask import Flask, render_template_string, request, jsonify
 from datetime import datetime
-import speech_recognition as sr  # Import
+import speech_recognition as sr  # Import speech recognition
+from tempfile import NamedTemporaryFile
+import os
 
 app = Flask(__name__)
 
@@ -27,11 +29,9 @@ html_code = """
             height: 100vh;
             margin: 0;
         }
-
         h1 {
             color: #333;
         }
-
         .mic-button {
             width: 80px;
             height: 80px;
@@ -47,22 +47,18 @@ html_code = """
             box-shadow: 0px 4px 6px rgba(0, 0, 0, 0.1);
             transition: background-color 0.3s;
         }
-
         .mic-button:hover {
             background-color: #0056b3;
         }
-
         .status {
             margin-top: 20px;
             font-size: 18px;
             color: #666;
         }
-
         .listening {
             color: green;
             font-weight: bold;
         }
-
         .response {
             margin-top: 20px;
             padding: 10px;
@@ -80,19 +76,15 @@ html_code = """
     <button class="mic-button" id="mic-button">🎤</button>
     <div class="status" id="status">Press the mic button to start listening...</div>
     <div class="response" id="response" style="display: none;">Response will appear here...</div>
-
     <script>
         const micButton = document.getElementById('mic-button');
         const status = document.getElementById('status');
        const response = document.getElementById('response');
-
        if (!window.MediaRecorder) {
            alert("Your browser does not support audio recording.");
        }
-
        let mediaRecorder;
        let audioChunks = [];
-
        micButton.addEventListener('click', async () => {
            navigator.mediaDevices.getUserMedia({ audio: true })
                .then(stream => {
@@ -100,30 +92,25 @@ html_code = """
                    mediaRecorder.start();
                    status.textContent = 'Listening...';
                    status.classList.add('listening');
-
+                    audioChunks = [];
                    mediaRecorder.ondataavailable = event => {
                        audioChunks.push(event.data);
                    };
-
                    mediaRecorder.onstop = async () => {
-                        const audioBlob = new Blob(audioChunks, { type: 'audio/wav' });
+                        const audioBlob = new Blob(audioChunks, { type: 'audio/wav; codecs=LINEAR16' });
                        const formData = new FormData();
                        formData.append('audio', audioBlob);
-
                        status.textContent = 'Processing...';
                        status.classList.remove('listening');
-
                        try {
                            const result = await fetch('/process-audio', {
                                method: 'POST',
                                body: formData,
                            });
-
                            const data = await result.json();
                            response.textContent = data.response;
                            response.style.display = 'block';
                            status.textContent = 'Press the mic button to start listening...';
-
                            // Use browser text-to-speech
                            const utterance = new SpeechSynthesisUtterance(data.response);
                            speechSynthesis.speak(utterance);
@@ -133,7 +120,6 @@ html_code = """
                            status.textContent = 'Press the mic button to start listening...';
                        }
                    };
-
                    setTimeout(() => {
                        mediaRecorder.stop();
                    }, 5000); // Stop recording after 5 seconds
@@ -155,12 +141,17 @@ def index():
 def process_audio():
     try:
         audio_file = request.files['audio']
+        # Save the audio file temporarily
+        temp_file = NamedTemporaryFile(delete=False, suffix=".wav")
+        audio_file.save(temp_file.name)
+
         recognizer = sr.Recognizer()
-        with sr.AudioFile(
+        with sr.AudioFile(temp_file.name) as source:
             audio_data = recognizer.record(source)
             command = recognizer.recognize_google(audio_data)
             response = process_command(command)
-
+        os.unlink(temp_file.name)  # Remove the temporary file
+        return jsonify({"response": response})
     except Exception as e:
         return jsonify({"response": f"An error occurred: {str(e)}"})
 
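A possible follow-up, not part of this commit: in the new handler, os.unlink() only runs when recognition succeeds, so a failed recognize_google() call leaves the temporary WAV behind. A minimal sketch of the same handler with cleanup moved into finally, assuming the same imports and the process_command() helper defined elsewhere in app.py:

# Sketch: same flow as the committed handler, with cleanup in finally
# so the temporary file is removed even when recognition raises.
def process_audio():
    temp_file = NamedTemporaryFile(delete=False, suffix=".wav")
    try:
        audio_file = request.files['audio']
        audio_file.save(temp_file.name)
        recognizer = sr.Recognizer()
        with sr.AudioFile(temp_file.name) as source:
            audio_data = recognizer.record(source)
            command = recognizer.recognize_google(audio_data)
            response = process_command(command)
        return jsonify({"response": response})
    except Exception as e:
        return jsonify({"response": f"An error occurred: {str(e)}"})
    finally:
        os.unlink(temp_file.name)  # deleted even when recognition fails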
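For a quick manual check of the new endpoint, one option is to post a prerecorded WAV file to it with the requests library. This is a sketch, assuming the app is served locally on Flask's default port 5000 and that sample.wav is a PCM WAV file speech_recognition can read; the filename and port are assumptions, not part of the commit.

# Post a prerecorded WAV file to /process-audio and print the reply.
# Assumes the Flask app is running locally on the default port 5000.
import requests

with open("sample.wav", "rb") as f:
    r = requests.post(
        "http://127.0.0.1:5000/process-audio",
        files={"audio": ("sample.wav", f, "audio/wav")},  # field name matches request.files['audio']
    )
print(r.json()["response"])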