// Voice-Bot-AI — custom.js
// (header reconstructed from web-page scrape residue: original lines were
// the repo title, author caption, and commit "153b8d5 verified", which are
// not valid JavaScript)
// custom.js
// Pick whichever SpeechRecognition constructor the browser exposes:
// the standard name first, then the WebKit-prefixed variant. In browsers
// that support neither, `new` throws — same behavior as before.
const SpeechRecognitionImpl = window.SpeechRecognition || window.webkitSpeechRecognition;
const recognition = new SpeechRecognitionImpl();
recognition.continuous = false; // end the session after a single utterance
recognition.lang = "en-US"; // recognize US English
// Initialize chat box scrolling.
/**
 * Scroll the chat box to its newest content.
 * No-op when #chatBox is not present in the DOM.
 */
function scrollChat() {
  const box = document.getElementById("chatBox");
  if (!box) {
    return;
  }
  box.scrollTop = box.scrollHeight;
}
// Observe chat box changes for auto-scroll.
// Fix: the original queried document.getElementById("chatBox") twice for
// the same element; look it up once and reuse it.
const observer = new MutationObserver(scrollChat);
const chatBoxEl = document.getElementById("chatBox");
if (chatBoxEl) {
  observer.observe(chatBoxEl, {
    childList: true, // fire when messages are added or removed
    subtree: true, // include mutations inside nested message nodes
  });
}
// Expose function to global scope.
/**
 * Start one speech-recognition session (intended as the mic button handler).
 * Requires the OpenAI API key field to be filled in first.
 *
 * Fixes vs. original:
 * - terminate the function-expression assignment with `;` (ASI hazard);
 * - guard recognition.start(), which throws InvalidStateError if the
 *   session is already running (e.g. the button is clicked twice).
 */
window.startListening = function () {
  const apiKey = document.querySelector("#apiKeyInput input")?.value;
  if (!apiKey) {
    alert("Please enter your OpenAI API key first!");
    return;
  }
  try {
    recognition.start();
  } catch (err) {
    // Already listening (or otherwise unable to start): log and keep the
    // current button state instead of surfacing an uncaught exception.
    console.error("Could not start speech recognition", err);
    return;
  }
  const micButton = document.getElementById("micButton");
  if (micButton) micButton.textContent = "πŸ”΄ Listening...";
};
// Copy the first recognition result into the voice-input text field and
// fire a "change" event so whatever listens on that input reacts to it.
recognition.onresult = (event) => {
  const spokenText = event.results[0][0].transcript;
  const inputField = document.querySelector("#voiceInput input");
  if (!inputField) {
    return;
  }
  inputField.value = spokenText;
  inputField.dispatchEvent(new Event("change"));
};
// Restore the mic button label when the recognition session finishes.
recognition.onend = () => {
  const btn = document.getElementById("micButton");
  if (btn) {
    btn.textContent = "🎀 Speak";
  }
};
// On recognition failure: log it, restore the mic button label, and show
// the error code to the user.
recognition.onerror = (event) => {
  console.error("Speech recognition error", event.error);
  const btn = document.getElementById("micButton");
  if (btn) {
    btn.textContent = "🎀 Speak";
  }
  alert("Speech recognition error: " + event.error);
};
// Initial scroll: give the page 500 ms to render before snapping the chat
// box to the bottom.
window.setTimeout(scrollChat, 500);