Spaces:
Sleeping
Sleeping
Upload 8 files
Browse files- Dockerfile +0 -0
- app.py +190 -0
- base_prompt.txt +84 -0
- readme.md +51 -0
- requirements.txt +19 -0
- static/script.js +260 -0
- static/styles.css +234 -0
- templates/index.html +59 -0
Dockerfile
ADDED
File without changes
|
app.py
ADDED
@@ -0,0 +1,190 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from flask import Flask, request, jsonify, render_template
|
2 |
+
from dotenv import load_dotenv
|
3 |
+
from groq import Groq
|
4 |
+
import os
|
5 |
+
import uuid
|
6 |
+
from gtts import gTTS
|
7 |
+
import io
|
8 |
+
import base64
|
9 |
+
import speech_recognition as sr
|
10 |
+
import tempfile
|
11 |
+
import json
|
12 |
+
|
13 |
+
try:
|
14 |
+
import pyaudio
|
15 |
+
except ImportError:
|
16 |
+
print("Warning: PyAudio not available, speech functionality will be limited")
|
17 |
+
|
18 |
+
# Initialize Flask app (serves /static/* for the browser UI)
app = Flask(__name__, static_folder='static')

# Load environment variables from a local .env file, if present
load_dotenv()

# Groq API Configuration
# NOTE(review): GROQ_API_KEY is None when the env var is unset; the Groq
# client is still constructed and only fails at request time — confirm
# whether a startup-time check is wanted.
GROQ_API_KEY = os.getenv("GROQ_API_KEY")
client = Groq(api_key=GROQ_API_KEY)
MODEL = "llama3-70b-8192"

# Initialize speech recognition (shared recognizer; settings are tweaked
# per-call in speech_to_text)
recognizer = sr.Recognizer()

# Store conversation history in process memory, keyed by conversation_id.
# Entries are never evicted, so this grows for the life of the process.
conversations = {}
|
34 |
+
|
35 |
+
def load_base_prompt():
    """Return the system prompt read from base_prompt.txt.

    Falls back to a generic assistant prompt when the file is missing so
    the app can still start without it.

    Returns:
        str: The stripped prompt text, or the default prompt on failure.
    """
    try:
        # Explicit encoding avoids platform-dependent decoding of the
        # prompt file (the default codec differs across OSes).
        with open("base_prompt.txt", "r", encoding="utf-8") as file:
            return file.read().strip()
    except FileNotFoundError:
        print("Error: base_prompt.txt file not found.")
        return "You are a helpful assistant for language learning."
|
42 |
+
|
43 |
+
# Load the base prompt
|
44 |
+
base_prompt = load_base_prompt()
|
45 |
+
|
46 |
+
def chat_with_groq(user_message, conversation_id=None):
    """Send a user message to the Groq chat model and return its reply.

    Args:
        user_message: The user's text.
        conversation_id: Optional key into the in-memory ``conversations``
            store; when given, history is loaded from and persisted to it.

    Returns:
        str: The assistant's reply, or an apology string on any failure.
    """
    try:
        # Work on a copy of the stored history so a failed API call does
        # not leave a dangling user message in the shared conversation
        # state (the original appended to the stored list in place).
        messages = list(conversations.get(conversation_id, []))
        if not messages:
            messages.append({"role": "system", "content": base_prompt})

        # Add user message
        messages.append({"role": "user", "content": user_message})

        # Get completion from Groq
        completion = client.chat.completions.create(
            model=MODEL,
            messages=messages,
            temperature=0.1,
            max_tokens=1024
        )

        # Add assistant's response to history
        assistant_message = completion.choices[0].message.content.strip()
        messages.append({"role": "assistant", "content": assistant_message})

        # Persist the updated history only after a successful completion.
        if conversation_id:
            conversations[conversation_id] = messages

        return assistant_message
    except Exception as e:
        print(f"Error in chat_with_groq: {str(e)}")
        return f"I apologize, but I'm having trouble responding right now. Error: {str(e)}"
|
76 |
+
|
77 |
+
def text_to_speech(text):
    """Synthesize ``text`` to MP3 audio using gTTS.

    Returns:
        io.BytesIO | None: An in-memory audio buffer rewound to the start,
        or None when synthesis fails (e.g. no network access).
    """
    try:
        buffer = io.BytesIO()
        gTTS(text=text, lang='en').write_to_fp(buffer)
        buffer.seek(0)
        return buffer
    except Exception as e:
        print(f"Error in text_to_speech: {str(e)}")
        return None
|
87 |
+
|
88 |
+
def speech_to_text(audio_file):
    """Transcribe an uploaded audio file to text via Google recognition.

    Args:
        audio_file: A werkzeug FileStorage-like object exposing ``save()``.

    Returns:
        The transcribed text, a human-readable error string for expected
        recognition failures, or None on unexpected errors.
        NOTE(review): callers only check falsiness, so the error strings
        flow onward as if they were real speech — confirm this is intended.
    """
    # Track the temp path explicitly so cleanup never references an
    # unbound name when NamedTemporaryFile itself fails (the original
    # masked that with a bare `except:`).
    temp_path = None
    try:
        # Save the upload to disk; sr.AudioFile needs a real WAV file path.
        with tempfile.NamedTemporaryFile(delete=False, suffix='.wav') as temp_audio:
            temp_path = temp_audio.name
            audio_file.save(temp_path)

        # Use SpeechRecognition to convert speech to text
        with sr.AudioFile(temp_path) as source:
            # Adjust recognition sensitivity for typical browser recordings.
            recognizer.dynamic_energy_threshold = True
            recognizer.energy_threshold = 4000

            # Record the entire audio file
            audio = recognizer.record(source)

            # Perform recognition
            text = recognizer.recognize_google(audio, language='en-US')
            return text

    except sr.UnknownValueError:
        return "Could not understand audio"
    except sr.RequestError as e:
        return f"Could not request results; {str(e)}"
    except Exception as e:
        print(f"Error in speech_to_text: {str(e)}")
        return None
    finally:
        # Best-effort cleanup of the temporary file.
        if temp_path:
            try:
                os.unlink(temp_path)
            except OSError:
                pass
|
120 |
+
|
121 |
+
@app.route('/')
def index():
    """Serve the single-page chat UI."""
    return render_template('index.html')
|
124 |
+
|
125 |
+
@app.route('/api/chat', methods=['POST'])
def chat():
    """Handle one text chat turn.

    Expects JSON ``{"message": str, "conversation_id": str?}``; returns
    JSON with the reply, the conversation id, and (when synthesis works)
    a base64-encoded MP3 under ``voice_response``.
    """
    try:
        # silent=True returns None instead of raising on a missing or
        # malformed JSON body, so bad requests get a 400 rather than
        # falling through to the generic 500 handler.
        data = request.get_json(silent=True)
        if not data:
            return jsonify({'error': 'No message provided'}), 400

        user_message = data.get('message', '')
        conversation_id = data.get('conversation_id', str(uuid.uuid4()))

        if not user_message:
            return jsonify({'error': 'No message provided'}), 400

        # Get response from Groq
        response = chat_with_groq(user_message, conversation_id)

        # Generate voice response
        audio_io = text_to_speech(response)
        result = {
            'response': response,
            'conversation_id': conversation_id
        }

        if audio_io:
            # Base64-encode the MP3 bytes so they fit in the JSON payload.
            audio_base64 = base64.b64encode(audio_io.getvalue()).decode('utf-8')
            result['voice_response'] = audio_base64

        return jsonify(result)

    except Exception as e:
        return jsonify({'error': str(e)}), 500
|
153 |
+
|
154 |
+
@app.route('/api/voice', methods=['POST'])
def handle_voice():
    """Handle one voice chat turn.

    Expects multipart form data with an ``audio`` file part and an optional
    ``conversation_id`` field; returns JSON with the transcript, the reply,
    the conversation id, and (when synthesis works) a base64 MP3 under
    ``voice_response``.
    """
    try:
        if 'audio' not in request.files:
            return jsonify({'error': 'No audio file provided'}), 400

        audio_file = request.files['audio']
        conversation_id = request.form.get('conversation_id', str(uuid.uuid4()))

        # Convert speech to text.
        # NOTE(review): speech_to_text can also return error strings such as
        # "Could not understand audio"; those are truthy, pass the check
        # below, and get sent to the model as if they were user speech —
        # confirm whether that is intended.
        text = speech_to_text(audio_file)

        if not text:
            return jsonify({'error': 'Could not transcribe audio'}), 400

        # Get response from Groq
        response = chat_with_groq(text, conversation_id)

        # Generate voice response
        audio_io = text_to_speech(response)
        result = {
            'text': text,
            'response': response,
            'conversation_id': conversation_id
        }

        if audio_io:
            # Base64 so the MP3 bytes can ride inside the JSON response.
            audio_base64 = base64.b64encode(audio_io.getvalue()).decode('utf-8')
            result['voice_response'] = audio_base64

        return jsonify(result)

    except Exception as e:
        return jsonify({'error': str(e)}), 500
|
188 |
+
|
189 |
+
if __name__ == '__main__':
    # Bind to all interfaces; 7860 is the conventional Hugging Face Spaces
    # port (this repo is a Spaces upload).
    app.run(host='0.0.0.0', port=7860)
|
base_prompt.txt
ADDED
@@ -0,0 +1,84 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
|
2 |
+
# Luna AI: Conversational Language Learning Assistant
|
3 |
+
|
4 |
+
## Core Interaction Philosophy
|
5 |
+
- Act as an adaptive, intelligent language learning companion.
|
6 |
+
- Prioritize natural, context-rich conversations.
|
7 |
+
- Provide immediate, constructive feedback.
|
8 |
+
- Make language learning engaging and personalized.
|
9 |
+
|
10 |
+
## Communication Style
|
11 |
+
- Use concise, age- and level-appropriate language.
|
12 |
+
- Break complex explanations into digestible segments.
|
13 |
+
- Ask follow-up questions to maintain engagement.
|
14 |
+
- Encourage exploration and curiosity about language.
|
15 |
+
|
16 |
+
## Feedback Mechanism
|
17 |
+
- Highlight grammatical or pronunciation errors gently.
|
18 |
+
- Provide clear, concise explanations of corrections.
|
19 |
+
- Offer alternative phrasings and contextual usage.
|
20 |
+
- Praise effort and progress consistently.
|
21 |
+
|
22 |
+
## Interactive Learning Strategies
|
23 |
+
1. Scenario-based Learning
|
24 |
+
- Present realistic conversation scenarios.
|
25 |
+
- Guide users through practical language applications.
|
26 |
+
- Simulate real-world communication challenges.
|
27 |
+
|
28 |
+
2. Progressive Complexity
|
29 |
+
- Start with basic structures.
|
30 |
+
- Gradually introduce more complex linguistic elements.
|
31 |
+
- Adapt difficulty based on user's responses and skill.
|
32 |
+
|
33 |
+
3. Multimodal Learning Support
|
34 |
+
- Incorporate vocabulary explanations.
|
35 |
+
- Provide cultural context for language expressions.
|
36 |
+
- Suggest additional learning resources.
|
37 |
+
- Offer pronunciation tips and audio references.
|
38 |
+
|
39 |
+
## Specific Interaction Guidelines
|
40 |
+
- Keep responses concise: 50-100 words maximum.
|
41 |
+
- Use a warm, encouraging, and patient tone.
|
42 |
+
- Maintain a balance between correction and motivation.
|
43 |
+
- Avoid overwhelming the user with too much information.
|
44 |
+
|
45 |
+
## Response Format Example
|
46 |
+
When a user makes a grammatical error, respond with:
|
47 |
+
1. Acknowledgment of their attempt.
|
48 |
+
2. Corrected version.
|
49 |
+
3. Brief explanation.
|
50 |
+
4. Encouraging follow-up question.
|
51 |
+
|
52 |
+
### Example Interaction
|
53 |
+
User: "I go to park yesterday."
|
54 |
+
Response: "Great attempt! You meant to say 'I went to the park yesterday.' Notice how 'go' changes to 'went' for past tense. What fun activities do you enjoy doing in the park?"
|
55 |
+
|
56 |
+
## Continuous Adaptation
|
57 |
+
- Read between the lines of user's messages.
|
58 |
+
- Detect frustration, confusion, or disengagement.
|
59 |
+
- Dynamically adjust teaching approach.
|
60 |
+
- Celebrate small victories in the language learning journey.
|
61 |
+
|
62 |
+
## Creator
|
63 |
+
- Created by "Ansh Arora," who is passionate about AI and data science.
|
64 |
+
- Ansh created you to help others learn language and improve communication skills.
|
65 |
+
|
66 |
+
## Creator Authentication
|
67 |
+
If a user says "I am Ansh" or "I am creator," respond:
|
68 |
+
"Please provide your password to confirm, sir."
|
69 |
+
If the user says "admin 001," respond: "Welcome back, Ansh!"
|
70 |
+
Otherwise, respond: "Sorry, you're not Ansh. Let's continue our chat. What would you like to learn today?"
|
71 |
+
|
72 |
+
## Interview Preparation
|
73 |
+
You can act as an interview coach! If a user wants to practice for an interview:
|
74 |
+
1. Ask them the role they are preparing for.
|
75 |
+
2. Provide specific, role-related questions (e.g., behavioral, technical, situational).
|
76 |
+
3. Give concise feedback on their responses and offer suggestions for improvement.
|
77 |
+
|
78 |
+
## User Preference Behavior
|
79 |
+
Ask users how they prefer responses and adapt to their desired interaction style.
|
80 |
+
|
81 |
+
## Notes:
|
82 |
+
- Use a structured format for questions (e.g., STAR method for behavioral questions).
|
83 |
+
- After 3-5 interactions, ask users for feedback on your responses.
|
84 |
+
- Keep responses concise (50-100 words) to maintain engagement.
|
readme.md
ADDED
@@ -0,0 +1,51 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
# Luna AI: Conversational Language Learning Assistant
|
2 |
+
|
3 |
+
## Project Overview
|
4 |
+
Luna AI is an advanced, voice-enabled conversational AI designed to revolutionize language learning through interactive, personalized communication.
|
5 |
+
|
6 |
+
## What is Luna AI?
|
7 |
+
Luna AI is more than just a chatbot – it's an intelligent language learning companion that transforms traditional language learning into an engaging, adaptive experience. Powered by cutting-edge AI technology, Luna provides:
|
8 |
+
- Real-time conversational practice
|
9 |
+
- Personalized language learning support
|
10 |
+
- Intelligent error correction
|
11 |
+
- Multimodal learning experiences
|
12 |
+
|
13 |
+
## Unique Voice Features
|
14 |
+
### Advanced Voice Capabilities
|
15 |
+
- Seamless speech-to-text conversion
|
16 |
+
- Intelligent text-to-speech responses
|
17 |
+
- Real-time voice interaction
|
18 |
+
- Adaptive listening and speaking modes
|
19 |
+
|
20 |
+
### Comparative Advantages
|
21 |
+
Unlike traditional chatbots, Luna AI:
|
22 |
+
- Provides context-aware language corrections
|
23 |
+
- Adapts difficulty in real-time
|
24 |
+
- Offers cultural and linguistic insights
|
25 |
+
- Supports multi-dimensional learning strategies
|
26 |
+
|
27 |
+
## Project Structure
|
28 |
+
```
|
29 |
+
luna-ai/
|
30 |
+
│
|
31 |
+
├── static/ # Frontend assets
|
32 |
+
│ ├── styles.css # UI styling
|
33 |
+
│ └── script.js # Client-side interactions
|
34 |
+
│
|
35 |
+
├── templates/ # HTML templates
|
36 |
+
│ └── index.html # Main application interface
|
37 |
+
│
|
38 |
+
├── app.py # Flask backend
|
39 |
+
├── base_prompt.txt # AI interaction guidelines
|
40 |
+
├── requirements.txt # Python dependencies
|
41 |
+
└── .env # Environment configuration
|
42 |
+
```
|
43 |
+
|
44 |
+
## Key Technologies
|
45 |
+
- Backend: Flask
|
46 |
+
- AI Model: Groq Llama3-70b
|
47 |
+
- Voice Processing: Web Speech API, gTTS
|
48 |
+
- Frontend: HTML5, CSS3, JavaScript
|
49 |
+
|
50 |
+
## Learning Philosophy
|
51 |
+
Luna AI transforms language learning from a tedious task into an exciting, personalized communication adventure, making every interaction a step towards linguistic mastery.
|
requirements.txt
ADDED
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
Flask==2.1.3
|
2 |
+
Werkzeug
|
3 |
+
python-dotenv
|
4 |
+
groq
|
5 |
+
gtts
|
6 |
+
SpeechRecognition
|
7 |
+
gunicorn
|
8 |
+
beautifulsoup4
|
9 |
+
idna
|
10 |
+
packaging
|
11 |
+
pipwin
|
12 |
+
pyjsparser
|
13 |
+
PyPrind
|
14 |
+
pySmartDL
|
15 |
+
pyttsx3
|
16 |
+
requests
|
17 |
+
urllib3
|
18 |
+
wikipedia
|
19 |
+
datetime
|
static/script.js
ADDED
@@ -0,0 +1,260 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
/**
 * Browser-side controller for the Luna AI chat UI.
 *
 * Wires up text input, press-and-hold voice recording (MediaRecorder →
 * /api/voice), live dictation (SpeechRecognition), and speech synthesis
 * for bot replies.
 *
 * Fixes over the original:
 *  - `resumeSpeaking()` was called from the speaker-button handler but
 *    never defined (TypeError if the paused branch was ever reached).
 *  - The server's `conversation_id` was never stored or sent back, so
 *    every request started a brand-new conversation and server-side
 *    history was lost between turns.
 */
class ChatBot {
    constructor() {
        this.voiceEnabled = false;
        this.isListening = false;
        this.synthesis = window.speechSynthesis;
        this.recognition = new (window.SpeechRecognition || window.webkitSpeechRecognition)();
        this.currentUtterance = null;
        this.isPaused = false;
        this.audioQueue = [];
        this.mediaRecorder = null;
        this.audioChunks = [];
        this.isRecording = false;
        // Server-issued conversation id; null until the first reply arrives.
        this.conversationId = null;

        this.setupRecognition();
        this.setupEventListeners();
    }

    /** Configure the live-dictation SpeechRecognition instance. */
    setupRecognition() {
        this.recognition.continuous = true;
        this.recognition.interimResults = true;
        this.recognition.lang = 'en-US';

        this.recognition.onstart = () => {
            this.isListening = true;
            this.toggleVoiceInputClass(true);
        };

        this.recognition.onend = () => {
            this.isListening = false;
            this.toggleVoiceInputClass(false);
        };

        this.recognition.onerror = (event) => {
            console.error('Speech recognition error:', event.error);
            this.isListening = false;
            this.toggleVoiceInputClass(false);
        };

        this.recognition.onresult = (event) => {
            let finalTranscript = '';
            let interimTranscript = '';

            for (let i = event.resultIndex; i < event.results.length; i++) {
                const transcript = event.results[i][0].transcript;
                if (event.results[i].isFinal) {
                    finalTranscript += transcript + ' ';
                } else {
                    interimTranscript += transcript;
                }
            }

            // Mirror the running transcript into the text box.
            const input = document.getElementById('messageInput');
            input.value = finalTranscript + interimTranscript;
        };
    }

    /** Attach all DOM event handlers for the chat UI. */
    setupEventListeners() {
        // Send button click event
        document.getElementById('sendMessage').addEventListener('click', () => this.handleSendMessage());

        // Voice input button: press-and-hold to record
        document.getElementById('voiceInput').addEventListener('mousedown', () => {
            this.startRecording();
        });

        document.getElementById('voiceInput').addEventListener('mouseup', () => {
            this.stopRecording();
        });

        // Enter sends the message; Shift+Enter inserts a newline
        document.getElementById('messageInput').addEventListener('keydown', (e) => {
            if (e.key === 'Enter' && !e.shiftKey) {
                e.preventDefault();
                this.handleSendMessage();
            }
        });

        // Per-message speaker button (delegated: buttons are added dynamically)
        document.addEventListener('click', (e) => {
            if (e.target.closest('.message-speaker')) {
                const messageContent = e.target.closest('.message-content');
                const text = messageContent.textContent;
                if (this.isPaused) {
                    this.resumeSpeaking();
                } else {
                    this.stopSpeaking();
                    this.speak(text);
                }
            }
        });
    }

    /** Start capturing microphone audio with MediaRecorder. */
    async startRecording() {
        try {
            const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
            this.mediaRecorder = new MediaRecorder(stream);
            this.audioChunks = [];
            this.isRecording = true;

            this.mediaRecorder.ondataavailable = (event) => {
                this.audioChunks.push(event.data);
            };

            this.mediaRecorder.onstop = async () => {
                const audioBlob = new Blob(this.audioChunks, { type: 'audio/wav' });
                await this.sendAudioToServer(audioBlob);
                // Release the microphone.
                stream.getTracks().forEach(track => track.stop());
            };

            this.mediaRecorder.start();
            this.toggleVoiceInputClass(true);
        } catch (error) {
            console.error('Error starting recording:', error);
        }
    }

    /** Stop an in-progress recording (triggers onstop → upload). */
    stopRecording() {
        if (this.mediaRecorder && this.isRecording) {
            this.mediaRecorder.stop();
            this.isRecording = false;
            this.toggleVoiceInputClass(false);
        }
    }

    /** Upload a recorded clip to /api/voice and render the reply. */
    async sendAudioToServer(audioBlob) {
        const formData = new FormData();
        formData.append('audio', audioBlob);
        // Keep voice turns in the same server-side conversation as text turns.
        if (this.conversationId) {
            formData.append('conversation_id', this.conversationId);
        }

        try {
            const response = await fetch('/api/voice', {
                method: 'POST',
                body: formData
            });

            if (!response.ok) throw new Error('Failed to send audio');

            const data = await response.json();
            if (data.conversation_id) {
                this.conversationId = data.conversation_id;
            }
            if (data.text) {
                document.getElementById('messageInput').value = data.text;
            }
            if (data.response) {
                this.addMessage(data.response, 'bot');
                if (this.voiceEnabled) {
                    this.speak(data.response);
                }
            }
        } catch (error) {
            console.error('Error sending audio:', error);
            this.addMessage('Sorry, there was an error processing your voice input.', 'bot');
        }
    }

    /** Validate and dispatch the text box contents. */
    handleSendMessage() {
        const input = document.getElementById('messageInput');
        const message = input.value.trim();
        if (message) {
            this.stopSpeaking();
            this.sendMessage(message);
            input.value = '';
        }
    }

    /** POST a text message to /api/chat and render the reply. */
    async sendMessage(message) {
        this.addMessage(message, 'user');
        this.showTypingIndicator();

        try {
            // Send the conversation id (when known) so the server keeps the
            // chat history instead of minting a new conversation per turn.
            const payload = { message };
            if (this.conversationId) {
                payload.conversation_id = this.conversationId;
            }

            const response = await fetch('/api/chat', {
                method: 'POST',
                headers: { 'Content-Type': 'application/json' },
                body: JSON.stringify(payload)
            });

            if (!response.ok) throw new Error('Failed to send message');

            const data = await response.json();
            if (data.conversation_id) {
                this.conversationId = data.conversation_id;
            }
            this.removeTypingIndicator();
            this.addMessage(data.response, 'bot');

            if (this.voiceEnabled) {
                this.speak(data.response);
            }
        } catch (error) {
            console.error('Error:', error);
            this.removeTypingIndicator();
            this.addMessage('Sorry, there was an error processing your request.', 'bot');
        }
    }

    /** Speak `text` aloud, chaining any queued utterances afterwards. */
    speak(text) {
        if (this.synthesis.speaking) {
            this.synthesis.cancel();
        }

        const utterance = new SpeechSynthesisUtterance(text);
        this.currentUtterance = utterance;

        utterance.onend = () => {
            this.currentUtterance = null;
            this.isPaused = false;
            if (this.audioQueue.length > 0) {
                const nextText = this.audioQueue.shift();
                this.speak(nextText);
            }
        };

        this.synthesis.speak(utterance);
    }

    /** Cancel any in-progress speech and drop the queue. */
    stopSpeaking() {
        if (this.synthesis.speaking) {
            this.synthesis.cancel();
            this.currentUtterance = null;
            this.isPaused = false;
            this.audioQueue = [];
        }
    }

    /**
     * Resume a paused utterance. Referenced by the speaker-button handler
     * but missing from the original implementation.
     */
    resumeSpeaking() {
        if (this.isPaused) {
            this.synthesis.resume();
            this.isPaused = false;
        }
    }

    /** Toggle the mic button's visual "active" state. */
    toggleVoiceInputClass(isActive) {
        const button = document.getElementById('voiceInput');
        button.classList.toggle('active', isActive);
    }

    /** Append a chat bubble (with a speaker button on bot messages). */
    addMessage(message, sender) {
        const messagesContainer = document.getElementById('chatMessages');
        const messageDiv = document.createElement('div');
        messageDiv.className = `message ${sender}`;

        const content = document.createElement('div');
        content.className = 'message-content';
        content.textContent = message;

        if (sender === 'bot') {
            const speakerButton = document.createElement('button');
            speakerButton.className = 'message-speaker';
            speakerButton.innerHTML = '<i class="fas fa-volume-up"></i>';
            content.appendChild(speakerButton);
        }

        messageDiv.appendChild(content);
        messagesContainer.appendChild(messageDiv);
        // Keep the newest message in view.
        messagesContainer.scrollTop = messagesContainer.scrollHeight;
    }

    /** Show the animated three-dot "bot is typing" bubble. */
    showTypingIndicator() {
        const indicator = document.createElement('div');
        indicator.className = 'message bot typing-indicator';
        indicator.innerHTML = '<div class="typing-dot"></div>'.repeat(3);
        document.getElementById('chatMessages').appendChild(indicator);
    }

    /** Remove the typing bubble, if present. */
    removeTypingIndicator() {
        const indicator = document.querySelector('.typing-indicator');
        if (indicator) indicator.remove();
    }
}
|
257 |
+
|
258 |
+
// Boot the chat UI once the DOM is ready; expose the instance on window
// for debugging from the console.
document.addEventListener('DOMContentLoaded', () => {
    window.chatBot = new ChatBot();
});
|
static/styles.css
ADDED
@@ -0,0 +1,234 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
* {
|
2 |
+
margin: 0;
|
3 |
+
padding: 0;
|
4 |
+
box-sizing: border-box;
|
5 |
+
}
|
6 |
+
|
7 |
+
|
8 |
+
|
9 |
+
body {
|
10 |
+
font-family: Arial, sans-serif;
|
11 |
+
background-color: #181818; /* Dark background */
|
12 |
+
color: #e0e0e0; /* Light text color */
|
13 |
+
height: 100vh;
|
14 |
+
|
15 |
+
|
16 |
+
}
|
17 |
+
.Luna{
|
18 |
+
padding: 10px;
|
19 |
+
margin-left: 35px;
|
20 |
+
}
|
21 |
+
a:link, a:visited {
|
22 |
+
text-decoration: none;
|
23 |
+
color: #e0e0e0;
|
24 |
+
display: inline-block;
|
25 |
+
}
|
26 |
+
|
27 |
+
a:hover, a:active {
|
28 |
+
color:blueviolet;
|
29 |
+
}
|
30 |
+
.chat-container {
|
31 |
+
|
32 |
+
margin: 10px 20px 30px 40px;
|
33 |
+
height: 89vh;
|
34 |
+
width: 192vh;
|
35 |
+
background-color: #242424; /* Dark background */
|
36 |
+
border-radius: 12px;
|
37 |
+
box-shadow: 0 2px 10px rgba(0, 0, 0, 0.3);
|
38 |
+
display: flex;
|
39 |
+
flex-direction: column;
|
40 |
+
}
|
41 |
+
|
42 |
+
.chat-header {
|
43 |
+
padding: 1rem;
|
44 |
+
border-bottom: 1px solid #444; /* Darker separator */
|
45 |
+
display: flex;
|
46 |
+
justify-content: space-between;
|
47 |
+
align-items: center;
|
48 |
+
}
|
49 |
+
|
50 |
+
.bot-profile {
|
51 |
+
display: flex;
|
52 |
+
align-items: center;
|
53 |
+
gap: 10px;
|
54 |
+
}
|
55 |
+
|
56 |
+
.bot-avatar {
|
57 |
+
width: 40px;
|
58 |
+
height: 40px;
|
59 |
+
border-radius: 50%;
|
60 |
+
}
|
61 |
+
|
62 |
+
.bot-name {
|
63 |
+
font-weight: bold;
|
64 |
+
font-size: 1.1rem;
|
65 |
+
color: #f0f2f5; /* Light text */
|
66 |
+
}
|
67 |
+
|
68 |
+
.voice-controls {
|
69 |
+
display: flex;
|
70 |
+
gap: 10px;
|
71 |
+
}
|
72 |
+
|
73 |
+
.voice-button {
|
74 |
+
background: none;
|
75 |
+
border: none;
|
76 |
+
cursor: pointer;
|
77 |
+
padding: 8px;
|
78 |
+
border-radius: 50%;
|
79 |
+
transition: background-color 0.3s;
|
80 |
+
color: #f0f2f5; /* Light text */
|
81 |
+
}
|
82 |
+
|
83 |
+
.voice-button:hover {
|
84 |
+
background-color: #444; /* Dark hover */
|
85 |
+
}
|
86 |
+
|
87 |
+
.chat-messages {
|
88 |
+
flex: 1;
|
89 |
+
overflow-y: auto;
|
90 |
+
padding: 1rem;
|
91 |
+
}
|
92 |
+
|
93 |
+
.message {
|
94 |
+
margin-bottom: 1rem;
|
95 |
+
display: flex;
|
96 |
+
align-items: flex-start;
|
97 |
+
gap: 10px;
|
98 |
+
}
|
99 |
+
|
100 |
+
.message.user {
|
101 |
+
flex-direction: row-reverse;
|
102 |
+
}
|
103 |
+
|
104 |
+
.message-content {
|
105 |
+
max-width: 70%;
|
106 |
+
padding: 0.8rem;
|
107 |
+
border-radius: 12px;
|
108 |
+
position: relative;
|
109 |
+
font-size: 20px;
|
110 |
+
}
|
111 |
+
|
112 |
+
.user .message-content {
|
113 |
+
background-color: #006acc; /* Blue background */
|
114 |
+
color: white;
|
115 |
+
border-radius: 18px 18px 4px 18px;
|
116 |
+
}
|
117 |
+
|
118 |
+
.bot .message-content {
|
119 |
+
background-color: #333; /* Dark message background */
|
120 |
+
color: #f0f2f5; /* Light text */
|
121 |
+
border-radius: 18px 18px 18px 4px;
|
122 |
+
}
|
123 |
+
|
124 |
+
.chat-input-container {
|
125 |
+
padding: 1rem;
|
126 |
+
border-top: 1px solid #444; /* Dark separator */
|
127 |
+
position: relative;
|
128 |
+
}
|
129 |
+
|
130 |
+
.chat-input-container textarea {
|
131 |
+
width: 100%;
|
132 |
+
padding: 0.8rem 80px 0.8rem 1rem;
|
133 |
+
border: 1px solid #444; /* Dark border */
|
134 |
+
border-radius: 20px;
|
135 |
+
resize: none;
|
136 |
+
height: 50px;
|
137 |
+
font-size: 1rem;
|
138 |
+
background-color: #333; /* Dark background for input */
|
139 |
+
color: #f0f2f5; /* Light text */
|
140 |
+
}
|
141 |
+
|
142 |
+
.input-buttons {
|
143 |
+
position: absolute;
|
144 |
+
right: 1.5rem;
|
145 |
+
bottom: 1.7rem;
|
146 |
+
display: flex;
|
147 |
+
gap: 8px;
|
148 |
+
}
|
149 |
+
|
150 |
+
.voice-input-button, .send-button {
|
151 |
+
background: none;
|
152 |
+
border: none;
|
153 |
+
cursor: pointer;
|
154 |
+
padding: 8px;
|
155 |
+
color: #006acc; /* Blue color */
|
156 |
+
transition: color 0.3s;
|
157 |
+
}
|
158 |
+
|
159 |
+
.voice-input-button:hover, .send-button:hover {
|
160 |
+
color: #004a77; /* Darker blue on hover */
|
161 |
+
}
|
162 |
+
|
163 |
+
.message-avatar {
|
164 |
+
width: 36px;
|
165 |
+
height: 36px;
|
166 |
+
border-radius: 50%;
|
167 |
+
display: flex;
|
168 |
+
align-items: center;
|
169 |
+
justify-content: center;
|
170 |
+
float: right;
|
171 |
+
display: none;
|
172 |
+
}
|
173 |
+
|
174 |
+
.message-avatar i {
|
175 |
+
font-size: 1.2rem;
|
176 |
+
color: #006acc; /* Blue icon */
|
177 |
+
}
|
178 |
+
|
179 |
+
/* Add these styles to your existing CSS */
|
180 |
+
.message-speaker {
|
181 |
+
background: none;
|
182 |
+
border: none;
|
183 |
+
color: #006acc;
|
184 |
+
cursor: pointer;
|
185 |
+
padding: 4px;
|
186 |
+
margin-left: 8px;
|
187 |
+
transition: color 0.3s;
|
188 |
+
}
|
189 |
+
|
190 |
+
.message-speaker:hover {
|
191 |
+
color: #004a77;
|
192 |
+
}
|
193 |
+
|
194 |
+
.message-speaker.speaking i {
|
195 |
+
color: #ff4444;
|
196 |
+
}
|
197 |
+
|
198 |
+
.user-message .message-avatar i {
|
199 |
+
color: #f0f2f5; /* Light icon for user messages */
|
200 |
+
}
|
201 |
+
|
202 |
+
.listening .fa-microphone {
|
203 |
+
color: #ff4444;
|
204 |
+
animation: pulse 1.5s infinite;
|
205 |
+
}
|
206 |
+
|
207 |
+
@keyframes pulse {
|
208 |
+
0% { transform: scale(1); }
|
209 |
+
50% { transform: scale(1.2); }
|
210 |
+
100% { transform: scale(1); }
|
211 |
+
}
|
212 |
+
|
213 |
+
.typing-indicator {
|
214 |
+
display: flex;
|
215 |
+
gap: 4px;
|
216 |
+
padding: 8px;
|
217 |
+
}
|
218 |
+
|
219 |
+
.typing-dot {
|
220 |
+
width: 8px;
|
221 |
+
height: 8px;
|
222 |
+
background-color: #90949c; /* Gray typing dots */
|
223 |
+
border-radius: 50%;
|
224 |
+
animation: typing 1.4s infinite ease-in-out;
|
225 |
+
}
|
226 |
+
|
227 |
+
.typing-dot:nth-child(1) { animation-delay: 200ms; }
|
228 |
+
.typing-dot:nth-child(2) { animation-delay: 300ms; }
|
229 |
+
.typing-dot:nth-child(3) { animation-delay: 400ms; }
|
230 |
+
|
231 |
+
@keyframes typing {
|
232 |
+
0%, 100% { transform: translateY(0); }
|
233 |
+
50% { transform: translateY(-10px); }
|
234 |
+
}
|
templates/index.html
ADDED
@@ -0,0 +1,59 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
<!DOCTYPE html>
|
2 |
+
<html lang="en">
|
3 |
+
<head>
|
4 |
+
<meta charset="UTF-8">
|
5 |
+
<meta name="viewport" content="width=device-width, initial-scale=1.0">
|
6 |
+
<title>AI Chatbot</title>
|
7 |
+
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/font-awesome/6.0.0/css/all.min.css">
|
8 |
+
<link rel="stylesheet" href="/static/styles.css">
|
9 |
+
</head>
|
10 |
+
<body>
|
11 |
+
<div class="Luna">
|
12 |
+
<h1><a href="https://github.com/anshh-arora/Luna-AI" target="_blank">Luna AI</a></h1>
|
13 |
+
</div>
|
14 |
+
<div class="chat-container">
|
15 |
+
<div class="chat-header">
|
16 |
+
<div class="bot-profile">
|
17 |
+
<i class="fas fa-atom"></i>
|
18 |
+
<span class="bot-name">AI Assistant</span>
|
19 |
+
</div>
|
20 |
+
<div class="voice-controls">
|
21 |
+
|
22 |
+
</div>
|
23 |
+
</div>
|
24 |
+
|
25 |
+
<div class="chat-messages" id="chatMessages"></div>
|
26 |
+
|
27 |
+
<div class="message-templates" style="display: none;">
|
28 |
+
<div class="message bot-message">
|
29 |
+
<div class="message-avatar">
|
30 |
+
<i class="fas fa-atom"></i>
|
31 |
+
</div>
|
32 |
+
<div class="message-content">
|
33 |
+
|
34 |
+
</div>
|
35 |
+
</div>
|
36 |
+
|
37 |
+
<div class="message user-message">
|
38 |
+
<div class="message-avatar">
|
39 |
+
<i class="fas fa-user-alt"></i>
|
40 |
+
</div>
|
41 |
+
<div class="message-content"></div>
|
42 |
+
</div>
|
43 |
+
</div>
|
44 |
+
|
45 |
+
<div class="chat-input-container">
|
46 |
+
<textarea id="messageInput" placeholder="Type your message..." ></textarea>
|
47 |
+
<div class="input-buttons">
|
48 |
+
<button id="voiceInput" class="voice-input-button">
|
49 |
+
<i class="fas fa-microphone"></i>
|
50 |
+
</button>
|
51 |
+
<button id="sendMessage" class="send-button" type="submit">
|
52 |
+
<i class="fas fa-paper-plane"></i>
|
53 |
+
</button>
|
54 |
+
</div>
|
55 |
+
</div>
|
56 |
+
</div>
|
57 |
+
<script src="/static/script.js"></script>
|
58 |
+
</body>
|
59 |
+
</html>
|