404Brain-Not-Found-yeah committed on
Commit 42a0d43 · verified · 1 Parent(s): 84b23d5

Upload 6 files

Files changed (6)
  1. README.md +84 -13
  2. app_gradio.py +369 -0
  3. model.joblib +3 -0
  4. predict.py +162 -0
  5. requirements.txt +7 -0
  6. scaler.joblib +3 -0
README.md CHANGED
@@ -1,13 +1,84 @@
- ---
- title: Healing Music Classifier
- emoji: 🦀
- colorFrom: green
- colorTo: gray
- sdk: gradio
- sdk_version: 5.8.0
- app_file: app.py
- pinned: false
- license: mit
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+ # Healing Music Classifier
+
+ This project uses machine learning to classify whether a piece of music has healing properties. It analyzes audio features including MFCCs, spectral characteristics, rhythm, and harmonic content to make its predictions.
+
+ ## Features
+
+ - Audio feature extraction using librosa
+ - Machine learning classification using a Random Forest
+ - Web interface for easy music upload and analysis
+ - Visual results with a healing probability score
+ - Cross-validation for model evaluation
+
+ ## Installation
+
+ 1. Clone this repository
+ 2. Install dependencies:
+ ```bash
+ pip install -r requirements.txt
+ ```
+
+ ## Usage
+
+ 1. First, train the model:
+ ```bash
+ python train_model.py
+ ```
+
+ 2. Run the web application:
+ ```bash
+ streamlit run app.py
+ ```
+
+ 3. Open your browser and upload a music file to analyze
+
+ ## Project Structure
+
+ - `train_model.py`: Feature extraction and model training
+ - `predict.py`: Prediction functionality
+ - `app.py`: Streamlit web interface
+ - `requirements.txt`: Project dependencies
+ - `model.joblib`: Trained model (generated after training)
+ - `scaler.joblib`: Feature scaler (generated after training)
+
+ ## Technical Details
+
+ The classifier uses the following features:
+ - Mel-frequency cepstral coefficients (MFCCs)
+ - Spectral centroid
+ - Spectral rolloff
+ - Zero crossing rate
+ - Chroma features
+ - Tempo
+ - RMS energy
+
+ ## Deployment
+
+ ### Local Deployment
+ 1. Clone this repository
+ 2. Install dependencies:
+ ```bash
+ pip install -r requirements.txt
+ ```
+ 3. Run the web application:
+ ```bash
+ streamlit run app.py
+ ```
+
+ ### Cloud Deployment (Streamlit Cloud)
+ 1. Fork this repository to your GitHub account
+ 2. Visit [Streamlit Cloud](https://streamlit.io/cloud)
+ 3. Sign in with your GitHub account
+ 4. Click "New app" and select this repository
+ 5. Select the main branch and the app.py file
+ 6. Click "Deploy"
+
+ Note: Make sure to include sample music files in the `healing_music` and `non_healing_music` folders before training the model.
+
+ ## License
+
+ MIT License
+
+ ## Contributing
+
+ Feel free to open issues and pull requests!
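
`train_model.py` itself is not part of this commit, but `predict.py` below validates a 38-value feature vector (26 MFCC statistics plus 12 chroma values). A minimal sketch of an `extract_features` consistent with that check, using standard librosa calls; the function name matches the import in `predict.py`, but the aggregation choices here are assumptions:

```python
import librosa
import numpy as np

def extract_features(audio_path):
    """Sketch: a 38-dim vector (13 MFCC means + 13 MFCC stds + 12 chroma means)."""
    # librosa resamples to 22050 Hz mono by default
    y, sr = librosa.load(audio_path)

    # Frame-level features: shapes (13, n_frames) and (12, n_frames)
    mfcc = librosa.feature.mfcc(y=y, sr=sr, n_mfcc=13)
    chroma = librosa.feature.chroma_stft(y=y, sr=sr)

    # Aggregate over time into one fixed-length vector of 38 values
    return np.concatenate([
        mfcc.mean(axis=1),    # 13 values
        mfcc.std(axis=1),     # 13 values
        chroma.mean(axis=1),  # 12 values
    ])
```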
app_gradio.py ADDED
@@ -0,0 +1,369 @@
+ import gradio as gr
+ import os
+ from predict import predict_healing_music
+ import train_model
+ import logging
+ import tempfile
+ import time
+ import shutil
+ import socket
+ import joblib
+
+ # Set up logging
+ logging.basicConfig(
+     level=logging.INFO,
+     format='%(asctime)s - %(name)s - %(levelname)s - %(message)s'
+ )
+ logger = logging.getLogger(__name__)
+
+ def find_free_port(start_port=7860, max_port=7960):
+     """Find a free port in the given range."""
+     for port in range(start_port, max_port + 1):
+         with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
+             try:
+                 s.bind(('', port))
+                 return port
+             except OSError:
+                 continue
+     return None
+
+ # Ensure model directory exists
+ model_dir = os.path.join(os.path.dirname(__file__), "models")
+ os.makedirs(model_dir, exist_ok=True)
+
+ # Model file paths
+ model_path = os.path.join(model_dir, "model.joblib")
+ scaler_path = os.path.join(model_dir, "scaler.joblib")
+
+ # Check if model exists
+ if not os.path.exists(model_path) or not os.path.exists(scaler_path):
+     print('First run: Training the model...')
+     try:
+         train_model.train_and_evaluate_model()
+         print('Model training completed!')
+     except Exception as e:
+         print(f'Model training failed: {str(e)}')
+         raise e
+
+ def process_audio(audio_path):
+     """
+     Process and analyze the audio file
+     """
+     if audio_path is None:
+         return None, None, None, "Please upload an audio file"
+
+     model_dir = os.path.join(os.path.dirname(__file__), "models")
+     model_path = os.path.join(model_dir, "model.joblib")
+     scaler_path = os.path.join(model_dir, "scaler.joblib")
+
+     try:
+         # Load model and scaler (fails fast if the files are missing;
+         # predict_healing_music loads its own copies)
+         model = joblib.load(model_path)
+         scaler = joblib.load(scaler_path)
+
+         tmp_file = None
+         try:
+             # Create temporary file
+             suffix = os.path.splitext(audio_path)[1]
+             tmp_file = tempfile.NamedTemporaryFile(delete=False, suffix=suffix)
+             shutil.copy2(audio_path, tmp_file.name)
+
+             # Make prediction
+             healing_probability = predict_healing_music(tmp_file.name)
+
+             if healing_probability is not None:
+                 # Calculate percentage
+                 healing_percentage = healing_probability * 100
+
+                 # Generate description
+                 if healing_percentage >= 75:
+                     description = "This music has strong healing properties! ✨"
+                     color = "#15803d"  # Dark green
+                 elif healing_percentage >= 50:
+                     description = "This music has moderate healing effects. 🌟"
+                     color = "#0369a1"  # Dark blue
+                 else:
+                     description = "This music has limited healing potential. 🎵"
+                     color = "#b91c1c"  # Dark red
+
+                 return f"{healing_percentage:.1f}%", f'<div style="background-color: {color}; color: white; padding: 1rem; border-radius: 8px; text-align: center;">{description}</div>', None, None
+             else:
+                 return "Error", "Error analyzing file. Please ensure it's a valid MP3 or WAV file.", None, None
+
+         except Exception as e:
+             logger.error(f"Error during analysis: {str(e)}")
+             return "Error", f"An unexpected error occurred: {str(e)}", None, None
+
+         finally:
+             # Clean up temporary file
+             if tmp_file is not None:
+                 try:
+                     tmp_file.close()
+                     os.unlink(tmp_file.name)
+                 except Exception as e:
+                     logger.error(f"Failed to clean up temporary file: {str(e)}")
+
+     except Exception as e:
+         logger.error(f"Error during model loading: {str(e)}")
+         return "Error", f"An unexpected error occurred: {str(e)}", None, None
+
+ def analyze_audio(audio):
+     """Analyze the audio file"""
+     try:
+         if audio is None:
+             # This function is a generator, so an early ``return`` would emit
+             # nothing to the UI; yield the reset state instead
+             yield [
+                 gr.update(visible=False),  # results
+                 gr.update(visible=False),  # analyzing
+                 "",  # healing_index
+                 ""   # result_text
+             ]
+             return
+
+         # Show analyzing status first
+         yield [
+             gr.update(visible=False),  # results
+             gr.update(visible=True),   # analyzing
+             "",  # healing_index
+             ""   # result_text
+         ]
+
+         # Process audio and get results
+         index, desc, _, _ = process_audio(audio)
+         desc_with_hint = f'{desc}<div style="margin-top: 1rem; color: #9ca3af; font-size: 0.9rem;">To analyze another file, please refresh the page</div>'
+
+         # Yield final results
+         yield [
+             gr.update(visible=True),   # results
+             gr.update(visible=False),  # analyzing
+             index,           # healing_index
+             desc_with_hint   # result_text
+         ]
+
+     except Exception as e:
+         logger.error(f"Error in analyze_audio: {str(e)}")
+         yield [
+             gr.update(visible=True),   # results
+             gr.update(visible=False),  # analyzing
+             "Error",  # healing_index
+             f"An error occurred: {str(e)}"  # result_text
+         ]
+
+ # Custom CSS styles
+ custom_css = """
+ .gradio-container {
+     font-family: 'Inter', -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, sans-serif;
+     max-width: 800px !important;
+     margin: auto;
+     padding: 0 1rem;
+     background-color: #0f1117;
+ }
+ .container {
+     max-width: 700px;
+     margin: 0 auto;
+     padding-top: 2rem;
+ }
+ .header {
+     text-align: center;
+     margin-bottom: 1rem;
+     width: 100%;
+     display: flex;
+     justify-content: center;
+     align-items: center;
+ }
+ .title {
+     font-size: 2.5rem !important;
+     font-weight: 700 !important;
+     color: white !important;
+     margin: 0 !important;
+     line-height: 1.2 !important;
+     text-align: center !important;
+     white-space: nowrap !important;
+ }
+ .subtitle {
+     font-size: 1.2rem !important;
+     text-align: center;
+     color: #9ca3af !important;
+     margin-top: 0.5rem !important;
+     max-width: 800px;
+     margin-left: auto;
+     margin-right: auto;
+     white-space: nowrap !important;
+ }
+ .upload-box {
+     background-color: #1f2937;
+     border-radius: 12px;
+     padding: 2rem;
+     margin-bottom: 1rem;
+     border: 2px dashed #374151;
+     transition: all 0.3s ease;
+ }
+ .upload-box:hover {
+     border-color: #3b82f6;
+     box-shadow: 0 0 10px rgba(59, 130, 246, 0.2);
+ }
+ .upload-area {
+     display: flex;
+     flex-direction: column;
+     align-items: center;
+     justify-content: center;
+     gap: 1rem;
+     padding: 1.5rem 0;
+ }
+ .icon-text-container {
+     display: inline-flex;
+     align-items: center;
+     justify-content: center;
+     gap: 0.2rem;
+     white-space: nowrap;
+ }
+ .upload-icon {
+     color: #9ca3af;
+     font-size: 2rem;
+     line-height: 1;
+     margin-right: 0.1rem;
+ }
+ .upload-text {
+     color: white;
+     font-size: 1.1rem;
+     font-weight: 500;
+     line-height: 1;
+ }
+ .upload-hint {
+     color: #6b7280 !important;
+     font-size: 0.85rem !important;
+     margin-top: 0.5rem !important;
+     font-style: italic !important;
+ }
+ .progress-area {
+     margin: 1rem 0;
+     background-color: #1f2937;
+     border-radius: 12px;
+     padding: 1.5rem;
+     text-align: center;
+ }
+ .progress-text {
+     color: #60a5fa !important;
+     font-size: 1.2rem !important;
+     font-weight: 500 !important;
+     margin: 0 !important;
+ }
+ .results-container {
+     background-color: #1f2937;
+     border-radius: 12px;
+     padding: 1.25rem;
+     margin-top: 1rem;
+     animation: fadeIn 0.5s ease;
+ }
+ .result-title {
+     color: white !important;
+     font-size: 1.25rem !important;
+     font-weight: 600 !important;
+     margin-bottom: 0.5rem !important;
+ }
+ .healing-index {
+     font-size: 2.5rem !important;
+     font-weight: 700 !important;
+     text-align: center;
+     color: white !important;
+     margin: 0.5rem 0 !important;
+     animation: scaleIn 0.5s ease;
+ }
+ .result-text {
+     animation: slideIn 0.5s ease;
+ }
+ @keyframes fadeIn {
+     from { opacity: 0; transform: translateY(20px); }
+     to { opacity: 1; transform: translateY(0); }
+ }
+ @keyframes scaleIn {
+     from { transform: scale(0.8); opacity: 0; }
+     to { transform: scale(1); opacity: 1; }
+ }
+ @keyframes slideIn {
+     from { transform: translateY(10px); opacity: 0; }
+     to { transform: translateY(0); opacity: 1; }
+ }
+ """
+
+ # Create Gradio interface
+ with gr.Blocks(
+     title="Healing Music Classifier",
+     css=custom_css,
+     theme=gr.themes.Default()
+ ) as demo:
+     with gr.Column(elem_classes="container"):
+         with gr.Row(elem_classes="header"):
+             gr.Markdown("🎵 Healing Music Classifier", elem_classes="title")
+
+         gr.Markdown(
+             "Upload your music file, and our model will analyze its healing potential!",
+             elem_classes="subtitle"
+         )
+
+         with gr.Column(elem_classes="upload-box"):
+             with gr.Column(elem_classes="upload-area"):
+                 gr.Markdown("☁️ Drop your audio file here", elem_classes="icon-text-container")
+                 audio_input = gr.Audio(
+                     label="Audio Input",
+                     sources=["upload"],
+                     type="filepath",
+                     elem_classes="audio-input",
+                     interactive=True,
+                     show_label=False  # hide the built-in label; the Markdown above is the prompt
+                 )
+                 gr.Markdown("Limit 200MB per file • MP3, WAV", elem_classes="upload-hint")
+
+         # Add analyzing status
+         with gr.Column(elem_classes="analyzing-status", visible=False) as analyzing:
+             gr.Markdown(
+                 """<div style="display: flex; align-items: center; justify-content: center; gap: 0.5rem;">
+                 <div class="loading-spinner"></div>
+                 <span style="color: #60a5fa;">Analyzing your music...</span>
+                 </div>""",
+                 elem_classes="analyzing-text"
+             )
+
+         with gr.Column(elem_classes="results-container", visible=False) as results:
+             gr.Markdown("Analysis Results", elem_classes="result-title")
+             healing_index = gr.Markdown("", elem_classes="healing-index")
+             result_text = gr.Markdown("", elem_classes="result-text")
+
+     # Add loading spinner CSS
+     demo.load(None, None, None, js="""
+     () => {
+         const style = document.createElement('style');
+         style.textContent = `
+             .loading-spinner {
+                 width: 20px;
+                 height: 20px;
+                 border: 3px solid #60a5fa;
+                 border-top: 3px solid transparent;
+                 border-radius: 50%;
+                 animation: spin 1s linear infinite;
+             }
+             @keyframes spin {
+                 0% { transform: rotate(0deg); }
+                 100% { transform: rotate(360deg); }
+             }
+         `;
+         document.head.appendChild(style);
+     }
+     """)
+
+     # Audio analysis event
+     audio_input.upload(
+         fn=analyze_audio,
+         inputs=[audio_input],
+         outputs=[
+             results,
+             analyzing,
+             healing_index,
+             result_text
+         ],
+         queue=True
+     )
+
+ # Launch application
+ if __name__ == "__main__":
+     demo.launch()  # Keep the launch config simple; Hugging Face handles the port, etc.
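
`analyze_audio` above works because Gradio treats a generator event handler specially: each `yield` pushes a fresh set of output values to the UI, which is how the "Analyzing..." state appears before the final result. A stripped-down sketch of the same pattern (component names here are illustrative, not from this commit):

```python
import time
import gradio as gr

def slow_task(text):
    # First yield: show an intermediate status immediately
    yield "Analyzing..."
    time.sleep(1)  # stand-in for the real feature extraction and prediction
    # Second yield: replace the status with the final result
    yield f"Result for: {text}"

with gr.Blocks() as demo:
    inp = gr.Textbox(label="Input")
    out = gr.Markdown()
    inp.submit(slow_task, inputs=inp, outputs=out)

if __name__ == "__main__":
    demo.launch()
```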
model.joblib ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:08e5c37f829bca6256348ab94b6450748ceba8dc4014b37ce01e98e8038b618d
+ size 166857
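
The three lines above are a Git LFS pointer, not the model itself. If you clone the repository and the `.joblib` files are only a few bytes, fetch the real binaries first:

```bash
git lfs install   # one-time setup
git lfs pull      # replaces LFS pointers with the actual model/scaler binaries
```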
predict.py ADDED
@@ -0,0 +1,162 @@
+ import joblib
+ from train_model import extract_features
+ import numpy as np
+ import os
+ import logging
+ import soundfile as sf
+ import librosa
+ import traceback
+
+ logging.basicConfig(level=logging.DEBUG)
+ logger = logging.getLogger(__name__)
+
+ def verify_audio_file(file_path):
+     """Verify if the audio file is valid and can be processed."""
+     try:
+         print(f"[DEBUG] Starting audio file verification for: {file_path}")
+         logger.debug(f"Starting audio file verification for: {file_path}")
+
+         # Check basic file properties
+         if not os.path.exists(file_path):
+             print(f"[ERROR] File not found: {file_path}")
+             logger.error(f"File not found: {file_path}")
+             return False
+
+         file_size = os.path.getsize(file_path)
+         print(f"[DEBUG] File size: {file_size} bytes")
+         logger.debug(f"File size: {file_size} bytes")
+         if file_size == 0:
+             print("[ERROR] File is empty")
+             logger.error("File is empty")
+             return False
+
+         # Try reading with soundfile
+         try:
+             print("[DEBUG] Attempting to read with soundfile...")
+             logger.debug("Attempting to read with soundfile...")
+             with sf.SoundFile(file_path) as audio_file:
+                 print(f"[DEBUG] SoundFile success - Sample rate: {audio_file.samplerate}Hz, "
+                       f"Channels: {audio_file.channels}, Frames: {audio_file.frames}")
+                 logger.debug(f"SoundFile success - Sample rate: {audio_file.samplerate}Hz, "
+                              f"Channels: {audio_file.channels}, Frames: {audio_file.frames}")
+                 if audio_file.frames == 0:
+                     print("[ERROR] Audio file has no frames")
+                     logger.error("Audio file has no frames")
+                     return False
+         except Exception as e:
+             print(f"[WARNING] SoundFile read failed: {str(e)}")
+             logger.warning(f"SoundFile read failed with error: {str(e)}")
+             logger.debug(f"Full traceback: {traceback.format_exc()}")
+             # Don't return False here; try librosa as a backup
+
+         # Try reading with librosa
+         try:
+             print("[DEBUG] Attempting to read with librosa...")
+             logger.debug("Attempting to read with librosa...")
+             y, sr = librosa.load(file_path, duration=1, sr=None)
+             print(f"[DEBUG] Librosa success - Sample rate: {sr}Hz, Length: {len(y)} samples")
+             logger.debug(f"Librosa success - Sample rate: {sr}Hz, Length: {len(y)} samples")
+             if len(y) == 0:
+                 print("[ERROR] Librosa loaded empty audio data")
+                 logger.error("Librosa loaded empty audio data")
+                 return False
+             return True
+         except Exception as e:
+             print(f"[ERROR] Librosa read failed: {str(e)}")
+             logger.error(f"Librosa read failed with error: {str(e)}")
+             logger.debug(f"Full traceback: {traceback.format_exc()}")
+             return False
+
+     except Exception as e:
+         print(f"[ERROR] Error in verify_audio_file: {str(e)}")
+         logger.error(f"Error in verify_audio_file: {str(e)}")
+         logger.debug(f"Full traceback: {traceback.format_exc()}")
+         return False
+
+ def predict_healing_music(audio_path):
+     """
+     Predict whether a music file is healing or not.
+     Returns: probability of being healing music (0-1)
+     """
+     try:
+         # Step 1: Verify audio file
+         print(f"[DEBUG] Starting prediction process for: {audio_path}")
+         logger.info(f"Starting prediction process for: {audio_path}")
+         if not verify_audio_file(audio_path):
+             print("[ERROR] Audio file verification failed")
+             logger.error("Audio file verification failed")
+             return None
+
+         # Step 2: Load model and scaler
+         try:
+             print("[DEBUG] Loading model and scaler...")
+             logger.info("Loading model and scaler...")
+             if not os.path.exists('model.joblib') or not os.path.exists('scaler.joblib'):
+                 print("[ERROR] Model or scaler file not found")
+                 logger.error("Model or scaler file not found")
+                 return None
+
+             model = joblib.load('model.joblib')
+             scaler = joblib.load('scaler.joblib')
+             print("[DEBUG] Model and scaler loaded successfully")
+             logger.info("Model and scaler loaded successfully")
+         except Exception as e:
+             print(f"[ERROR] Error loading model or scaler: {str(e)}")
+             logger.error(f"Error loading model or scaler: {str(e)}")
+             return None
+
+         # Step 3: Extract features
+         try:
+             print("[DEBUG] Starting feature extraction...")
+             logger.info("Starting feature extraction...")
+             features = extract_features(audio_path)
+             if features is None:
+                 print("[ERROR] Feature extraction returned None")
+                 logger.error("Feature extraction returned None")
+                 return None
+
+             # Verify feature dimensions
+             expected_features = 38  # Updated to match our current feature extraction (26 MFCC + 12 chroma)
+             if len(features) != expected_features:
+                 print(f"[ERROR] Incorrect number of features. Expected {expected_features}, got {len(features)}")
+                 logger.error(f"Incorrect number of features. Expected {expected_features}, got {len(features)}")
+                 return None
+
+             print(f"[DEBUG] Successfully extracted {len(features)} features")
+             logger.info(f"Successfully extracted {len(features)} features")
+             logger.debug(f"Feature values: {features}")
+         except Exception as e:
+             print(f"[ERROR] Error during feature extraction: {str(e)}")
+             logger.error(f"Error during feature extraction: {str(e)}")
+             logger.debug(f"Full traceback: {traceback.format_exc()}")
+             return None
+
+         # Step 4: Scale features
+         try:
+             print("[DEBUG] Scaling features...")
+             logger.info("Scaling features...")
+             features_scaled = scaler.transform(features.reshape(1, -1))
+             print("[DEBUG] Features scaled successfully")
+             logger.info("Features scaled successfully")
+         except Exception as e:
+             print(f"[ERROR] Error scaling features: {str(e)}")
+             logger.error(f"Error scaling features: {str(e)}")
+             return None
+
+         # Step 5: Make prediction
+         try:
+             print("[DEBUG] Making prediction...")
+             logger.info("Making prediction...")
+             probability = model.predict_proba(features_scaled)[0][1]
+             print(f"[DEBUG] Prediction successful: {probability:.2f}")
+             logger.info(f"Prediction successful: {probability:.2f}")
+             return probability
+         except Exception as e:
+             print(f"[ERROR] Error making prediction: {str(e)}")
+             logger.error(f"Error making prediction: {str(e)}")
+             return None
+
+     except Exception as e:
+         print(f"[ERROR] Unexpected error during prediction: {str(e)}")
+         logger.error(f"Unexpected error during prediction: {str(e)}")
+         return None
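
For a quick check without the web UI, `predict_healing_music` can be called directly. Note that it loads `model.joblib` and `scaler.joblib` from the current working directory, so run it from the repository root (the audio path below is hypothetical):

```python
from predict import predict_healing_music

prob = predict_healing_music("healing_music/sample.mp3")  # hypothetical path
if prob is not None:
    print(f"Healing probability: {prob * 100:.1f}%")
else:
    print("Analysis failed: invalid audio or missing model files.")
```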
requirements.txt ADDED
@@ -0,0 +1,7 @@
+ gradio>=3.50.2
+ librosa>=0.10.1
+ numpy>=1.24.3
+ scikit-learn>=1.3.0
+ soundfile>=0.12.1
+ joblib>=1.3.2
+ pandas>=2.0.3
scaler.joblib ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:14e0cd7fc9d652cf93b29141e4ef46f8436edfd576c2d260262d105e21a36e77
+ size 1496