Update app.py
app.py CHANGED
@@ -52,11 +52,24 @@ def inference(audio_file: str, model_name: str, vocals: bool, drums: bool, bass:
 
     # Use apply_model as a standalone function
     try:
-
+        result = apply_model(separator, wav.to(device), device=device)
+        yield None, stream_log(f"Model application result type: {type(result)}")
+        yield None, stream_log(f"Model application result shape: {result.shape if hasattr(result, 'shape') else 'N/A'}")
+
+        if isinstance(result, tuple) and len(result) == 2:
+            sources, _ = result
+        elif isinstance(result, torch.Tensor):
+            sources = result
+        else:
+            raise ValueError(f"Unexpected result type from apply_model: {type(result)}")
+
+        yield None, stream_log(f"Sources shape: {sources.shape}")
     except ValueError as e:
         yield None, stream_log(f"Error applying model: {str(e)}")
         yield None, stream_log(f"Separator sources: {separator.sources}")
         yield None, stream_log(f"WAV shape: {wav.shape}")
+        yield None, stream_log(f"Separator model: {separator.__class__.__name__}")
+        yield None, stream_log(f"Separator config: {separator.config}")
         raise gr.Error(f"Failed to apply model: {str(e)}. This might be due to incompatible audio format or model configuration.")
     except Exception as e:
         yield None, stream_log(f"Unexpected error applying model: {str(e)}")
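For reference, here is a minimal, self-contained sketch of calling apply_model on a pretrained Demucs model outside the Space, mirroring what the added lines expect; in the diff, the variable named separator appears to hold the loaded model. This assumes the demucs package's get_model/apply_model API and torchaudio for loading audio; the model name "htdemucs" and the file path "input.wav" are placeholders. In the demucs versions I'm aware of, apply_model returns a single tensor of shape (batch, sources, channels, samples), so the tuple branch in the diff acts as a defensive fallback rather than the expected path.

import torch
import torchaudio
from demucs.pretrained import get_model
from demucs.apply import apply_model

device = "cuda" if torch.cuda.is_available() else "cpu"

# Load a pretrained separator; "htdemucs" is a placeholder model name.
model = get_model("htdemucs")
model.to(device).eval()

# torchaudio.load returns (channels, samples); apply_model expects a batch
# dimension, i.e. (batch, channels, samples). Resampling to model.samplerate
# may be needed if the input rate differs.
wav, sr = torchaudio.load("input.wav")
mix = wav.unsqueeze(0).to(device)

with torch.no_grad():
    # Expected shape: (batch, n_sources, channels, samples)
    sources = apply_model(model, mix, device=device)

# model.sources names the stems (e.g. drums, bass, other, vocals), which is
# what the diff later reads as separator.sources.
stems = {name: sources[0, i] for i, name in enumerate(model.sources)}
for name, stem in stems.items():
    print(name, tuple(stem.shape))

Indexing the result by model.sources is what lets the app later write out only the stems the user selected (vocals, drums, bass, and so on).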