Niral Patel
committed on
Commit · fee470d
1 Parent(s): f17f988
change in model config
- .gitattributes +1 -0
- .gitignore +2 -1
- custom_model.py +1 -1
- pretrained_models/2stems/._checkpoint +0 -0
- pretrained_models/2stems/.probe +1 -0
- pretrained_models/2stems/checkpoint +2 -0
- pretrained_models/2stems/model.data-00000-of-00001 +3 -0
- pretrained_models/2stems/model.index +0 -0
- pretrained_models/2stems/model.meta +0 -0
- test.py +9 -21
.gitattributes
CHANGED
@@ -36,3 +36,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.tar.gz filter=lfs diff=lfs merge=lfs -text
 model.data-00000-of-00001 filter=lfs diff=lfs merge=lfs -text
 vocals.wav filter=lfs diff=lfs merge=lfs -text
+pretrained_models/2stems/model.data-00000-of-00001 filter=lfs diff=lfs merge=lfs -text
.gitignore
CHANGED
@@ -1 +1,2 @@
-env/
+env/
+__pycache__/
custom_model.py
CHANGED
@@ -25,7 +25,7 @@ class SpleeterModel(PreTrainedModel):
         Returns:
             dict: Separated stems.
         """
-        return self.separator.
+        return self.separator.separate_to_file(audio_path, "separated_audio")
 
 
 AutoConfig.register("spleeter", SpleeterConfig)
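For context, here is a minimal, hypothetical sketch of what custom_model.py plausibly looks like around this change. Only the return self.separator.separate_to_file(...) line, its docstring, and the AutoConfig.register("spleeter", SpleeterConfig) call are visible in this diff; the imports, the SpleeterConfig class, and the Separator("spleeter:2stems") setup in __init__ are assumptions based on the standard spleeter and transformers APIs and the bundled 2-stem checkpoint.

# Hypothetical reconstruction: only the forward() return line and the
# AutoConfig.register call are confirmed by this commit's diff.
from spleeter.separator import Separator
from transformers import AutoConfig, PretrainedConfig, PreTrainedModel


class SpleeterConfig(PretrainedConfig):
    model_type = "spleeter"


class SpleeterModel(PreTrainedModel):
    config_class = SpleeterConfig

    def __init__(self, config):
        super().__init__(config)
        # Assumed: build the separator from the bundled 2-stem checkpoint.
        self.separator = Separator("spleeter:2stems")

    def forward(self, audio_path):
        """Separate audio_path into stems.

        Returns:
            dict: Separated stems.
        """
        # Confirmed by the diff: delegate to spleeter and write the stems
        # under the "separated_audio" directory.
        return self.separator.separate_to_file(audio_path, "separated_audio")


AutoConfig.register("spleeter", SpleeterConfig)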
pretrained_models/2stems/._checkpoint
ADDED
Binary file (176 Bytes).
pretrained_models/2stems/.probe
ADDED
@@ -0,0 +1 @@
+OK
pretrained_models/2stems/checkpoint
ADDED
@@ -0,0 +1,2 @@
+model_checkpoint_path: "model"
+all_model_checkpoint_paths: "model"
pretrained_models/2stems/model.data-00000-of-00001
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7747f9fd2c782306dbec1504360fbb645a097a48446f23486c3ff9c89bc11788
+size 78614080
pretrained_models/2stems/model.index
ADDED
Binary file (5.24 kB).
pretrained_models/2stems/model.meta
ADDED
Binary file (806 kB).
test.py
CHANGED
@@ -1,21 +1,6 @@
-# from transformers import AutoConfig, AutoModel
-
-# config = AutoConfig.from_pretrained("niral-env/youtube_spleeter")
-# print(config)
-# model = AutoModel.from_pretrained("niral-env/youtube_spleeter", config=config)
-# print(model)
-
-
-# Load model directly
-# from transformers import AutoModel
-# model = AutoModel.from_pretrained("niral-env/youtube_spleeter")
-
-# print(model)
-
 from transformers import AutoConfig, AutoModel
 from custom_model import SpleeterModel
 
-# Load configuration and model
 config = AutoConfig.from_pretrained("niral-env/youtube_spleeter")
 print("----"*30)
 print(config)
@@ -23,12 +8,15 @@ print("----"*30)
 
 model = SpleeterModel(config)
 
-# Process an audio file
-# result = model.forward("example.wav")
-
-# Output separated stems
 print(model)
 result = model.forward("vocals.wav")
 
-
-
+print(result)
+
+
+# from transformers import AutoModel
+# model = AutoModel.from_pretrained("niral-env/youtube_spleeter")
+# print(model)
+# result = model.forward("vocals.wav")
+
+# print(result)