Upload model
modeling_test.py  +9 -1
CHANGED
@@ -3,6 +3,7 @@ from .configuration_test import TestConfig
 import torch.nn as nn
 from transformers import AutoModelForMaskedLM, AutoConfig
 import librosa
+from huggingface_hub import hf_hub_download
 import os
 
 
@@ -16,9 +17,16 @@ class TestModel(PreTrainedModel):
         self.model2 = AutoModelForMaskedLM.from_config(
             AutoConfig.from_pretrained("albert/albert-base-v2")
         )
+        self.path = config.name_or_path
 
     def get_audio_duration(self):
-
+        audio_path = hf_hub_download(
+            repo_id = self.path,
+            file_path = "output1.wav",
+            repo_type = "model",
+            # local_dir = os.path.abspath(os.path.dirname(os.path.abspath(__file__)))
+        )
+        arr, sr = librosa.load(audio_path)
         return librosa.get_duration(arr, sr)
 
     def forward(self, tensor):
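Two details of the committed `get_audio_duration` are worth flagging: `hf_hub_download` takes the file name through its `filename` parameter (it has no `file_path` argument), and newer `librosa` releases (0.10+) make the `y` and `sr` arguments of `get_duration` keyword-only. Below is a minimal standalone sketch with those adjustments, assuming the repo id is passed in directly rather than read from `config.name_or_path` as the committed method does:

```python
from huggingface_hub import hf_hub_download
import librosa


def get_audio_duration(repo_id: str) -> float:
    """Return the duration (in seconds) of output1.wav stored in a model repo."""
    # Download the audio file from the Hub; `filename` is the actual
    # hf_hub_download parameter for the path inside the repo.
    audio_path = hf_hub_download(
        repo_id=repo_id,
        filename="output1.wav",
        repo_type="model",
    )
    # librosa.load returns the decoded waveform and its sample rate.
    arr, sr = librosa.load(audio_path)
    # Keyword arguments keep this compatible with librosa 0.10+.
    return librosa.get_duration(y=arr, sr=sr)
```

Used as a method on the model class, `self.path` (set from `config.name_or_path` in the diff) would take the place of the `repo_id` argument.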