Spaces: Sleeping

Manjot Singh committed
Commit 04843f3 · 1 Parent(s): b798161

add req
Files changed:
- app.py +1 -1
- audio_processing.py +1 -1
- requirements.txt +2 -1
app.py
CHANGED
@@ -10,7 +10,7 @@ if torch.cuda.is_available():
 else:
     print("No CUDA GPUs available. Running on CPU.")
 
-@spaces.GPU
+@spaces.GPU(duration=180)
 def transcribe_audio(audio_file, translate, model_size):
     language_segments, final_segments = process_audio(audio_file, translate=translate, model_size=model_size)
 
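The change in app.py is confined to the decorator argument: on a ZeroGPU Space, @spaces.GPU requests a GPU allocation for the duration of the call, and duration=180 asks for a longer window than the default so longer transcriptions are not cut off mid-run. A minimal sketch of the decorated handler, with the import and return value assumed (the diff only shows the decorator, the signature, and the process_audio call):

    import spaces
    from audio_processing import process_audio  # assumed import; process_audio is defined there per the next diff

    @spaces.GPU(duration=180)  # request up to 180 s of GPU time per call instead of the default window
    def transcribe_audio(audio_file, translate, model_size):
        # Run the full pipeline and hand the segments back to the UI layer.
        language_segments, final_segments = process_audio(
            audio_file, translate=translate, model_size=model_size
        )
        return language_segments, final_segments  # return shape assumed, not shown in the diff
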
audio_processing.py
CHANGED
@@ -30,7 +30,7 @@ def preprocess_audio(audio, chunk_size=CHUNK_LENGTH*16000, overlap=OVERLAP*16000
         chunks.append(chunk)
     return chunks
 
-@spaces.GPU
+@spaces.GPU(duration=180)
 def process_audio(audio_file, translate=False, model_size="small"):
     start_time = time.time()
 
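audio_processing.py gets the identical decorator change on process_audio, the function that actually runs the model, so the GPU window covers the whole pipeline and not just the Gradio handler. The hunk context also shows the tail of preprocess_audio, which cuts the input into overlapping chunks; a minimal sketch of that kind of split, assuming 16 kHz samples and made-up values for CHUNK_LENGTH and OVERLAP (the real function body is not in the diff):

    CHUNK_LENGTH = 30  # seconds per chunk (assumed value)
    OVERLAP = 2        # seconds shared between consecutive chunks (assumed value)

    def preprocess_audio(audio, chunk_size=CHUNK_LENGTH * 16000, overlap=OVERLAP * 16000):
        # Slide a window of chunk_size samples over the audio, stepping by
        # chunk_size - overlap so consecutive chunks share some context.
        chunks = []
        step = chunk_size - overlap
        for start in range(0, len(audio), step):
            chunk = audio[start:start + chunk_size]
            chunks.append(chunk)
        return chunks
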
requirements.txt
CHANGED
@@ -18,4 +18,5 @@ ctranslate2
 torchvision
 cdifflib
 pydub
-cuda-python
+cuda-python
+nvidia-cudnn
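
requirements.txt gains nvidia-cudnn; cuda-python was already present, and the -/+ pair on it most likely just reflects the old last line being rewritten (e.g. a missing trailing newline). Presumably the new package is there so cuDNN is available to the GPU inference stack (ctranslate2 / torch), though the commit message only says "add req". A quick sanity check, not part of the repo, to confirm the libraries are visible after installation:

    # Verify that the installed torch build can see CUDA and cuDNN.
    import torch

    print("CUDA available:", torch.cuda.is_available())
    print("cuDNN available:", torch.backends.cudnn.is_available())
    print("cuDNN version:", torch.backends.cudnn.version())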