StormblessedKal committed on
Commit
bb029fa
1 Parent(s): 6ee526e
Files changed (2) hide show
  1. src/predict.py +1 -1
  2. src/se_extractor.py +2 -2
src/predict.py CHANGED
@@ -70,7 +70,7 @@ class Predictor:
70
  self.textclenaer = None
71
  self.mean = 0
72
  self.std = 0
73
- self.device = 'cuda:0'
74
 
75
  self.ckpt_base = 'checkpoints/base_speakers/EN'
76
  self.ckpt_converter = 'checkpoints/converter'
 
70
  self.textclenaer = None
71
  self.mean = 0
72
  self.std = 0
73
+ self.device = 'cuda'
74
 
75
  self.ckpt_base = 'checkpoints/base_speakers/EN'
76
  self.ckpt_converter = 'checkpoints/converter'
src/se_extractor.py CHANGED
@@ -12,7 +12,7 @@ model = None
12
  model_size = 'medium'
13
  def split_audio_whisper(audio_path, target_dir='processed',needs_offset=True):
14
  print("in whisper split")
15
- model = WhisperModel('medium', device="cuda:0", compute_type="float16")
16
  audio = AudioSegment.from_file(audio_path)
17
  max_len = len(audio)
18
 
@@ -162,7 +162,7 @@ def load_model():
162
 
163
  def extract_segments_to_cut_audio(max_duration,audio_path,target_dir='processed'):
164
 
165
- model = WhisperModel('medium', device="cuda:0", compute_type="float16")
166
  audio = AudioSegment.from_file(audio_path)
167
  max_len = len(audio)
168
 
 
12
  model_size = 'medium'
13
  def split_audio_whisper(audio_path, target_dir='processed',needs_offset=True):
14
  print("in whisper split")
15
+ model = WhisperModel('medium', device="cuda", compute_type="float16")
16
  audio = AudioSegment.from_file(audio_path)
17
  max_len = len(audio)
18
 
 
162
 
163
  def extract_segments_to_cut_audio(max_duration,audio_path,target_dir='processed'):
164
 
165
+ model = WhisperModel('medium', device="cuda", compute_type="float16")
166
  audio = AudioSegment.from_file(audio_path)
167
  max_len = len(audio)
168