import torchaudio
from pyannote.audio import Pipeline

# Load the pretrained voice-activity-detection pipeline once at module import,
# so repeated calls to detect_speech_segments() reuse the same model.
# NOTE(review): Pipeline.from_pretrained may download weights and, for pyannote
# models, can require a HuggingFace auth token — confirm the deployment
# environment has model access before relying on import-time loading.
pipeline = Pipeline.from_pretrained("pyannote/voice-activity-detection")
def detect_speech_segments(audio_path):
    """Run voice-activity detection on an audio file.

    Parameters
    ----------
    audio_path : str or os.PathLike
        Path to the audio file to analyze (any format the pipeline accepts).

    Returns
    -------
    list[tuple[float, float]]
        ``(start, end)`` times in seconds for each detected speech region.
    """
    vad_result = pipeline(audio_path)
    # support() merges overlapping/adjacent segments into maximal
    # non-overlapping spans before we extract the boundaries.
    return [(segment.start, segment.end)
            for segment in vad_result.get_timeline().support()]