ducdatit2002 committed
Commit 08fe07d · verified · 1 Parent(s): a0dfa29

Upload 2 files

Files changed (2):
  1. app.py +267 -0
  2. requirements.txt +311 -0
app.py ADDED
@@ -0,0 +1,267 @@
+ # app.py
+
+ # -*- coding: utf-8 -*-
+ """
+ Vietnamese End-to-End Speech Recognition using Wav2Vec 2.0 with Speaker Diarization.
+ Streamlit Application with merged speaker segments and timestamps.
+ """
+
+ import os
+ import zipfile
+ import torch
+ import soundfile as sf
+ from transformers import Wav2Vec2Processor, Wav2Vec2ForCTC
+ import kenlm
+ from pyctcdecode import Alphabet, BeamSearchDecoderCTC, LanguageModel
+ from huggingface_hub import hf_hub_download
+ import streamlit as st
+ import numpy as np
+ import librosa
+ import logging
+
+ logging.basicConfig(level=logging.INFO)
+
+ @st.cache_resource(show_spinner=False)
+ def load_model_and_tokenizer(cache_dir='./cache/'):
+     st.info("Loading processor and model...")
+     processor = Wav2Vec2Processor.from_pretrained(
+         "nguyenvulebinh/wav2vec2-base-vietnamese-250h",
+         cache_dir=cache_dir
+     )
+     model = Wav2Vec2ForCTC.from_pretrained(
+         "nguyenvulebinh/wav2vec2-base-vietnamese-250h",
+         cache_dir=cache_dir
+     )
+
+     st.info("Downloading language model...")
+     lm_zip_file = hf_hub_download(
+         repo_id="nguyenvulebinh/wav2vec2-base-vietnamese-250h",
+         filename="vi_lm_4grams.bin.zip",
+         cache_dir=cache_dir
+     )
+
+     st.info("Extracting language model...")
+     with zipfile.ZipFile(lm_zip_file, 'r') as zip_ref:
+         zip_ref.extractall(cache_dir)
+
+     lm_file = os.path.join(cache_dir, 'vi_lm_4grams.bin')
+     if not os.path.isfile(lm_file):
+         raise FileNotFoundError(f"Language model file not found: {lm_file}")
+
+     st.success("Processor, model, and language model loaded successfully.")
+     return processor, model, lm_file
+
+ @st.cache_resource(show_spinner=False)
+ def get_decoder_ngram_model(_tokenizer, ngram_lm_path):
+     st.info("Building decoder with n-gram language model...")
+     vocab_dict = _tokenizer.get_vocab()
+     sorted_vocab = sorted((value, key) for (key, value) in vocab_dict.items())
+     vocab_list = [token for _, token in sorted_vocab][:-2]  # Exclude special tokens
+
+     alphabet = Alphabet.build_alphabet(vocab_list)
+     lm_model = kenlm.Model(ngram_lm_path)
+     decoder = BeamSearchDecoderCTC(alphabet, language_model=LanguageModel(lm_model))
+     st.success("Decoder built successfully.")
+     return decoder
+
+ def transcribe_chunk(model, processor, decoder, speech_chunk, sampling_rate):
+     if speech_chunk.ndim > 1:
+         speech_chunk = np.mean(speech_chunk, axis=1)
+     speech_chunk = speech_chunk.astype(np.float32)
+
+     target_sr = 16000
+     if sampling_rate != target_sr:
+         speech_chunk = librosa.resample(speech_chunk, orig_sr=sampling_rate, target_sr=target_sr)
+         sampling_rate = target_sr
+
+     MIN_DURATION = 0.5  # seconds
+     MIN_SAMPLES = int(MIN_DURATION * sampling_rate)
+
+     if len(speech_chunk) < MIN_SAMPLES:
+         # Pad with zeros
+         padding = MIN_SAMPLES - len(speech_chunk)
+         speech_chunk = np.pad(speech_chunk, (0, padding), 'constant')
+
+     input_values = processor(
+         speech_chunk, sampling_rate=sampling_rate, return_tensors="pt"
+     ).input_values
+
+     with torch.no_grad():
+         logits = model(input_values).logits[0]
+
+     beam_search_output = decoder.decode(
+         logits.cpu().detach().numpy(),
+         beam_width=500
+     )
+     return beam_search_output
+
+ def alternative_speaker_diarization(audio_file, num_speakers=2):
+     try:
+         # Use librosa to load the audio file
+         y, sr = librosa.load(audio_file, sr=None)
+
+         # Rough segmentation based on energy
+         intervals = librosa.effects.split(y, top_db=30)  # Adjust top_db as needed
+
+         # Merge very short intervals
+         MIN_INTERVAL_DURATION = 0.5  # seconds
+         MIN_SAMPLES = int(MIN_INTERVAL_DURATION * sr)
+         merged_intervals = []
+         for interval in intervals:
+             if merged_intervals and (interval[0] - merged_intervals[-1][1]) < MIN_SAMPLES:
+                 merged_intervals[-1][1] = interval[1]
+             else:
+                 merged_intervals.append([interval[0], interval[1]])
+
+         # Assign speakers cyclically
+         segments = []
+         for i, (start, end) in enumerate(merged_intervals):
+             speaker_id = i % num_speakers
+             start_time = start / sr
+             end_time = end / sr
+             segments.append((start_time, end_time, speaker_id))
+
+         return segments
+
+     except Exception as e:
+         st.error(f"Speaker diarization failed: {e}")
+         # Fall back to a simple equal-length segmentation
+         audio, sr = sf.read(audio_file)
+         total_duration = len(audio) / sr
+         segment_duration = total_duration / num_speakers
+
+         segments = []
+         for i in range(num_speakers):
+             start = i * segment_duration
+             end = (i + 1) * segment_duration
+             segments.append((start, end, i))
+
+         return segments
+
+ def process_segments(audio_file, segments, model, processor, decoder, sampling_rate=16000):
+     speech, sr = sf.read(audio_file)
+     final_transcriptions = []
+
+     # Remove duplicate or overlapping segments
+     unique_segments = []
+     for segment in sorted(segments, key=lambda x: x[0]):
+         if not unique_segments or segment[0] >= unique_segments[-1][1]:
+             unique_segments.append(segment)
+
+     for start, end, speaker_id in unique_segments:
+         start_sample = int(start * sr)
+         end_sample = int(end * sr)
+         speech_chunk = speech[start_sample:end_sample]
+         transcript = transcribe_chunk(model, processor, decoder, speech_chunk, sr)
+
+         # Only add non-empty transcripts
+         if transcript.strip():
+             # Store (start, end, speaker_id, transcript)
+             final_transcriptions.append((start, end, speaker_id, transcript))
+
+     return final_transcriptions
+
+ def format_timestamp(seconds):
+     # Format the timestamp as MM:SS
+     total_seconds = int(seconds)
+     mm = total_seconds // 60
+     ss = total_seconds % 60
+     return f"{mm:02d}:{ss:02d}"
+
+ def merge_speaker_segments(final_transcriptions):
+     # Merge consecutive segments from the same speaker
+     if not final_transcriptions:
+         return []
+
+     merged_results = []
+     prev_start, prev_end, prev_speaker_id, prev_text = final_transcriptions[0]
+
+     for i in range(1, len(final_transcriptions)):
+         start, end, speaker_id, text = final_transcriptions[i]
+         if speaker_id == prev_speaker_id:
+             # Same speaker: extend the current segment
+             prev_end = end
+             prev_text += " " + text
+         else:
+             # Different speaker: close the current segment and start a new one
+             merged_results.append((prev_start, prev_end, prev_speaker_id, prev_text))
+             prev_start, prev_end, prev_speaker_id, prev_text = start, end, speaker_id, text
+
+     # Append the last segment
+     merged_results.append((prev_start, prev_end, prev_speaker_id, prev_text))
+
+     return merged_results
+
+ def main():
+     st.title("🇻🇳 Vietnamese Speech Recognition with Speaker Diarization (with merging & timestamps)")
+
+     st.write("""
+     Upload an audio file, select the number of speakers, and get the transcribed text with timestamps and merged segments for each speaker.
+     """)
+
+     # Sidebar for inputs
+     st.sidebar.header("Input Parameters")
+     uploaded_file = st.sidebar.file_uploader("Upload Audio File", type=["wav", "mp3", "flac", "m4a"])
+     num_speakers = st.sidebar.slider("Number of Speakers", min_value=1, max_value=5, value=2, step=1)
+
+     if uploaded_file is not None:
+         # Save the uploaded file to a temporary location
+         temp_audio_path = "temp_audio_file"
+         with open(temp_audio_path, "wb") as f:
+             f.write(uploaded_file.getbuffer())
+
+         # Display audio player
+         st.audio(uploaded_file, format='audio/wav')
+
+         if st.button("Transcribe"):
+             with st.spinner("Processing..."):
+                 try:
+                     # Load models
+                     processor, model, lm_file = load_model_and_tokenizer()
+                     decoder = get_decoder_ngram_model(processor.tokenizer, lm_file)
+
+                     # Speaker diarization
+                     segments = alternative_speaker_diarization(temp_audio_path, num_speakers=num_speakers)
+
+                     if not segments:
+                         st.warning("No speech segments detected.")
+                         return
+
+                     # Process segments
+                     final_transcriptions = process_segments(temp_audio_path, segments, model, processor, decoder)
+
+                     # Merge consecutive segments of the same speaker
+                     merged_results = merge_speaker_segments(final_transcriptions)
+
+                     # Display results
+                     if merged_results:
+                         st.success("Transcription Completed!")
+                         transcription_text = ""
+                         for start_time, end_time, speaker_id, transcript in merged_results:
+                             start_str = format_timestamp(start_time)
+                             end_str = format_timestamp(end_time)
+                             line = f"{start_str} - {end_str} - Speaker {speaker_id + 1}: {transcript}"
+                             st.markdown(line)
+                             transcription_text += line + "\n"
+
+                         # Provide download link
+                         st.download_button(
+                             label="Download Transcription",
+                             data=transcription_text,
+                             file_name="transcription.txt",
+                             mime="text/plain"
+                         )
+                     else:
+                         st.warning("No transcriptions available.")
+
+                 except Exception as e:
+                     st.error(f"An error occurred during processing: {e}")
+
+             # Optionally, remove the temporary file after processing
+             if os.path.exists(temp_audio_path):
+                 os.remove(temp_audio_path)
+     else:
+         st.info("Please upload an audio file to get started.")
+
+ if __name__ == '__main__':
+     main()
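
For reference, a minimal sketch of driving the same pipeline from a plain Python script instead of the Streamlit UI. It assumes app.py is importable from the working directory and that a local audio file exists at the hypothetical path sample.wav; the st.* status calls inside the cached loaders only emit console warnings when no Streamlit session is running.

# pipeline_sketch.py -- illustrative only, not part of this commit
# Assumes: app.py on the import path, a local file "sample.wav" (hypothetical).
from app import (
    load_model_and_tokenizer,
    get_decoder_ngram_model,
    alternative_speaker_diarization,
    process_segments,
    merge_speaker_segments,
    format_timestamp,
)

AUDIO_PATH = "sample.wav"  # hypothetical input file

# Load the Wav2Vec2 processor/model and build the KenLM-backed beam-search decoder
processor, model, lm_file = load_model_and_tokenizer()
decoder = get_decoder_ngram_model(processor.tokenizer, lm_file)

# Energy-based segmentation with cyclic speaker assignment, then per-segment decoding
segments = alternative_speaker_diarization(AUDIO_PATH, num_speakers=2)
transcriptions = process_segments(AUDIO_PATH, segments, model, processor, decoder)

# Merge consecutive same-speaker segments and print timestamped lines
for start, end, speaker_id, text in merge_speaker_segments(transcriptions):
    print(f"{format_timestamp(start)} - {format_timestamp(end)} - Speaker {speaker_id + 1}: {text}")
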
requirements.txt ADDED
@@ -0,0 +1,311 @@
+ aiohappyeyeballs==2.4.4
+ aiohttp==3.11.10
+ aioice==0.9.0
+ aiortc==1.9.0
+ aiosignal==1.3.1
+ alembic==1.14.0
+ altair==5.5.0
+ antlr4-python3-runtime==4.9.3
+ asteroid-filterbanks==0.4.0
+ attrs==24.2.0
+ audioread==3.0.1
+ av==12.3.0
+ blinker==1.9.0
+ cachetools==5.5.0
+ certifi==2024.8.30
+ cffi==1.17.1
+ charset-normalizer==3.4.0
+ click==8.1.7
+ colorlog==6.9.0
+ contourpy==1.3.1
+ coverage==5.5
+ cryptography==44.0.0
+ cycler==0.12.1
+ datasets==3.1.0
+ decorator==5.1.1
+ deprecation==2.1.0
+ dill==0.3.8
+ dnspython==2.7.0
+ docopt==0.6.2
+ einops==0.8.0
+ eyed3==0.9.7
+ ffmpeg-python==0.2.0
+ filelock==3.16.1
+ filetype==1.2.0
+ fonttools==4.55.2
+ frozenlist==1.5.0
+ fsspec==2024.9.0
+ future==1.0.0
+ gitdb==4.0.11
+ GitPython==3.1.43
+ google-crc32c==1.6.0
+ hmmlearn==0.3.3
+ huggingface-hub==0.26.3
+ HyperPyYAML==1.2.2
+ hypothesis==6.122.1
+ idna==3.10
+ ifaddr==0.2.0
+ imbalanced-learn==0.12.4
+ imblearn==0.0
+ Jinja2==3.1.4
+ joblib==1.4.2
+ jsonschema==4.23.0
+ jsonschema-specifications==2024.10.1
+ julius==0.2.7
+ kenlm @ git+https://github.com/kpu/kenlm.git@f6c947dc943859e265fabce886232205d0fb2b37
+ kiwisolver==1.4.7
+ lazy_loader==0.4
+ librosa==0.10.2.post1
+ lightning==2.4.0
+ lightning-utilities==0.11.9
+ llvmlite==0.43.0
+ Mako==1.3.8
+ markdown-it-py==3.0.0
+ MarkupSafe==3.0.2
+ matplotlib==3.9.3
+ mdurl==0.1.2
+ mpmath==1.3.0
+ msgpack==1.1.0
+ multidict==6.1.0
+ multiprocess==0.70.16
+ narwhals==1.15.2
+ networkx==3.4.2
+ numba==0.60.0
+ numpy==1.26.4
+ omegaconf==2.3.0
+ optuna==4.1.0
+ packaging==24.2
+ pandas==2.2.3
+ pillow==11.0.0
+ platformdirs==4.3.6
+ playsound==1.3.0
+ plotly==5.24.1
+ pooch==1.8.2
+ primePy==1.3
+ propcache==0.2.1
+ protobuf==5.29.1
+ pyannote.audio==3.3.2
+ pyannote.core==5.0.0
+ pyannote.database==5.1.0
+ pyannote.metrics==3.2.1
+ pyannote.pipeline==3.0.1
+ pyarrow==18.1.0
+ pyAudioAnalysis==0.3.14
+ pycparser==2.22
+ pyctcdecode==0.5.0
+ pydeck==0.9.1
+ pydub==0.25.1
+ pyee==12.1.1
+ Pygments==2.18.0
+ pygtrie==2.5.0
+ pylibsrtp==0.10.0
+ pyobjc==10.3.2
+ pyobjc-core==10.3.2
+ pyobjc-framework-Accessibility==10.3.2
+ pyobjc-framework-Accounts==10.3.2
+ pyobjc-framework-AddressBook==10.3.2
+ pyobjc-framework-AdServices==10.3.2
+ pyobjc-framework-AdSupport==10.3.2
+ pyobjc-framework-AppleScriptKit==10.3.2
+ pyobjc-framework-AppleScriptObjC==10.3.2
+ pyobjc-framework-ApplicationServices==10.3.2
+ pyobjc-framework-AppTrackingTransparency==10.3.2
+ pyobjc-framework-AudioVideoBridging==10.3.2
+ pyobjc-framework-AuthenticationServices==10.3.2
+ pyobjc-framework-AutomaticAssessmentConfiguration==10.3.2
+ pyobjc-framework-Automator==10.3.2
+ pyobjc-framework-AVFoundation==10.3.2
+ pyobjc-framework-AVKit==10.3.2
+ pyobjc-framework-AVRouting==10.3.2
+ pyobjc-framework-BackgroundAssets==10.3.2
+ pyobjc-framework-BrowserEngineKit==10.3.2
+ pyobjc-framework-BusinessChat==10.3.2
+ pyobjc-framework-CalendarStore==10.3.2
+ pyobjc-framework-CallKit==10.3.2
+ pyobjc-framework-CFNetwork==10.3.2
+ pyobjc-framework-Cinematic==10.3.2
+ pyobjc-framework-ClassKit==10.3.2
+ pyobjc-framework-CloudKit==10.3.2
+ pyobjc-framework-Cocoa==10.3.2
+ pyobjc-framework-Collaboration==10.3.2
+ pyobjc-framework-ColorSync==10.3.2
+ pyobjc-framework-Contacts==10.3.2
+ pyobjc-framework-ContactsUI==10.3.2
+ pyobjc-framework-CoreAudio==10.3.2
+ pyobjc-framework-CoreAudioKit==10.3.2
+ pyobjc-framework-CoreBluetooth==10.3.2
+ pyobjc-framework-CoreData==10.3.2
+ pyobjc-framework-CoreHaptics==10.3.2
+ pyobjc-framework-CoreLocation==10.3.2
+ pyobjc-framework-CoreMedia==10.3.2
+ pyobjc-framework-CoreMediaIO==10.3.2
+ pyobjc-framework-CoreMIDI==10.3.2
+ pyobjc-framework-CoreML==10.3.2
+ pyobjc-framework-CoreMotion==10.3.2
+ pyobjc-framework-CoreServices==10.3.2
+ pyobjc-framework-CoreSpotlight==10.3.2
+ pyobjc-framework-CoreText==10.3.2
+ pyobjc-framework-CoreWLAN==10.3.2
+ pyobjc-framework-CryptoTokenKit==10.3.2
+ pyobjc-framework-DataDetection==10.3.2
+ pyobjc-framework-DeviceCheck==10.3.2
+ pyobjc-framework-DictionaryServices==10.3.2
+ pyobjc-framework-DiscRecording==10.3.2
+ pyobjc-framework-DiscRecordingUI==10.3.2
+ pyobjc-framework-DiskArbitration==10.3.2
+ pyobjc-framework-DVDPlayback==10.3.2
+ pyobjc-framework-EventKit==10.3.2
+ pyobjc-framework-ExceptionHandling==10.3.2
+ pyobjc-framework-ExecutionPolicy==10.3.2
+ pyobjc-framework-ExtensionKit==10.3.2
+ pyobjc-framework-ExternalAccessory==10.3.2
+ pyobjc-framework-FileProvider==10.3.2
+ pyobjc-framework-FileProviderUI==10.3.2
+ pyobjc-framework-FinderSync==10.3.2
+ pyobjc-framework-FSEvents==10.3.2
+ pyobjc-framework-GameCenter==10.3.2
+ pyobjc-framework-GameController==10.3.2
+ pyobjc-framework-GameKit==10.3.2
+ pyobjc-framework-GameplayKit==10.3.2
+ pyobjc-framework-HealthKit==10.3.2
+ pyobjc-framework-ImageCaptureCore==10.3.2
+ pyobjc-framework-InputMethodKit==10.3.2
+ pyobjc-framework-InstallerPlugins==10.3.2
+ pyobjc-framework-InstantMessage==10.3.2
+ pyobjc-framework-Intents==10.3.2
+ pyobjc-framework-IntentsUI==10.3.2
+ pyobjc-framework-IOBluetooth==10.3.2
+ pyobjc-framework-IOBluetoothUI==10.3.2
+ pyobjc-framework-IOSurface==10.3.2
+ pyobjc-framework-iTunesLibrary==10.3.2
+ pyobjc-framework-KernelManagement==10.3.2
+ pyobjc-framework-LatentSemanticMapping==10.3.2
+ pyobjc-framework-LaunchServices==10.3.2
+ pyobjc-framework-libdispatch==10.3.2
+ pyobjc-framework-libxpc==10.3.2
+ pyobjc-framework-LinkPresentation==10.3.2
+ pyobjc-framework-LocalAuthentication==10.3.2
+ pyobjc-framework-LocalAuthenticationEmbeddedUI==10.3.2
+ pyobjc-framework-MailKit==10.3.2
+ pyobjc-framework-MapKit==10.3.2
+ pyobjc-framework-MediaAccessibility==10.3.2
+ pyobjc-framework-MediaLibrary==10.3.2
+ pyobjc-framework-MediaPlayer==10.3.2
+ pyobjc-framework-MediaToolbox==10.3.2
+ pyobjc-framework-Metal==10.3.2
+ pyobjc-framework-MetalFX==10.3.2
+ pyobjc-framework-MetalKit==10.3.2
+ pyobjc-framework-MetalPerformanceShaders==10.3.2
+ pyobjc-framework-MetalPerformanceShadersGraph==10.3.2
+ pyobjc-framework-MetricKit==10.3.2
+ pyobjc-framework-MLCompute==10.3.2
+ pyobjc-framework-ModelIO==10.3.2
+ pyobjc-framework-MultipeerConnectivity==10.3.2
+ pyobjc-framework-NaturalLanguage==10.3.2
+ pyobjc-framework-NetFS==10.3.2
+ pyobjc-framework-Network==10.3.2
+ pyobjc-framework-NetworkExtension==10.3.2
+ pyobjc-framework-NotificationCenter==10.3.2
+ pyobjc-framework-OpenDirectory==10.3.2
+ pyobjc-framework-OSAKit==10.3.2
+ pyobjc-framework-OSLog==10.3.2
+ pyobjc-framework-PassKit==10.3.2
+ pyobjc-framework-PencilKit==10.3.2
+ pyobjc-framework-PHASE==10.3.2
+ pyobjc-framework-Photos==10.3.2
+ pyobjc-framework-PhotosUI==10.3.2
+ pyobjc-framework-PreferencePanes==10.3.2
+ pyobjc-framework-PushKit==10.3.2
+ pyobjc-framework-Quartz==10.3.2
+ pyobjc-framework-QuickLookThumbnailing==10.3.2
+ pyobjc-framework-ReplayKit==10.3.2
+ pyobjc-framework-SafariServices==10.3.2
+ pyobjc-framework-SafetyKit==10.3.2
+ pyobjc-framework-SceneKit==10.3.2
+ pyobjc-framework-ScreenCaptureKit==10.3.2
+ pyobjc-framework-ScreenSaver==10.3.2
+ pyobjc-framework-ScreenTime==10.3.2
+ pyobjc-framework-ScriptingBridge==10.3.2
+ pyobjc-framework-SearchKit==10.3.2
+ pyobjc-framework-Security==10.3.2
+ pyobjc-framework-SecurityFoundation==10.3.2
+ pyobjc-framework-SecurityInterface==10.3.2
+ pyobjc-framework-SensitiveContentAnalysis==10.3.2
+ pyobjc-framework-ServiceManagement==10.3.2
+ pyobjc-framework-SharedWithYou==10.3.2
+ pyobjc-framework-SharedWithYouCore==10.3.2
+ pyobjc-framework-ShazamKit==10.3.2
+ pyobjc-framework-Social==10.3.2
+ pyobjc-framework-SoundAnalysis==10.3.2
+ pyobjc-framework-Speech==10.3.2
+ pyobjc-framework-SpriteKit==10.3.2
+ pyobjc-framework-StoreKit==10.3.2
+ pyobjc-framework-Symbols==10.3.2
+ pyobjc-framework-SyncServices==10.3.2
+ pyobjc-framework-SystemConfiguration==10.3.2
+ pyobjc-framework-SystemExtensions==10.3.2
+ pyobjc-framework-ThreadNetwork==10.3.2
+ pyobjc-framework-UniformTypeIdentifiers==10.3.2
+ pyobjc-framework-UserNotifications==10.3.2
+ pyobjc-framework-UserNotificationsUI==10.3.2
+ pyobjc-framework-VideoSubscriberAccount==10.3.2
+ pyobjc-framework-VideoToolbox==10.3.2
+ pyobjc-framework-Virtualization==10.3.2
+ pyobjc-framework-Vision==10.3.2
+ pyobjc-framework-WebKit==10.3.2
+ pyOpenSSL==24.3.0
+ pyparsing==3.2.0
+ python-dateutil==2.9.0.post0
+ pytorch-lightning==2.4.0
+ pytorch-metric-learning==2.7.0
+ pytz==2024.2
+ PyYAML==6.0.2
+ referencing==0.35.1
+ regex==2024.11.6
+ requests==2.32.3
+ Resemblyzer==0.1.4
+ rich==13.9.4
+ rpds-py==0.22.3
+ ruamel.yaml==0.18.6
+ ruamel.yaml.clib==0.2.12
+ safetensors==0.4.5
+ scikit-learn==1.5.2
+ scipy==1.14.1
+ semver==3.0.2
+ sentencepiece==0.2.0
+ shellingham==1.5.4
+ six==1.17.0
+ smmap==5.0.1
+ sortedcontainers==2.4.0
+ sounddevice==0.5.1
+ soundfile==0.12.1
+ soxr==0.5.0.post1
+ spectralcluster==0.2.22
+ speechbrain==1.0.2
+ SQLAlchemy==2.0.36
+ streamlit==1.40.2
+ streamlit-webrtc==0.47.9
+ sympy==1.13.1
+ tabulate==0.9.0
+ tenacity==9.0.0
+ tensorboardX==2.6.2.2
+ threadpoolctl==3.5.0
+ tokenizers==0.21.0
+ toml==0.10.2
+ torch==2.5.1
+ torch-audiomentations==0.11.1
+ torch_pitch_shift==1.2.5
+ torchaudio==2.5.1
+ torchmetrics==1.6.0
+ tornado==6.4.2
+ tqdm==4.67.1
+ transformers==4.47.0
+ typer==0.15.1
+ typing==3.7.4.3
+ typing_extensions==4.12.2
+ tzdata==2024.2
+ urllib3==2.2.3
+ watchdog==6.0.0
+ webrtcvad==2.0.10
+ xxhash==3.5.0
+ yarl==1.18.3
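
The pins above read like a full environment freeze; only a handful are imported directly by app.py. A minimal standard-library sketch for checking that the installed versions of those direct dependencies match the pins listed above — the selection of packages is my reading of app.py's imports, not something stated in the commit:

# check_pins.py -- illustrative only; package selection is an assumption based on app.py's imports
from importlib.metadata import version, PackageNotFoundError

EXPECTED = {
    "torch": "2.5.1",
    "transformers": "4.47.0",
    "pyctcdecode": "0.5.0",
    "librosa": "0.10.2.post1",
    "soundfile": "0.12.1",
    "streamlit": "1.40.2",
    "huggingface-hub": "0.26.3",
    "numpy": "1.26.4",
}

for name, pinned in EXPECTED.items():
    try:
        installed = version(name)
        status = "OK" if installed == pinned else f"MISMATCH (installed {installed})"
    except PackageNotFoundError:
        status = "MISSING"
    print(f"{name}=={pinned}: {status}")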