Mark0047 committed
Commit f5d0beb · verified · 1 Parent(s): 0759a7f

Update app.py

Files changed (1):
app.py +3 -1
app.py CHANGED
@@ -17,14 +17,16 @@ def transcribe_and_analyze(audio_path):
     audio, sample_rate = sf.read(audio_path)
 
     # Resample audio to 16000 Hz if necessary
+    print('resample')
     if sample_rate != 16000:
         audio_tensor = torchaudio.functional.resample(torch.tensor(audio), orig_freq=sample_rate, new_freq=16000)
         audio = audio_tensor.numpy()  # Convert back to numpy array
-
+    print('trans')
     # Process audio with Whisper
     input_features = processor(audio, sampling_rate=16000, return_tensors="pt").input_features
     predicted_ids = model.generate(input_features)
     transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)[0]
+    print(transcription)
 
     # Analyze emotions in the transcription
     emotions = emotion_classifier(transcription)
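
For readers skimming the hunk: the three added print calls trace the resample and transcription steps, and the commit also drops one blank line. Below is a minimal, self-contained sketch of how this function might sit in app.py after the commit. Only the function body shown in the diff is confirmed; the imports, the checkpoint names (openai/whisper-base, j-hartmann/emotion-english-distilroberta-base), the pipeline task, and the return statement are assumptions filled in for illustration.

# Minimal sketch of the surrounding app.py context (not shown in the diff).
# Checkpoint names, the pipeline task, and the return value are assumptions;
# only the function body is confirmed by the commit.
import soundfile as sf
import torch
import torchaudio
from transformers import WhisperForConditionalGeneration, WhisperProcessor, pipeline

processor = WhisperProcessor.from_pretrained("openai/whisper-base")  # assumed checkpoint
model = WhisperForConditionalGeneration.from_pretrained("openai/whisper-base")  # assumed checkpoint
emotion_classifier = pipeline(
    "text-classification",
    model="j-hartmann/emotion-english-distilroberta-base",  # assumed emotion model
)

def transcribe_and_analyze(audio_path):
    # Read the audio file; mono input is assumed throughout.
    audio, sample_rate = sf.read(audio_path)

    # Resample audio to 16000 Hz if necessary (Whisper expects 16 kHz input)
    print('resample')
    if sample_rate != 16000:
        audio_tensor = torchaudio.functional.resample(
            torch.tensor(audio), orig_freq=sample_rate, new_freq=16000
        )
        audio = audio_tensor.numpy()  # Convert back to numpy array
    print('trans')
    # Process audio with Whisper
    input_features = processor(audio, sampling_rate=16000, return_tensors="pt").input_features
    predicted_ids = model.generate(input_features)
    transcription = processor.batch_decode(predicted_ids, skip_special_tokens=True)[0]
    print(transcription)

    # Analyze emotions in the transcription
    emotions = emotion_classifier(transcription)
    return transcription, emotions  # assumed return; the hunk ends before it

The bare print calls read like throwaway debug tracing; if this app.py backs a Gradio Space, they will surface in the container logs and show where a request stalls, though Python's logging module would be the more durable choice.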