Commit 6e13dba (verified) by Tri4 · Parent: a35f83d

Update main.py

Files changed (1):
  1. main.py +13 -13
main.py CHANGED
@@ -14,7 +14,7 @@ def hello():
 # Load the Whisper model
 print("Loading Whisper model...\n", flush=True)
 model = whisper.load_model("tiny")
-print("Whisper model loaded.\n", flush=True)
+print("\nWhisper model loaded.\n", flush=True)
 
 # Get time of request
 def get_time():
@@ -38,36 +38,36 @@ def convert_size(bytes):
     return f"{bytes / 1024**2:.2f} MB"
 
 def transcribe(audio_path):
-    print(f"Transcribing audio from: {audio_path}\n", flush=True)
+    print(f"Transcribing audio from: {audio_path}", flush=True)
 
     # Load audio and pad/trim it to fit 30 seconds
-    print("Loading and processing audio...\n", flush=True)
+    print(" Loading and processing audio...", flush=True)
     audio = whisper.load_audio(audio_path)
     audio = whisper.pad_or_trim(audio)
 
     # Make log-Mel spectrogram and move to the same device as the model
-    print("Creating log-Mel spectrogram...\n", flush=True)
+    print(" Creating log-Mel spectrogram...", flush=True)
     mel = whisper.log_mel_spectrogram(audio).to(model.device)
 
     # Detect the spoken language
-    print("Detecting language...", flush=True)
+    print(" Detecting language...", flush=True)
     _, probs = model.detect_language(mel)
     language = max(probs, key=probs.get)
-    print(f"Detected language: {language}\n", flush=True)
+    print(f" Detected language: {language}", flush=True)
 
     # Decode the audio
-    print("Decoding audio...", flush=True)
+    print(" Decoding audio...", flush=True)
     options = whisper.DecodingOptions(fp16=False)
     result = whisper.decode(model, mel, options)
 
-    print("Transcription complete.\n", flush=True)
+    print(" Transcription complete.", flush=True)
     return result.text, language
 
 @app.route('/transcribe', methods=['POST'])
 def transcribe_audio():
     # Record the time when the request was received
     request_received_time, _ = get_time()
-    print(f"Received request at /transcribe at {request_received_time}\n", flush=True)
+    print(f"Received request at /transcribe at {request_received_time}", flush=True)
 
     if 'audio' not in request.files:
         print("Error: No audio file provided", flush=True)
@@ -82,7 +82,7 @@ def transcribe_audio():
     audio_path = os.path.join("temp_audio", audio_file.filename)
     os.makedirs("temp_audio", exist_ok=True)
     audio_file.save(audio_path)
-    print(f"Audio file saved to: {audio_path} (Size: {audio_file_size})\n", flush=True)
+    print(f" Audio file saved to: {audio_path} (Size: {audio_file_size})", flush=True)
 
     # Record the time before starting transcription
     transcription_start_time = time.time()
@@ -91,7 +91,7 @@ def transcribe_audio():
     try:
         transcription, language = transcribe(audio_path)
     except Exception as e:
-        print(f"Error during transcription: {str(e)}", flush=True)
+        print(f" Error during transcription: {str(e)}", flush=True)
         return jsonify({"error": f"An error occurred: {str(e)}"}), 500
 
     # Calculate the time taken for transcription
@@ -100,13 +100,13 @@ def transcribe_audio():
 
     # Clean up the saved file
     os.remove(audio_path)
-    print(f"Audio file removed from: {audio_path}\n", flush=True)
+    print(f" Audio file removed from: {audio_path}\n", flush=True)
 
     # Record the time when the response is being sent
     response_sent_time, _ = get_time()
 
     # Return the transcription, detected language, and timing information
-    print(f"Transcription: {transcription}, Language: {language}\n", flush=True)
+    print(f" Transcription: {transcription}, Language: {language}\n", flush=True)
     return jsonify({
         "transcription": transcription,
         "language": language,
 