yolloo committed
Commit 9f57212 · verified · 1 Parent(s): 9fce3cc

Upload whisper_server (1).py

Files changed (1)
  1. whisper_server (1).py +64 -0
whisper_server (1).py ADDED
@@ -0,0 +1,64 @@
+ 
+ import os
+ import tempfile
+ from flask import request, jsonify
+ from transformers import pipeline
+ import torch
+ import traceback
+ 
+ # Define a writable directory for the model cache.
+ # This respects the HF_HOME environment variable set in the Dockerfile.
+ cache_dir = os.environ.get("HF_HOME", "./.cache")
+ os.makedirs(cache_dir, exist_ok=True)
+ 
+ 
+ print("Loading collabora/whisper-tiny-hindi model via transformers pipeline...")
+ 
+ # Determine the device: use the first GPU if available, otherwise fall back to CPU.
+ device = "cuda:0" if torch.cuda.is_available() else "cpu"
+ 
+ # Initialize the ASR pipeline with the specified model.
+ # Using the transformers pipeline is the correct way to load custom models from the Hub.
+ model = pipeline(
+     "automatic-speech-recognition",
+     model="collabora/whisper-tiny-hindi",
+     device=device,
+     model_kwargs={"cache_dir": cache_dir}
+ )
+ 
+ print("Whisper model loaded.")
+ 
+ def handle_transcribe():
+     try:
+         # Step 1: Validate the request - the frontend sends the upload under the 'audio' key.
+         if 'audio' not in request.files:
+             print("Error: 'audio' key not in request.files")
+             return jsonify({'error': 'No audio file part in the request'}), 400
+ 
+         file = request.files['audio']
+ 
+         if file.filename == '':
+             print("Error: No selected file")
+             return jsonify({'error': 'No selected file'}), 400
+ 
+         # Step 2: Save the upload to a temporary file.
+         with tempfile.NamedTemporaryFile(delete=True, suffix=".webm") as temp_audio:
+             file.save(temp_audio.name)
+ 
+             print(f"Transcribing file: {temp_audio.name} with collabora/whisper-tiny-hindi pipeline")
+ 
+             # Step 3: Transcribe using the pipeline.
+             # The pipeline is robust and can handle various formats directly, leveraging ffmpeg.
+             result = model(temp_audio.name)
+ 
+             transcribed_text = result.get('text', '')
+ 
+             print("Transcription successful.")
+             return jsonify({'text': transcribed_text})
+ 
+     except Exception as e:
+         # Step 4: Robust error logging.
+         print("❌ Error in handle_transcribe():")
+         traceback.print_exc()
+         return jsonify({'error': f"An unexpected error occurred during transcription: {str(e)}"}), 500
+ 
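Note that this file only defines handle_transcribe; it does not create a Flask app or register a route, so a separate entry-point module presumably wires it up. A minimal sketch of that wiring is below, assuming the uploaded file is saved as whisper_server.py; the app module, the /transcribe path, and port 7860 are illustrative assumptions, not part of this commit.

# app.py (hypothetical entry point, not included in this commit)
from flask import Flask
import whisper_server  # assumes the file is importable as whisper_server.py

app = Flask(__name__)

# Expose the handler on a POST route; the '/transcribe' path is an assumption.
app.add_url_rule('/transcribe', view_func=whisper_server.handle_transcribe, methods=['POST'])

if __name__ == "__main__":
    # Port 7860 is assumed here (the usual port for HF Spaces containers).
    app.run(host="0.0.0.0", port=7860)

With such an entry point running, the endpoint could be exercised with a multipart upload, e.g. curl -X POST -F "audio=@sample.webm" http://localhost:7860/transcribe, which should return a JSON body of the form {"text": "..."}.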