Pavithiran committed on
Commit
69c1109
·
verified ·
1 Parent(s): 974fc27

Create download_model.py

Browse files
Files changed (1) hide show
  1. download_model.py +49 -0
download_model.py ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""
Pre-download the model to ensure it's available during app startup.
This script should be run before launching the app.
"""

import os

# IMPORTANT: the Hugging Face cache environment variables must be set BEFORE
# importing transformers/huggingface_hub — both libraries read them at import
# time. Setting them after the import (as the original did) silently leaves
# downloads in the default cache location.
_CACHE_DIRS = {
    "TRANSFORMERS_CACHE": "/tmp/transformers_cache",
    "HF_HOME": "/tmp/hf_home",
    "HUGGINGFACE_HUB_CACHE": "/tmp/hf_hub_cache",
}

# Export each cache variable and make sure its directory exists.
for _env_var, _cache_dir in _CACHE_DIRS.items():
    os.environ[_env_var] = _cache_dir
    os.makedirs(_cache_dir, exist_ok=True)

import torch  # noqa: E402  (must come after cache env setup)
from transformers import AutoModelForSpeechSeq2Seq, AutoProcessor  # noqa: E402

# Model configuration: use fp16 on GPU, fp32 on CPU (fp16 on CPU is slow/unsupported).
MODEL_NAME = "openai/whisper-large-v3-turbo"
device = "cuda:0" if torch.cuda.is_available() else "cpu"
torch_dtype = torch.float16 if torch.cuda.is_available() else torch.float32

print("Pre-downloading model to cache...")
print(f"Using device: {device}")
print(f"Using dtype: {torch_dtype}")

try:
    # Download model weights into the explicit cache directory.
    model = AutoModelForSpeechSeq2Seq.from_pretrained(
        MODEL_NAME,
        torch_dtype=torch_dtype,
        low_cpu_mem_usage=True,
        use_safetensors=True,
        cache_dir=_CACHE_DIRS["TRANSFORMERS_CACHE"],
    )
    print("Model downloaded successfully!")

    # Download the matching feature extractor + tokenizer bundle.
    processor = AutoProcessor.from_pretrained(
        MODEL_NAME,
        cache_dir=_CACHE_DIRS["TRANSFORMERS_CACHE"],
    )
    print("Processor downloaded successfully!")

    print("Pre-download complete! Model is ready for use.")
except Exception as e:
    # Surface the failure for CI/startup logs, then re-raise so the caller
    # (e.g. a container entrypoint) fails fast instead of booting without a model.
    print(f"Error during pre-download: {e}")
    raise