Update app.py
app.py CHANGED
@@ -21,6 +21,9 @@ MODEL = None
 PROCESSOR = None
 DEVICE = None
 
+os.environ['TOKENIZERS_PARALLELISM'] = 'false'
+os.environ['TRANSFORMERS_CACHE'] = '/tmp/transformers_cache'
+
 def get_device():
     """Determine the best available device"""
     if torch.cuda.is_available():
@@ -28,7 +31,7 @@ def get_device():
     else:
         return "cpu"
 
-def load_model_cached(model_name: str = "Qwen/Qwen2
+def load_model_cached(model_name: str = "Qwen/Qwen2-VL-2B-Instruct"):
     """Load and cache model - optimized for HF Spaces"""
     global MODEL, PROCESSOR, DEVICE
 
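
The hunks only show the new environment variables and the changed default argument of load_model_cached, not the function body. For context, below is a minimal sketch of how a global-cache loader like this is commonly written for a Qwen2-VL Space; the transformers classes used, the dtype choice, and the body of get_device's CUDA branch are assumptions for illustration, not taken from this commit.

import os
import torch
from transformers import AutoProcessor, Qwen2VLForConditionalGeneration

MODEL = None
PROCESSOR = None
DEVICE = None

os.environ['TOKENIZERS_PARALLELISM'] = 'false'
os.environ['TRANSFORMERS_CACHE'] = '/tmp/transformers_cache'

def get_device():
    """Determine the best available device"""
    if torch.cuda.is_available():
        return "cuda"  # assumed: the CUDA branch is not visible in the diff hunks
    else:
        return "cpu"

def load_model_cached(model_name: str = "Qwen/Qwen2-VL-2B-Instruct"):
    """Load and cache model - optimized for HF Spaces"""
    global MODEL, PROCESSOR, DEVICE
    if MODEL is None:
        # Hypothetical body: load once per process and reuse across requests.
        DEVICE = get_device()
        PROCESSOR = AutoProcessor.from_pretrained(model_name)
        MODEL = Qwen2VLForConditionalGeneration.from_pretrained(
            model_name,
            torch_dtype=torch.float16 if DEVICE == "cuda" else torch.float32,
        ).to(DEVICE)
    return MODEL, PROCESSOR, DEVICE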