walaa2022 committed
Commit e7ca87e · verified · 1 Parent(s): b166566

Update app.py

Files changed (1):
  1. app.py +4 -1
app.py CHANGED
@@ -21,6 +21,9 @@ MODEL = None
 PROCESSOR = None
 DEVICE = None
 
+os.environ['TOKENIZERS_PARALLELISM'] = 'false'
+os.environ['TRANSFORMERS_CACHE'] = '/tmp/transformers_cache'
+
 def get_device():
     """Determine the best available device"""
     if torch.cuda.is_available():
@@ -28,7 +31,7 @@ def get_device():
     else:
         return "cpu"
 
-def load_model_cached(model_name: str = "Qwen/Qwen2.5-VL-3B-Instruct"):
+def load_model_cached(model_name: str = "Qwen/Qwen2-VL-2B-Instruct"):
     """Load and cache model - optimized for HF Spaces"""
     global MODEL, PROCESSOR, DEVICE
 
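The net effect of the commit is to pin the two environment variables at module top level, before any model loading runs, and to drop the default checkpoint from Qwen/Qwen2.5-VL-3B-Instruct to the smaller Qwen/Qwen2-VL-2B-Instruct, presumably to fit the Space's memory budget. The diff truncates inside load_model_cached, so the following is only a minimal sketch of how such a global-variable cached loader is typically completed, assuming the stock transformers Qwen2-VL classes (Qwen2VLForConditionalGeneration, AutoProcessor) rather than code taken from this commit:

import os

# Must run before transformers first resolves its cache directory;
# these two lines mirror the ones added in this commit.
os.environ['TOKENIZERS_PARALLELISM'] = 'false'
os.environ['TRANSFORMERS_CACHE'] = '/tmp/transformers_cache'

import torch
from transformers import AutoProcessor, Qwen2VLForConditionalGeneration

MODEL = None
PROCESSOR = None
DEVICE = None

def get_device():
    """Determine the best available device"""
    if torch.cuda.is_available():
        return "cuda"
    else:
        return "cpu"

def load_model_cached(model_name: str = "Qwen/Qwen2-VL-2B-Instruct"):
    """Load and cache model - optimized for HF Spaces"""
    global MODEL, PROCESSOR, DEVICE
    # Hypothetical completion: the commit's diff ends above this point.
    if MODEL is None:
        DEVICE = get_device()
        MODEL = Qwen2VLForConditionalGeneration.from_pretrained(
            model_name,
            torch_dtype=torch.float16 if DEVICE == "cuda" else torch.float32,
        ).to(DEVICE)
        PROCESSOR = AutoProcessor.from_pretrained(model_name)
    return MODEL, PROCESSOR, DEVICE

As for the two environment variables themselves: TRANSFORMERS_CACHE=/tmp/transformers_cache points model downloads at a path that is writable inside a Space container, and TOKENIZERS_PARALLELISM=false suppresses the tokenizers library's fork warning. Both only take effect if they are set before transformers resolves its cache, which is why the commit places them at the top of app.py rather than inside the loader.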