whyumesh committed
Commit 753217c · verified · 1 Parent(s): 530524e

Update app.py

Files changed (1):
  1. app.py +27 -5
app.py CHANGED
@@ -11,24 +11,46 @@ import cv2
 import numpy as np
 import gradio as gr
 import spaces
+from huggingface_hub import login
+import os
+
+# Add login function at the start
+def init_huggingface_auth():
+    # Get token from environment variable or set it directly
+    token = os.getenv("HUGGINGFACE_TOKEN")
+    if token:
+        login(token=token)
+    else:
+        print("Warning: HUGGINGFACE_TOKEN not found in environment variables")
 
 # Load both models and their processors/tokenizers
 def load_models():
+    # Initialize HF auth before loading models
+    init_huggingface_auth()
+
     # Vision model
     vision_model = Qwen2VLForConditionalGeneration.from_pretrained(
         "Qwen/Qwen2-VL-2B-Instruct",
         torch_dtype=torch.float16,
-        device_map="auto"
+        device_map="auto",
+        use_auth_token=True  # Add auth token usage
+    )
+    vision_processor = AutoProcessor.from_pretrained(
+        "Qwen/Qwen2-VL-2B-Instruct",
+        use_auth_token=True  # Add auth token usage
     )
-    vision_processor = AutoProcessor.from_pretrained("Qwen/Qwen2-VL-2B-Instruct")
 
-    # Code model
+    # Code model
     code_model = AutoModelForCausalLM.from_pretrained(
         "Qwen/Qwen2.5-Coder-1.5B-Instruct",
         torch_dtype=torch.float16,
-        device_map="auto"
+        device_map="auto",
+        use_auth_token=True  # Add auth token usage
+    )
+    code_tokenizer = AutoTokenizer.from_pretrained(
+        "Qwen/Qwen2.5-Coder-1.5B-Instruct",
+        use_auth_token=True  # Add auth token usage
     )
-    code_tokenizer = AutoTokenizer.from_pretrained("Qwen/Qwen2.5-Coder-1.5B-Instruct")
 
     return vision_model, vision_processor, code_model, code_tokenizer
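
For reference, a minimal usage sketch of the auth flow introduced by this commit, assuming it runs inside app.py after the functions above are defined and that HUGGINGFACE_TOKEN is supplied to the process (for example as a Space secret). The token value below is a placeholder, not a real credential.

import os

os.environ.setdefault("HUGGINGFACE_TOKEN", "hf_xxx")  # placeholder for local testing only

# load_models() calls init_huggingface_auth() first, so huggingface_hub.login()
# runs before the from_pretrained() calls that pass use_auth_token=True.
vision_model, vision_processor, code_model, code_tokenizer = load_models()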