hiko1999 committed on
Commit
e3781d9
·
1 Parent(s): 15a6b2d

Switch to CPU execution

Browse files
Files changed (1) hide show
  1. app.py +4 -2
app.py CHANGED
@@ -9,7 +9,7 @@ model_path = "hiko1999/Qwen2-Wildfire-VL-2B-Instruct" # 替换为你的模型
9
 
10
  # 加载 Hugging Face 上的模型和 processor
11
  tokenizer = AutoTokenizer.from_pretrained(model_path)
12
- model = Qwen2VLForConditionalGeneration.from_pretrained(model_path, torch_dtype=torch.bfloat16, device_map="auto")
13
  processor = AutoProcessor.from_pretrained(model_path)
14
 
15
  # 定义预测函数
@@ -22,7 +22,9 @@ def predict(image):
22
  text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
23
  image_inputs, video_inputs = process_vision_info(messages)
24
  inputs = processor(text=[text], images=image_inputs, videos=video_inputs, padding=True, return_tensors="pt")
25
- inputs = inputs.to("cuda") # 转移到GPU
 
 
26
 
27
  # 生成模型输出
28
  generated_ids = model.generate(**inputs, max_new_tokens=128)
 
9
 
10
  # 加载 Hugging Face 上的模型和 processor
11
  tokenizer = AutoTokenizer.from_pretrained(model_path)
12
+ model = Qwen2VLForConditionalGeneration.from_pretrained(model_path, torch_dtype=torch.bfloat16) # 移除 device_map 参数以避免自动分配到 GPU
13
  processor = AutoProcessor.from_pretrained(model_path)
14
 
15
  # 定义预测函数
 
22
  text = processor.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
23
  image_inputs, video_inputs = process_vision_info(messages)
24
  inputs = processor(text=[text], images=image_inputs, videos=video_inputs, padding=True, return_tensors="pt")
25
+
26
+ # 将数据转移到 CPU
27
+ inputs = inputs.to("cpu") # 使用 CPU 而不是 CUDA
28
 
29
  # 生成模型输出
30
  generated_ids = model.generate(**inputs, max_new_tokens=128)