rphrp1985 committed
Commit 00f0616 · verified · 1 Parent(s): a3cc381

Update app.py

Files changed (1): app.py +4 -4
app.py CHANGED
@@ -40,15 +40,15 @@ tokenizer = AutoTokenizer.from_pretrained(
 , token= token,)
 
 
-# with init_empty_weights():
-model = AutoModelForCausalLM.from_pretrained(model_id, token= token,
+with init_empty_weights():
+model = AutoModelForCausalLM.from_pretrained(model_id, token= token,
 # torch_dtype= torch.uint8,
 torch_dtype=torch.float16,
 # torch_dtype=torch.fl,
 attn_implementation="flash_attention_2",
 low_cpu_mem_usage=True,
 # llm_int8_enable_fp32_cpu_offload=True,
-# device_map='cuda',
+device_map='cuda',
 
 )
 
@@ -56,7 +56,7 @@ model = AutoModelForCausalLM.from_pretrained(model_id, token= token,
 #
 
 
-# device_map = infer_auto_device_map(model, max_memory={0: "80GB", 1: "80GB", "cpu": "65GB"})
+device_map = infer_auto_device_map(model, max_memory={0: "79GB", })
 
 # Load the model with the inferred device map
 # model = load_checkpoint_and_dispatch(model, model_id, device_map=device_map, no_split_module_classes=["GPTJBlock"])
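For context, the calls this commit toggles (init_empty_weights, infer_auto_device_map, load_checkpoint_and_dispatch) come from Hugging Face accelerate and normally form a single big-model loading pattern. Below is a minimal sketch of that pattern, not the app's exact code: it assumes model_id and token are defined as earlier in app.py, and the snapshot_download step is only an illustrative way to obtain a local checkpoint directory.

# Sketch of the standard accelerate big-model loading pattern; assumptions noted inline.
from accelerate import init_empty_weights, infer_auto_device_map, load_checkpoint_and_dispatch
from huggingface_hub import snapshot_download
from transformers import AutoConfig, AutoModelForCausalLM

config = AutoConfig.from_pretrained(model_id, token=token)

# Instantiate the architecture on the "meta" device: no weight memory is
# allocated, so arbitrarily large models can be created instantly.
with init_empty_weights():
    model = AutoModelForCausalLM.from_config(config)

# Plan a layer-to-device placement under an explicit memory budget
# (this commit caps GPU 0 at "79GB").
device_map = infer_auto_device_map(model, max_memory={0: "79GB"})

# Materialize the real weights from a local checkpoint onto the planned devices.
checkpoint_dir = snapshot_download(model_id, token=token)  # assumption: weights fetched locally first
model = load_checkpoint_and_dispatch(model, checkpoint_dir, device_map=device_map)

When a device_map is passed directly to AutoModelForCausalLM.from_pretrained (as the new device_map='cuda' line does), transformers performs an equivalent empty-init-and-dispatch sequence internally, which is why the commented-out lines in app.py switch between the two approaches.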