rphrp1985 committed on
Commit
aaeb2df
·
verified ·
1 Parent(s): 9a7a921

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -8
app.py CHANGED
@@ -40,15 +40,15 @@ tokenizer = AutoTokenizer.from_pretrained(
40
  , token= token,)
41
 
42
 
43
- with init_empty_weights():
44
- model = AutoModelForCausalLM.from_pretrained(model_id, token= token,
45
  # torch_dtype= torch.uint8,
46
  torch_dtype=torch.float16,
47
  # torch_dtype=torch.fl,
48
  attn_implementation="flash_attention_2",
49
- # low_cpu_mem_usage=True,
50
  # llm_int8_enable_fp32_cpu_offload=True,
51
- # device_map=device_map,
52
 
53
  )
54
 
@@ -56,11 +56,11 @@ with init_empty_weights():
56
  #
57
 
58
 
59
- device_map = infer_auto_device_map(model, max_memory={0: "80GB", 1: "80GB", "cpu": "65GB"})
60
 
61
  # Load the model with the inferred device map
62
  # model = load_checkpoint_and_dispatch(model, model_id, device_map=device_map, no_split_module_classes=["GPTJBlock"])
63
- # model.half()
64
 
65
 
66
 
@@ -77,8 +77,8 @@ def respond(
77
  messages = [{"role": "user", "content": "Hello, how are you?"}]
78
  input_ids = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt").to('cuda')
79
  ## <BOS_TOKEN><|START_OF_TURN_TOKEN|><|USER_TOKEN|>Hello, how are you?<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>
80
- with autocast():
81
- gen_tokens = model.generate(
82
  input_ids,
83
  max_new_tokens=100,
84
  # do_sample=True,
 
40
  , token= token,)
41
 
42
 
43
+ # with init_empty_weights():
44
+ model = AutoModelForCausalLM.from_pretrained(model_id, token= token,
45
  # torch_dtype= torch.uint8,
46
  torch_dtype=torch.float16,
47
  # torch_dtype=torch.fl,
48
  attn_implementation="flash_attention_2",
49
+ low_cpu_mem_usage=True,
50
  # llm_int8_enable_fp32_cpu_offload=True,
51
+ device_map='cuda',
52
 
53
  )
54
 
 
56
  #
57
 
58
 
59
+ # device_map = infer_auto_device_map(model, max_memory={0: "80GB", 1: "80GB", "cpu": "65GB"})
60
 
61
  # Load the model with the inferred device map
62
  # model = load_checkpoint_and_dispatch(model, model_id, device_map=device_map, no_split_module_classes=["GPTJBlock"])
63
+ model.half()
64
 
65
 
66
 
 
77
  messages = [{"role": "user", "content": "Hello, how are you?"}]
78
  input_ids = tokenizer.apply_chat_template(messages, tokenize=True, add_generation_prompt=True, return_tensors="pt").to('cuda')
79
  ## <BOS_TOKEN><|START_OF_TURN_TOKEN|><|USER_TOKEN|>Hello, how are you?<|END_OF_TURN_TOKEN|><|START_OF_TURN_TOKEN|><|CHATBOT_TOKEN|>
80
+ # with autocast():
81
+ gen_tokens = model.generate(
82
  input_ids,
83
  max_new_tokens=100,
84
  # do_sample=True,