rphrp1985 committed on
Commit
bdd4365
·
verified ·
1 Parent(s): 560936f

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -47,7 +47,7 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
47
  # model_id = "mistralai/Mistral-7B-v0.3"
48
 
49
  model_id = "Qwen/Qwen1.5-14B-Chat"
50
- model_id = "Citaman/command-r-25-layer"
51
 
52
 
53
  tokenizer = AutoTokenizer.from_pretrained(
@@ -61,9 +61,9 @@ accelerator = Accelerator()
61
  model = AutoModelForCausalLM.from_pretrained(model_id, token= token,
62
  # torch_dtype= torch.uint8,
63
  torch_dtype=torch.float16,
64
- load_in_8bit=True,
65
  # # # torch_dtype=torch.fl,
66
- # attn_implementation="flash_attention_2",
67
  low_cpu_mem_usage=True,
68
  # device_map='cuda',
69
  # device_map=accelerator.device_map,
 
47
  # model_id = "mistralai/Mistral-7B-v0.3"
48
 
49
  model_id = "Qwen/Qwen1.5-14B-Chat"
50
+ # model_id = "Citaman/command-r-25-layer"
51
 
52
 
53
  tokenizer = AutoTokenizer.from_pretrained(
 
61
  model = AutoModelForCausalLM.from_pretrained(model_id, token= token,
62
  # torch_dtype= torch.uint8,
63
  torch_dtype=torch.float16,
64
+ # load_in_8bit=True,
65
  # # # torch_dtype=torch.fl,
66
+ attn_implementation="flash_attention_2",
67
  low_cpu_mem_usage=True,
68
  # device_map='cuda',
69
  # device_map=accelerator.device_map,