aloy99 committed on
Commit
7406b90
1 Parent(s): 0e3fe7f

Update adapter_config.json

Browse files

Added the "architectures" field to adapter_config.json; this prevents an issue that stops the model from being loaded with vLLM.

Files changed (1) hide show
  1. adapter_config.json +3 -0
adapter_config.json CHANGED
@@ -1,6 +1,9 @@
1
  {
2
  "auto_mapping": null,
3
  "base_model_name_or_path": "base_models/Llama-2-7b-hf",
 
 
 
4
  "bias": "none",
5
  "fan_in_fan_out": false,
6
  "inference_mode": true,
 
1
  {
2
  "auto_mapping": null,
3
  "base_model_name_or_path": "base_models/Llama-2-7b-hf",
4
+ "architectures": [
5
+ "LlamaForCausalLM"
6
+ ],
7
  "bias": "none",
8
  "fan_in_fan_out": false,
9
  "inference_mode": true,