Update adapter_config.json
Added "architectures" to adapter_config.json; this prevents an issue that stops the model from being loaded with vLLM.
adapter_config.json  CHANGED  +3 -0
@@ -1,6 +1,9 @@
 {
   "auto_mapping": null,
   "base_model_name_or_path": "base_models/Llama-2-7b-hf",
+  "architectures": [
+    "LlamaForCausalLM"
+  ],
   "bias": "none",
   "fan_in_fan_out": false,
   "inference_mode": true,
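For reference, a minimal sketch (not part of this commit) of loading the patched adapter with vLLM's LoRA support; the adapter path and adapter name below are placeholders, and the exact call signatures may vary with the vLLM version.

# Sketch: serve the LoRA adapter on top of the base model with vLLM.
# Assumes the adapter directory contains the patched adapter_config.json.
from vllm import LLM, SamplingParams
from vllm.lora.request import LoRARequest

# Base model should match "base_model_name_or_path" in adapter_config.json.
llm = LLM(model="base_models/Llama-2-7b-hf", enable_lora=True)

# "llama2-adapter" and "path/to/adapter" are placeholder name/path values.
lora_request = LoRARequest("llama2-adapter", 1, "path/to/adapter")

outputs = llm.generate(
    ["Hello, my name is"],
    SamplingParams(max_tokens=32),
    lora_request=lora_request,
)
print(outputs[0].outputs[0].text)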