{ "config": { "alpha": 256, "architecture": "lora", "attn_matrices": [ "q", "v" ], "composition_mode": "add", "dropout": 0.1, "init_weights": "lora", "intermediate_lora": false, "leave_out": [], "output_lora": false, "r": 128, "selfattn_lora": true, "use_gating": false }, "config_id": "313205f6cdead6fa", "hidden_size": 2048, "model_class": "LlamaForCausalLM", "model_name": "meta-llama/Llama-3.2-1B-Instruct", "model_type": "llama", "name": "lora128", "version": "adapters.1.0.0" }