{"_name_or_path":"TheBloke/Mistral-7B-v0.1-GPTQ","architectures":["MistralForCausalLM"],"bos_token_id":1,"eos_token_id":2,"hidden_act":"silu","hidden_size":4096,"initializer_range":0.02,"intermediate_size":14336,"max_position_embeddings":32768,"model_type":"mistral","num_attention_heads":32,"num_hidden_layers":32,"num_key_value_heads":8,"pad_token_id":0,"pretraining_tp":1,"quantization_config":{"batch_size":1,"bits":4,"block_name_to_quantize":null,"damp_percent":0.1,"dataset":null,"desc_act":true,"disable_exllama":false,"group_size":128,"max_input_length":null,"model_seqlen":null,"module_name_preceding_first_block":null,"pad_token_id":null,"quant_method":"gptq","sym":true,"tokenizer":null,"true_sequential":true,"use_cuda_fp16":false},"rms_norm_eps":0.00001,"rope_theta":10000,"sliding_window":4096,"tie_word_embeddings":false,"torch_dtype":"bfloat16","transformers_version":"4.34.1","use_cache":true,"vocab_size":32000}