Add LM Studio config
Browse files- model_config.json +53 -0
model_config.json
ADDED
@@ -0,0 +1,53 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
{
  "name": "Config for Chat ID 1713992517623",
  "load_params": {
    "n_ctx": 8192,
    "n_batch": 512,
    "rope_freq_base": 0,
    "rope_freq_scale": 4,
    "n_gpu_layers": -1,
    "use_mlock": true,
    "main_gpu": 0,
    "tensor_split": [
      0
    ],
    "seed": -1,
    "f16_kv": true,
    "use_mmap": true,
    "no_kv_offload": false,
    "num_experts_used": 0
  },
  "inference_params": {
    "n_threads": 4,
    "n_predict": -1,
    "top_k": 40,
    "min_p": 0.05,
    "top_p": 0.95,
    "temp": 0.4,
    "repeat_penalty": 1.1,
    "input_prefix": "### Input:\n",
    "input_suffix": "\n### Response:\n",
    "antiprompt": [
      "### Instruction:",
      "### Instruction:\n",
      "### Input:\n"
    ],
    "pre_prompt": "Below is an instruction that describes a task. Write a response that appropriately completes the request.",
    "pre_prompt_suffix": "\n",
    "pre_prompt_prefix": "### Instruction:\n",
    "seed": -1,
    "tfs_z": 1,
    "typical_p": 1,
    "repeat_last_n": 64,
    "frequency_penalty": 0,
    "presence_penalty": 0,
    "n_keep": 0,
    "logit_bias": {},
    "mirostat": 0,
    "mirostat_tau": 5,
    "mirostat_eta": 0.1,
    "memory_f16": true,
    "multiline_input": false,
    "penalize_nl": true
  }
}