gfjiogopdfgdfs committed
Commit 76b92a4 (verified)
Parent(s): 064d9b6

delete tabbyAPI/config.yml

Files changed (1):
  tabbyAPI/config.yml +0 -137
tabbyAPI/config.yml DELETED
@@ -1,137 +0,0 @@
- # Sample YAML file for configuration.
- # Comment and uncomment values as needed. Every value has a default within the application.
- # This file serves as a drop-in for config.yml
-
- # Unless specified in the comments, DO NOT put these options in quotes!
- # You can use https://www.yamllint.com/ if you want to check your YAML formatting.
-
- # Options for networking
- network:
-   # The IP to host on (default: 127.0.0.1).
-   # Use 0.0.0.0 to expose on all network adapters
-   host: 127.0.0.1
-
-   # The port to host on (default: 5000)
-   port: 5000
-
-   # Disable HTTP token authentication for requests
-   # WARNING: This will make your instance vulnerable!
-   # Turn this option on if you are ONLY connecting from localhost
-   disable_auth: False
-
- # Options for logging
- logging:
-   # Enable prompt logging (default: False)
-   prompt: False
-
-   # Enable generation parameter logging (default: False)
-   generation_params: False
-
- # Options for sampling
- sampling:
-   # Override preset name. Find this in the sampler-overrides folder (default: None)
-   # This overrides default fallbacks for sampler values that are passed to the API
-   # Server-side overrides are NOT needed by default
-   # WARNING: Using this can result in a generation speed penalty
-   #override_preset:
-
- # Options for development
- developer:
-   # Skips the exllamav2 version check (default: False)
-   # It's highly recommended to update your dependencies rather than enabling this flag
-   # WARNING: Don't set this unless you know what you're doing!
-   #unsafe_launch: False
-
- # Options for model overrides and loading
- model:
-   # Overrides the directory to look for models (default: models)
-   # Windows users, DO NOT put this path in quotes! The directory will be invalid otherwise.
-   model_dir: models
-
-   # An initial model to load. Make sure the model is located in the model directory!
-   # A model can be loaded later via the API.
-   # REQUIRED: This must be filled out to load a model on startup!
-   model_name:
-
-   # Sends dummy model names when the models endpoint is queried
-   # Enable this if the program is looking for a specific OAI model
-   #use_dummy_models: False
-
-   # The below parameters apply only if model_name is set
-
-   # Max sequence length (default: Empty)
-   # Fetched from the model's base sequence length in config.json by default
-   #max_seq_len:
-
-   # Overrides the base model context length (default: Empty)
-   # WARNING: Don't set this unless you know what you're doing!
-   # Only use this if the model's base sequence length in config.json is incorrect (e.g. Mistral/Mixtral models)
-   #override_base_seq_len:
-
-   # Automatically allocate resources to GPUs (default: True)
-   #gpu_split_auto: True
-
-   # An array of GBs of VRAM to split between GPUs (default: [])
-   #gpu_split: [20.6, 24]
-
-   # Rope scale (default: 1.0)
-   # Same thing as compress_pos_emb
-   # Only use this if your model was trained on long context with rope (check config.json)
-   # Leave blank to pull the value from the model
-   #rope_scale: 1.0
-
-   # Rope alpha (default: 1.0)
-   # Same thing as alpha_value
-   # Leave blank to automatically calculate alpha
-   #rope_alpha: 1.0
-
-   # Disable Flash Attention 2. Set to True for GPUs older than Nvidia's 3000 series (default: False)
-   #no_flash_attention: False
-
-   # Enable 8-bit cache mode for VRAM savings (slight performance hit). Possible values: FP16, FP8 (default: FP16)
-   #cache_mode: FP16
-
-   # Set the prompt template for this model. If empty, chat completions will be disabled (default: Empty)
-   # NOTE: Only works with chat completion message lists!
-   #prompt_template:
-
-   # Number of experts to use PER TOKEN. Fetched from the model's config.json if not specified (default: Empty)
-   # WARNING: Don't set this unless you know what you're doing!
-   # NOTE: For MoE models (e.g. Mixtral) only!
-   #num_experts_per_token:
-
-   # Enables CFG support (default: False)
-   # WARNING: This flag disables Flash Attention! (a stopgap until it's fixed upstream)
-   #use_cfg: False
-
-   # Enables fasttensors to possibly increase model loading speeds (default: False)
-   #fasttensors: True
-
-   # Options for draft models (speculative decoding). This will use more VRAM!
-   #draft:
-     # Overrides the directory to look for draft models (default: models)
-     #draft_model_dir: models
-
-     # An initial draft model to load. Make sure this model is located in the model directory!
-     # A draft model can be loaded later via the API.
-     #draft_model_name: A model name
-
-     # Rope scale for draft models (default: 1.0)
-     # Same thing as compress_pos_emb
-     # Only use this if your draft model was trained on long context with rope (check config.json)
-     #draft_rope_scale: 1.0
-
-     # Rope alpha for draft models (default: 1.0)
-     # Same thing as alpha_value
-     # Leave blank to automatically calculate the alpha value
-     #draft_rope_alpha: 1.0
-
-   # Options for loras
-   #lora:
-     # Overrides the directory to look for loras (default: loras)
-     #lora_dir: loras
-
-     # List of loras to load with their associated scaling factors (default: 1.0). Comment out unused entries or add more rows as needed.
-     #loras:
-     #- name: lora1
-     #  scaling: 1.0
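
For reference, a minimal config.yml of the kind removed here might look like the sketch below. The model name and gpu_split figures are placeholders for illustration, not values from the deleted file; any option left out falls back to the defaults documented in the comments above.

# Minimal illustrative config.yml (placeholder values; omitted options keep their defaults)
network:
  host: 127.0.0.1             # stay on localhost unless token auth is enabled
  port: 5000

model:
  model_dir: models
  model_name: my-exl2-model   # placeholder; must be a folder inside model_dir
  gpu_split_auto: False
  gpu_split: [20.6, 24]       # GB of VRAM per GPU, mirroring the sample above

As the deleted sample notes, model_name is the only field required to load a model on startup; everything else can stay commented out and run on defaults.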