Surbao committed
Commit 250fe69 · verified · 1 Parent(s): 2986f07

Update librechat.yaml

Files changed (1)
  1. librechat.yaml +1664 -11
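For reference, LibreChat reads this configuration from librechat.yaml in the app root. When running under Docker Compose, a bind mount along the lines of the sketch below makes the edited config visible to the container; the service name "api" and the paths are assumptions based on the default LibreChat layout, not part of this commit.

# docker-compose.override.yml — minimal sketch, assuming the default LibreChat layout
services:
  api:
    volumes:
      # mount the edited librechat.yaml where the app expects to find it
      - ./librechat.yaml:/app/librechat.yaml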
librechat.yaml CHANGED
@@ -1,7 +1,8 @@
1
- # For more information, see the Configuration Guide:
2
- # https://www.librechat.ai/docs/configuration/librechat_yaml
3
  version: 1.1.6
 
4
  cache: true
 
 
5
  interface:
6
  privacyPolicy:
7
  externalUrl: 'https://librechat.ai/privacy-policy'
@@ -9,19 +10,1671 @@ interface:
9
  termsOfService:
10
  externalUrl: 'https://librechat.ai/tos'
11
  openNewTab: true
 
12
  registration:
13
- socialLogins: ['github', 'google', 'discord', 'openid', 'facebook']
 
14
  endpoints:
15
- custom:
16
  - name: "yi-lightning"
17
- apiKey: "eyJ0eXAiOiJKV1QiLCJhbGciOiJSUzI1NiIsImtpZCI6InljaTVpbXpJWlpvQUdGd29fbk05Q2t2TnNVUUNydXlqckliNzVTMzM0eWcifQ.eyJzdWIiOiI2NzBmMjhjZTBmZGI0OTZjMmRiNjU0NDYiLCJhdWQiOiI2NWU1MjMxYTdiMTI4ZDdlZDhjYTdiZWYiLCJzY29wZSI6InByb2ZpbGUgYWRkcmVzcyBvcGVuaWQgZW1haWwgcGhvbmUiLCJpYXQiOjE3MzIyNTk2OTUsImV4cCI6MTczMzQ2OTI5NSwianRpIjoiYXd5QzZxdUhmY01Gc19Za1JBbDFIM0JKakpQR0xIWF81YmVfRjRqR0Y0cyIsImlzcyI6Imh0dHBzOi8vMDFhaS1wbGF0Zm9ybS5hdXRoaW5nLmNuL29pZGMifQ.ST4kJ7HgL7Y3OrTSR_-f6iWkYAtmrNikUfYs1iEwe_hti_96m-11fv-7vNuTZTVFaC37nxm_XXJNmwh2QkFmS66nKB5FRFUDCpRJPdT7oL4Wsr_xX008IeVOn0Jy9iixBKn8b_J1zpi-rSvxjDzeFuRX7l-SDCVtK-z_UnJKOA8px5HDE7oSlCaOJAFfHRfnG0FFqhTDzG7tU9tAZVS_q_ftWU_ktICysMRe7TF5V07C9Rgz7xRTiEyEaW3FfadKWQScQFvqSkXvXkhYf_rLUT-SdOIE3VVtimw3of6YzSoVeuWfr9U1ovJGNMk9FGOZYpHvlKQCU_PL5mAMHt-6Fg"
18
- baseURL: "https://wispy-poetry-d145.baochao0912.workers.dev/web/v1"
19
- models:
20
  default: ["yi-lightning"]
21
- titleConvo: false
22
- titleModel: "yi-lightning"
23
- modelDisplayLabel: "yi-lightning"
24
  # addParams:
25
  # safe_prompt: true # Mistral specific value for moderating messages
26
  # NOTE: For Mistral, it is necessary to drop the following parameters or you will encounter a 422 Error:
27
- dropParams: ["stop", "user", "frequency_penalty", "presence_penalty"]
 
1
  version: 1.1.6
2
+
3
  cache: true
4
+ fileStrategy: "firebase"
5
+
6
  interface:
7
  privacyPolicy:
8
  externalUrl: 'https://librechat.ai/privacy-policy'
 
10
  termsOfService:
11
  externalUrl: 'https://librechat.ai/tos'
12
  openNewTab: true
13
+
14
  registration:
15
+ socialLogins: ["discord", "facebook", "github", "google", "openid"]
16
+
17
  endpoints:
18
+ custom:
19
+ # Anyscale
20
+ # # Model list: https://console.anyscale.com/v2/playground
21
+ - name: "Anyscale"
22
+ apiKey: "user_provided"
23
+ baseURL: "https://api.endpoints.anyscale.com/v1"
24
+ models:
25
+ default: [
26
+ "google/gemma-7b-it",
27
+ "llava-hf/llava-v1.6-mistral-7b-hf",
28
+ "meta-llama/Meta-Llama-3-70B-Instruct",
29
+ "meta-llama/Meta-Llama-3-8B-Instruct",
30
+ "mistralai/Mistral-7B-Instruct-v0.1",
31
+ "mistralai/Mixtral-8x22B-Instruct-v0.1",
32
+ "mistralai/Mixtral-8x7B-Instruct-v0.1",
33
+ "mlabonne/NeuralHermes-2.5-Mistral-7B",
34
+ ]
35
+ fetch: false
36
+ titleConvo: true
37
+ titleModel: "meta-llama/Meta-Llama-3-8B-Instruct"
38
+ summarize: false
39
+ summaryModel: "meta-llama/Meta-Llama-3-8B-Instruct"
40
+ forcePrompt: false
41
+ modelDisplayLabel: "Anyscale"
42
+
43
+ # APIpie
44
+ # https://apipie.ai/dashboard/
45
+ # Script to fetch models: https://github.com/LibreChat-AI/librechat-config-yaml/blob/main/scripts/apipie.py
46
+ - name: "APIpie"
47
+ apiKey: "user_provided"
48
+ baseURL: "https://apipie.ai/v1/"
49
+ models:
50
+ default: [
51
+ "GPT-JT-Moderation-6B",
52
+ "Hermes-2-Theta-Llama-3-70B",
53
+ "Koala-13B",
54
+ "Koala-7B",
55
+ "LLaMA-2-7B-32K",
56
+ "Llama-2-13b-chat-hf",
57
+ "Llama-2-13b-hf",
58
+ "Llama-2-70b-chat-hf",
59
+ "Llama-2-70b-hf",
60
+ "Llama-2-7b-chat-hf",
61
+ "Llama-2-7b-hf",
62
+ "Llama-3-70B-Instruct-Gradient-1048k",
63
+ "Llama-3-70b-chat-hf",
64
+ "Llama-3-70b-hf",
65
+ "Llama-3-8b-chat-hf",
66
+ "Llama-3-8b-hf",
67
+ "Llama-Rank-V1",
68
+ "Meta-Llama-3-70B",
69
+ "Meta-Llama-3-70B-Instruct",
70
+ "Meta-Llama-3-70B-Instruct-Lite",
71
+ "Meta-Llama-3-70B-Instruct-Turbo",
72
+ "Meta-Llama-3-8B-Instruct",
73
+ "Meta-Llama-3-8B-Instruct-Lite",
74
+ "Meta-Llama-3-8B-Instruct-Turbo",
75
+ "Meta-Llama-3.1-405B-Instruct-Lite-Pro",
76
+ "Meta-Llama-3.1-405B-Instruct-Turbo",
77
+ "Meta-Llama-3.1-70B-Instruct-Reference",
78
+ "Meta-Llama-3.1-70B-Instruct-Turbo",
79
+ "Meta-Llama-3.1-70B-Reference",
80
+ "Meta-Llama-3.1-8B-Instruct-Turbo",
81
+ "Meta-Llama-3.1-8B-Reference",
82
+ "Mistral-7B-Instruct-v0.1",
83
+ "Mistral-7B-Instruct-v0.2",
84
+ "Mistral-7B-Instruct-v0.3",
85
+ "Mistral-7B-OpenOrca",
86
+ "Mistral-7B-v0.1",
87
+ "Mixtral-8x22B",
88
+ "Mixtral-8x22B-Instruct-v0.1",
89
+ "Mixtral-8x22B-v0.1",
90
+ "Mixtral-8x7B-Instruct-v0.1",
91
+ "MythoMax-L2-13b",
92
+ "MythoMax-L2-13b-Lite",
93
+ "NexusRaven-V2-13B",
94
+ "Nous-Capybara-7B-V1p9",
95
+ "Nous-Hermes-13b",
96
+ "Nous-Hermes-2-Mistral-7B-DPO",
97
+ "Nous-Hermes-2-Mixtral-8x7B-DPO",
98
+ "Nous-Hermes-2-Mixtral-8x7B-SFT",
99
+ "Nous-Hermes-Llama2-13b",
100
+ "Nous-Hermes-Llama2-70b",
101
+ "Nous-Hermes-llama-2-7b",
102
+ "OLMo-7B",
103
+ "OLMo-7B-Instruct",
104
+ "OpenHermes-2-Mistral-7B",
105
+ "OpenHermes-2p5-Mistral-7B",
106
+ "Platypus2-70B-instruct",
107
+ "Qwen1.5-0.5B",
108
+ "Qwen1.5-0.5B-Chat",
109
+ "Qwen1.5-1.8B",
110
+ "Qwen1.5-1.8B-Chat",
111
+ "Qwen1.5-110B-Chat",
112
+ "Qwen1.5-14B",
113
+ "Qwen1.5-14B-Chat",
114
+ "Qwen1.5-32B",
115
+ "Qwen1.5-32B-Chat",
116
+ "Qwen1.5-4B",
117
+ "Qwen1.5-4B-Chat",
118
+ "Qwen1.5-72B",
119
+ "Qwen1.5-72B-Chat",
120
+ "Qwen1.5-7B",
121
+ "Qwen1.5-7B-Chat",
122
+ "Qwen2-1.5B",
123
+ "Qwen2-1.5B-Instruct",
124
+ "Qwen2-72B",
125
+ "Qwen2-72B-Instruct",
126
+ "Qwen2-7B",
127
+ "Qwen2-7B-Instruct",
128
+ "ReMM-SLERP-L2-13B",
129
+ "RedPajama-INCITE-7B-Base",
130
+ "RedPajama-INCITE-7B-Chat",
131
+ "RedPajama-INCITE-Chat-3B-v1",
132
+ "SOLAR-10.7B-Instruct-v1.0",
133
+ "SOLAR-10.7B-Instruct-v1.0-int4",
134
+ "Snorkel-Mistral-PairRM-DPO",
135
+ "StripedHyena-Hessian-7B",
136
+ "Toppy-M-7B",
137
+ "WizardLM-13B-V1.2",
138
+ "WizardLM-2-7B",
139
+ "WizardLM-2-8x22B",
140
+ "WizardLM-70B-V1.0",
141
+ "Yi-34B",
142
+ "Yi-34B-Chat",
143
+ "Yi-6B",
144
+ "airoboros-70b",
145
+ "airoboros-l2-70b",
146
+ "alpaca-7b",
147
+ "babbage-002",
148
+ "chat-bison",
149
+ "chatgpt-4o-latest",
150
+ "chatx_cheap_128k",
151
+ "chatx_cheap_32k",
152
+ "chatx_cheap_4k",
153
+ "chatx_cheap_64k",
154
+ "chatx_cheap_8k",
155
+ "chatx_mids_4k",
156
+ "chatx_premium_128k",
157
+ "chatx_premium_32k",
158
+ "chatx_premium_4k",
159
+ "chatx_premium_8k",
160
+ "chronos-hermes-13b",
161
+ "chronos-hermes-13b-v2",
162
+ "claude-1",
163
+ "claude-1.2",
164
+ "claude-2",
165
+ "claude-2.0",
166
+ "claude-2.1",
167
+ "claude-3-5-haiku",
168
+ "claude-3-5-haiku-20241022",
169
+ "claude-3-5-sonnet",
170
+ "claude-3-haiku",
171
+ "claude-3-opus",
172
+ "claude-3-sonnet",
173
+ "claude-instant-1",
174
+ "claude-instant-1.0",
175
+ "claude-instant-1.1",
176
+ "claude-instant-1.2",
177
+ "claude2_4k",
178
+ "command",
179
+ "command-light",
180
+ "command-light-nightly",
181
+ "command-light-text-v14",
182
+ "command-r",
183
+ "command-r-03-2024",
184
+ "command-r-08-2024",
185
+ "command-r-plus",
186
+ "command-r-plus-04-2024",
187
+ "command-r-plus-08-2024",
188
+ "command-r-plus-v1",
189
+ "command-r-v1",
190
+ "command-text-v14",
191
+ "davinci-002",
192
+ "dbrx-instruct",
193
+ "deepseek-chat",
194
+ "deepseek-llm-67b-chat",
195
+ "dolphin-2.5-mixtral-8x7b",
196
+ "dolphin-2.6-mixtral-8x7b",
197
+ "dolphin-mixtral-8x22b",
198
+ "dolphin-mixtral-8x7b",
199
+ "eva-qwen-2.5-14b",
200
+ "eva-qwen-2.5-32b",
201
+ "evo-1-131k-base",
202
+ "evo-1-8k-base",
203
+ "fimbulvetr-11b-v2",
204
+ "gemini-1.5-flash",
205
+ "gemini-1.5-pro",
206
+ "gemini-exp-1114",
207
+ "gemini-flash-1.5",
208
+ "gemini-flash-1.5-8b",
209
+ "gemini-flash-1.5-8b-exp",
210
+ "gemini-flash-1.5-exp",
211
+ "gemini-flash_8k",
212
+ "gemini-pro",
213
+ "gemini-pro-1.5",
214
+ "gemini-pro-1.5-exp",
215
+ "gemini-pro-vision",
216
+ "gemini-pro_8k",
217
+ "gemma-1.1-7b-it",
218
+ "gemma-2-27b-it",
219
+ "gemma-2-9b-it",
220
+ "gemma-2b",
221
+ "gemma-2b-it",
222
+ "gemma-7b",
223
+ "gemma-7b-it",
224
+ "general_32k",
225
+ "goliath-120b",
226
+ "gpt-3.5-turbo",
227
+ "gpt-3.5-turbo-0125",
228
+ "gpt-3.5-turbo-0301",
229
+ "gpt-3.5-turbo-0613",
230
+ "gpt-3.5-turbo-1106",
231
+ "gpt-3.5-turbo-16k",
232
+ "gpt-3.5-turbo-16k-0613",
233
+ "gpt-3.5-turbo-instruct",
234
+ "gpt-3.5-turbo-instruct-0914",
235
+ "gpt-3.5_4k",
236
+ "gpt-4",
237
+ "gpt-4-0125-preview",
238
+ "gpt-4-0314",
239
+ "gpt-4-0613",
240
+ "gpt-4-1106-preview",
241
+ "gpt-4-1106-vision-preview",
242
+ "gpt-4-32k",
243
+ "gpt-4-32k-0314",
244
+ "gpt-4-turbo",
245
+ "gpt-4-turbo-2024-04-09",
246
+ "gpt-4-turbo-preview",
247
+ "gpt-4-vision-preview",
248
+ "gpt-4o",
249
+ "gpt-4o-2024-05-13",
250
+ "gpt-4o-2024-08-06",
251
+ "gpt-4o-2024-11-20",
252
+ "gpt-4o-audio-preview",
253
+ "gpt-4o-audio-preview-2024-10-01",
254
+ "gpt-4o-mini",
255
+ "gpt-4o-mini-2024-07-18",
256
+ "gpt-4o-realtime-preview",
257
+ "gpt-4o-realtime-preview-2024-10-01",
258
+ "gpt4o-mini_16k",
259
+ "gpt4o_16k",
260
+ "grok-2",
261
+ "grok-beta",
262
+ "grok-vision-beta",
263
+ "guanaco-13b",
264
+ "guanaco-33b",
265
+ "guanaco-65b",
266
+ "guanaco-7b",
267
+ "hermes-2-pro-llama-3-8b",
268
+ "hermes-2-theta-llama-3-8b",
269
+ "hermes-3-llama-3.1-405b",
270
+ "hermes-3-llama-3.1-70b",
271
+ "inflection-3-pi",
272
+ "inflection-3-productivity",
273
+ "j2-grande-instruct",
274
+ "j2-jumbo-instruct",
275
+ "j2-mid",
276
+ "j2-mid-v1",
277
+ "j2-ultra",
278
+ "j2-ultra-v1",
279
+ "jamba-1-5-large",
280
+ "jamba-1-5-large-v1",
281
+ "jamba-1-5-mini",
282
+ "jamba-1-5-mini-v1",
283
+ "jamba-instruct",
284
+ "jamba-instruct-v1",
285
+ "l3-euryale-70b",
286
+ "l3-lunaris-8b",
287
+ "l3.1-euryale-70b",
288
+ "large-latest",
289
+ "lfm-40b",
290
+ "llama-13b",
291
+ "llama-2-13b",
292
+ "llama-2-13b-chat",
293
+ "llama-2-70b-chat",
294
+ "llama-2-7b",
295
+ "llama-2-7b-chat",
296
+ "llama-3-70b-instruct",
297
+ "llama-3-8b-instruct",
298
+ "llama-3-lumimaid-70b",
299
+ "llama-3-lumimaid-8b",
300
+ "llama-3-sonar-large-32k-chat",
301
+ "llama-3-sonar-large-32k-online",
302
+ "llama-3-sonar-small-32k-chat",
303
+ "llama-3.1-405b",
304
+ "llama-3.1-405b-instruct",
305
+ "llama-3.1-70b-instruct",
306
+ "llama-3.1-8b-instruct",
307
+ "llama-3.1-lumimaid-70b",
308
+ "llama-3.1-lumimaid-8b",
309
+ "llama-3.1-nemotron-70b-instruct",
310
+ "llama-3.1-sonar-huge-128k-online",
311
+ "llama-3.1-sonar-large-128k-chat",
312
+ "llama-3.1-sonar-large-128k-online",
313
+ "llama-3.1-sonar-small-128k-chat",
314
+ "llama-3.1-sonar-small-128k-online",
315
+ "llama-3.2-11b-vision-instruct",
316
+ "llama-3.2-1b-instruct",
317
+ "llama-3.2-3b-instruct",
318
+ "llama-3.2-90b-vision-instruct",
319
+ "llama-30b",
320
+ "llama-65b",
321
+ "llama-7b",
322
+ "llama-guard-2-8b",
323
+ "llama2-13b-chat-v1",
324
+ "llama2-70b-chat-v1",
325
+ "llama3-1_128k",
326
+ "llama3-2_128k",
327
+ "llama3-70b-instruct-v1",
328
+ "llama3-8b-instruct-v1",
329
+ "llemma_7b",
330
+ "lzlv-70b-fp16-hf",
331
+ "magnum-72b",
332
+ "magnum-v2-72b",
333
+ "magnum-v4-72b",
334
+ "medium",
335
+ "meta-llama-3.1-8b-instruct",
336
+ "midnight-rose-70b",
337
+ "ministral-3b",
338
+ "ministral-8b",
339
+ "mistral-7b-instruct",
340
+ "mistral-7b-instruct-v0",
341
+ "mistral-7b-instruct-v0.1",
342
+ "mistral-7b-instruct-v0.2",
343
+ "mistral-7b-instruct-v0.3",
344
+ "mistral-large",
345
+ "mistral-medium",
346
+ "mistral-nemo",
347
+ "mistral-small",
348
+ "mistral-small-2402-v1",
349
+ "mistral-tiny",
350
+ "mistral_32k",
351
+ "mixtral-8x22b-instruct",
352
+ "mixtral-8x7b",
353
+ "mixtral-8x7b-instruct",
354
+ "mixtral-8x7b-instruct-v0",
355
+ "mixtral_32k",
356
+ "mn-celeste-12b",
357
+ "mn-inferor-12b",
358
+ "mn-starcannon-12b",
359
+ "mythalion-13b",
360
+ "mythomax-l2-13b",
361
+ "mythomist-7b",
362
+ "noromaid-20b",
363
+ "nous-hermes-2-mixtral-8x7b-dpo",
364
+ "nous-hermes-2-vision-7b",
365
+ "nous-hermes-llama2-13b",
366
+ "o1-mini",
367
+ "o1-mini-2024-09-12",
368
+ "o1-preview",
369
+ "o1-preview-2024-09-12",
370
+ "olympus-premier-v1",
371
+ "online-llama_128k",
372
+ "openchat-3.5-1210",
373
+ "openchat-7b",
374
+ "openchat_3.5",
375
+ "openhermes-2.5-mistral-7b",
376
+ "palm-2-chat-bison",
377
+ "palm-2-chat-bison-32k",
378
+ "phi-2",
379
+ "phi-3-medium-128k-instruct",
380
+ "phi-3-mini-128k-instruct",
381
+ "phi-3.5-mini-128k-instruct",
382
+ "pixtral-12b",
383
+ "pixtral-large-2411",
384
+ "qwen-110b-chat",
385
+ "qwen-2-72b-instruct",
386
+ "qwen-2-7b-instruct",
387
+ "qwen-2-vl-72b-instruct",
388
+ "qwen-2-vl-7b-instruct",
389
+ "qwen-2.5-72b-instruct",
390
+ "qwen-2.5-7b-instruct",
391
+ "qwen-72b-chat",
392
+ "qwen1-5_32k",
393
+ "qwen2_32k",
394
+ "remm-slerp-l2-13b",
395
+ "rocinante-12b",
396
+ "snowflake-arctic-instruct",
397
+ "sorcererlm-8x22b",
398
+ "titan-text-express-v1",
399
+ "titan-text-lite-v1",
400
+ "titan-text-premier-v1",
401
+ "titan-tg1-large",
402
+ "titan_32k",
403
+ "titan_4k",
404
+ "toppy-m-7b",
405
+ "unslopnemo-12b",
406
+ "vicuna-13b-v1.3",
407
+ "vicuna-13b-v1.5",
408
+ "vicuna-13b-v1.5-16k",
409
+ "vicuna-7b-v1.3",
410
+ "vicuna-7b-v1.5",
411
+ "weaver",
412
+ "wizardlm-2-7b",
413
+ "wizardlm-2-8x22b",
414
+ "xwin-lm-70b",
415
+ "yi-large",
416
+ "yi-vision",
417
+ "zephyr-7b-beta",
418
+ "zephyr-orpo-141b-A35b-v0.1"
419
+ ]
420
+ fetch: false
421
+ titleConvo: true
422
+ titleModel: "claude-3-haiku"
423
+ summarize: false
424
+ summaryModel: "claude-3-haiku"
425
+ modelDisplayLabel: "APIpie"
426
+
427
+ # cohere
428
+ # Model list: https://dashboard.cohere.com/playground/chat
429
+ - name: "cohere"
430
+ apiKey: "user_provided"
431
+ baseURL: "https://api.cohere.ai/v1"
432
+ models:
433
+ default: [
434
+ "c4ai-aya-23-35b",
435
+ "c4ai-aya-23-8b",
436
+ "command",
437
+ "command-light",
438
+ "command-light-nightly",
439
+ "command-nightly",
440
+ "command-r",
441
+ "command-r-plus",
442
+ ]
443
+ fetch: false
444
+ modelDisplayLabel: "cohere"
445
+ titleModel: "command"
446
+ dropParams: ["stop", "user", "frequency_penalty", "presence_penalty", "temperature", "top_p"]
447
+
448
+ # DEEPNIGHT
449
+ # https://github.com/brahmai-research/aiforcause
450
+ # Model list: https://aiforcause.deepnight.tech/models
451
+ - name: "DEEPNIGHT"
452
+ apiKey: "sk-free1234"
453
+ baseURL: "https://aiforcause.deepnight.tech/openai/"
454
+ models:
455
+ default: [
456
+ "gpt-35-turbo",
457
+ "gpt-35-turbo-16k",
458
+ "gpt-4-turbo"
459
+ ]
460
+ fetch: false
461
+ titleConvo: true
462
+ titleModel: "gpt-35-turbo"
463
+ summarize: false
464
+ summaryModel: "gpt-35-turbo"
465
+ forcePrompt: false
466
+ modelDisplayLabel: "DEEPNIGHT"
467
+ addParams:
468
+ stream: true
469
+ iconURL: "https://raw.githubusercontent.com/LibreChat-AI/librechat-config-yaml/main/icons/DEEPNIGHT.png"
470
+
471
+ # deepseek
472
+ # https://platform.deepseek.com/api_keys
473
+ # Model list: https://platform.deepseek.com/api-docs/pricing
474
+ - name: "deepseek"
475
+ apiKey: "user_provided"
476
+ baseURL: "https://api.deepseek.com"
477
+ models:
478
+ default: [
479
+ "deepseek-chat",
480
+ "deepseek-coder"
481
+ ]
482
+ fetch: false
483
+ titleConvo: true
484
+ titleModel: "deepseek-chat"
485
+ summarize: false
486
+ summaryModel: "deepseek-chat"
487
+ forcePrompt: false
488
+ modelDisplayLabel: "DeepSeek"
489
+
490
+ # Fireworks.ai
491
+ # Models: https://fireworks.ai/models?show=Serverless
492
+ - name: "Fireworks"
493
+ apiKey: "user_provided"
494
+ baseURL: "https://api.fireworks.ai/inference/v1"
495
+ models:
496
+ default: [
497
+ "accounts/fireworks/models/devashisht-test-v2",
498
+ "accounts/fireworks/models/dt-fc-rc-v1",
499
+ "accounts/fireworks/models/firefunction-v1",
500
+ "accounts/fireworks/models/firefunction-v2",
501
+ "accounts/fireworks/models/firellava-13b",
502
+ "accounts/devashisht-72fdad/models/function-calling-v11",
503
+ "accounts/fireworks/models/fw-function-call-34b-v0",
504
+ "accounts/stability/models/japanese-stablelm-instruct-beta-70b",
505
+ "accounts/stability/models/japanese-stablelm-instruct-gamma-7b",
506
+ "accounts/fireworks/models/japanese-stable-vlm",
507
+ "accounts/fireworks/models/gemma2-9b-it",
508
+ "accounts/fireworks/models/llama-v3p1-405b-instruct",
509
+ "accounts/fireworks/models/llama-v3p1-70b-instruct",
510
+ "accounts/fireworks/models/llama-v3p1-8b-instruct",
511
+ "accounts/fireworks/models/llama-v3-70b-instruct",
512
+ "accounts/fireworks/models/llama-v3-70b-instruct-hf",
513
+ "accounts/fireworks/models/llama-v3-8b-hf",
514
+ "accounts/fireworks/models/llama-v3-8b-instruct",
515
+ "accounts/fireworks/models/llama-v3-8b-instruct-hf",
516
+ "accounts/fireworks/models/llama-v2-13b-chat",
517
+ "accounts/fireworks/models/llama-v2-13b-code-instruct",
518
+ "accounts/fireworks/models/llama-v2-34b-code-instruct",
519
+ "accounts/fireworks/models/llama-v2-70b-chat",
520
+ "accounts/fireworks/models/llama-v2-70b-code-instruct",
521
+ "accounts/fireworks/models/llama-v2-7b-chat",
522
+ "accounts/fireworks/models/deepseek-coder-v2-instruct",
523
+ "accounts/fireworks/models/deepseek-coder-v2-lite-instruct",
524
+ "accounts/fireworks/models/llava-v15-13b-fireworks",
525
+ "accounts/fireworks/models/mistral-7b-instruct-4k",
526
+ "accounts/dev-e24710/models/mistral-spellbound-format",
527
+ "accounts/fireworks/models/mixtral-8x22b-instruct",
528
+ "accounts/fireworks/models/mixtral-8x7b-instruct",
529
+ "accounts/fireworks/models/mixtral-8x7b-instruct-hf",
530
+ "accounts/fireworks/models/new-mixtral-chat",
531
+ "accounts/fireworks/models/qwen-14b-chat",
532
+ "accounts/fireworks/models/qwen-1-8b-chat",
533
+ "accounts/fireworks/models/qwen-72b-chat",
534
+ "accounts/stability/models/stablelm-zephyr-3b",
535
+ "accounts/fireworks/models/yi-34b-200k-capybara",
536
+ ]
537
+ fetch: false
538
+ titleConvo: true
539
+ titleModel: "accounts/fireworks/models/llama-v2-7b-chat"
540
+ summarize: false
541
+ summaryModel: "accounts/fireworks/models/llama-v2-7b-chat"
542
+ forcePrompt: false
543
+ modelDisplayLabel: "Fireworks"
544
+ dropParams: ["user"]
545
+
546
+ # GitHub
547
+ - name: "Github Models"
548
+ iconURL: https://github.githubassets.com/assets/GitHub-Mark-ea2971cee799.png
549
+ apiKey: "user_provided"
550
+ baseURL: "https://models.inference.ai.azure.com"
551
+ models:
552
+ default: ["gpt-4o","Phi-3.5-MoE-instruct","Phi-3.5-mini-instruct","Phi-3.5-vision-instruct"]
553
+ fetch: false
554
+ titleConvo: true
555
+ titleModel: "gpt-4o-mini"
556
+
557
+ # groq
558
+ # Model list: https://console.groq.com/settings/limits
559
+ - name: "groq"
560
+ apiKey: "user_provided"
561
+ baseURL: "https://api.groq.com/openai/v1/"
562
+ models:
563
+ default: [
564
+ "llama-3.1-405b-reasoning",
565
+ "llama-3.1-70b-versatile",
566
+ "llama-3.1-8b-instant",
567
+ "llama3-groq-70b-8192-tool-use-preview",
568
+ "llama3-groq-8b-8192-tool-use-preview",
569
+ "llama3-70b-8192",
570
+ "llama3-8b-8192",
571
+ "mixtral-8x7b-32768",
572
+ "gemma-7b-it",
573
+ "gemma2-9b-it"
574
+ ]
575
+ fetch: false
576
+ titleConvo: true
577
+ titleModel: "mixtral-8x7b-32768"
578
+ modelDisplayLabel: "groq"
579
+
580
+ # HuggingFace
581
+ # https://huggingface.co/settings/tokens
582
+ - name: 'HuggingFace'
583
+ apiKey: "user_provided"
584
+ baseURL: 'https://api-inference.huggingface.co/v1'
585
+ models:
586
+ default: [
587
+ "gemma2-9b-it",
588
+ "gemma-7b-it",
589
+ "llama-3.1-8b-instant",
590
+ "llama3-groq-70b-8192-tool-use-preview",
591
+ "llama3-groq-8b-8192-tool-use-preview",
592
+ "llama-3.1-70b-versatile",
593
+ "llama-3.1-70b-specdec",
594
+ "llama-3.1-8b-instant",
595
+ "llama-3.2-1b-preview",
596
+ "llama-3.2-3b-preview",
597
+ "llama-3.2-11b-vision-preview",
598
+ "llama-3.2-90b-vision-preview",
599
+ "llama3-70b-8192",
600
+ "llama3-8b-8192",
601
+ "mixtral-8x7b-32768",
602
+ ]
603
+ fetch: true
604
+ titleConvo: true
605
+ titleModel: "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO"
606
+ dropParams: ["top_p"]
607
+
608
+ # Hyperbolic
609
+ # https://app.hyperbolic.xyz/models
610
+ - name: 'Hyperbolic'
611
+ apiKey: 'user_provided'
612
+ baseURL: 'https://api.hyperbolic.xyz/v1/'
613
+ models:
614
+ default: [
615
+ "deepseek-ai/DeepSeek-V2.5",
616
+ "meta-llama/Llama-3.2-3B-Instruct",
617
+ "meta-llama/Meta-Llama-3-70B-Instruct",
618
+ "meta-llama/Meta-Llama-3.1-405B",
619
+ "meta-llama/Meta-Llama-3.1-405B-FP8",
620
+ "meta-llama/Meta-Llama-3.1-405B-Instruct",
621
+ "meta-llama/Meta-Llama-3.1-70B-Instruct",
622
+ "meta-llama/Meta-Llama-3.1-8B-Instruct",
623
+ "NousResearch/Hermes-3-Llama-3.1-70B",
624
+ "Qwen/Qwen2.5-Coder-32B-Instruct",
625
+ "Qwen/Qwen2.5-72B-Instruct",
626
+ ]
627
+ fetch: false
628
+ titleConvo: true
629
+ titleModel: "meta-llama/Meta-Llama-3.1-8B-Instruct"
630
+ modelDisplayLabel: "Hyperbolic"
631
+ iconURL: "https://pbs.twimg.com/profile_images/1775708849707819008/1RRWsmmg_400x400.jpg"
632
+
633
+ # Mistral AI API
634
+ # Model list: https://docs.mistral.ai/getting-started/models/
635
+ - name: "Mistral"
636
+ apiKey: "user_provided"
637
+ baseURL: "https://api.mistral.ai/v1"
638
+ models:
639
+ default: [
640
+ "mistral-large-latest",
641
+ "pixtral-large-latest",
642
+ "ministral-3b-latest",
643
+ "ministral-8b-latest",
644
+ "mistral-small-latest",
645
+ "codestral-latest",
646
+ "pixtral-12b-2409",
647
+ "open-mistral-nemo",
648
+ "open-codestral-mamba",
649
+ "open-mistral-7b",
650
+ "open-mixtral-8x7b",
651
+ "open-mixtral-8x22b"
652
+ ]
653
+ fetch: false
654
+ titleConvo: true
655
+ titleMethod: "completion"
656
+ titleModel: "mistral-tiny"
657
+ summarize: false
658
+ summaryModel: "mistral-tiny"
659
+ forcePrompt: false
660
+ modelDisplayLabel: "Mistral"
661
+ dropParams: ["stop", "user", "frequency_penalty", "presence_penalty"]
662
+
663
+ # NVIDIA
664
+ # https://build.nvidia.com/explore/discover
665
+ - name: "Nvidia"
666
+ apiKey: "user_provided"
667
+ baseURL: "https://integrate.api.nvidia.com/v1/"
668
+ models:
669
+ default: [
670
+ "nvidia/llama-3.1-nemotron-51b-instruct",
671
+ "nvidia/llama-3.1-nemotron-70b-instruct",
672
+ "nvidia/nemotron-mini-4b-instruct",
673
+ "nvidia/nemotron-4-340b-instruct",
674
+ ]
675
+ fetch: false
676
+ titleConvo: true
677
+ titleModel: "nvidia/nemotron-mini-4b-instruct"
678
+ modelDisplayLabel: "Nvidia"
679
+ iconURL: "https://raw.githubusercontent.com/LibreChat-AI/librechat-config-yaml/refs/heads/main/icons/nvidia.png"
680
+
681
+ # OpenRouter.ai
682
+ # Model list: https://openrouter.ai/models
683
+ # Script to fetch models: https://github.com/LibreChat-AI/librechat-config-yaml/blob/main/scripts/openrouter.py
684
+ - name: "OpenRouter"
685
+ apiKey: "user_provided"
686
+ baseURL: "https://openrouter.ai/api/v1"
687
+ models:
688
+ default: [
689
+ "openrouter/auto",
690
+ "---FREE---",
691
+ "google/gemma-2-9b-it:free",
692
+ "gryphe/mythomax-l2-13b:free",
693
+ "huggingfaceh4/zephyr-7b-beta:free",
694
+ "liquid/lfm-40b:free",
695
+ "meta-llama/llama-3-8b-instruct:free",
696
+ "meta-llama/llama-3.1-405b-instruct:free",
697
+ "meta-llama/llama-3.1-70b-instruct:free",
698
+ "meta-llama/llama-3.1-8b-instruct:free",
699
+ "meta-llama/llama-3.2-11b-vision-instruct:free",
700
+ "meta-llama/llama-3.2-1b-instruct:free",
701
+ "meta-llama/llama-3.2-3b-instruct:free",
702
+ "meta-llama/llama-3.2-90b-vision-instruct:free",
703
+ "microsoft/phi-3-medium-128k-instruct:free",
704
+ "microsoft/phi-3-mini-128k-instruct:free",
705
+ "mistralai/mistral-7b-instruct:free",
706
+ "nousresearch/hermes-3-llama-3.1-405b:free",
707
+ "openchat/openchat-7b:free",
708
+ "qwen/qwen-2-7b-instruct:free",
709
+ "undi95/toppy-m-7b:free",
710
+ "---NITRO---",
711
+ "gryphe/mythomax-l2-13b:nitro",
712
+ "meta-llama/llama-3-70b-instruct:nitro",
713
+ "meta-llama/llama-3-8b-instruct:nitro",
714
+ "meta-llama/llama-3.1-405b-instruct:nitro",
715
+ "meta-llama/llama-3.1-70b-instruct:nitro",
716
+ "mistralai/mistral-7b-instruct:nitro",
717
+ "mistralai/mixtral-8x7b-instruct:nitro",
718
+ "undi95/toppy-m-7b:nitro",
719
+ "---BETA---",
720
+ "anthropic/claude-2.0:beta",
721
+ "anthropic/claude-2.1:beta",
722
+ "anthropic/claude-2:beta",
723
+ "anthropic/claude-3-5-haiku-20241022:beta",
724
+ "anthropic/claude-3-5-haiku:beta",
725
+ "anthropic/claude-3-haiku:beta",
726
+ "anthropic/claude-3-opus:beta",
727
+ "anthropic/claude-3-sonnet:beta",
728
+ "anthropic/claude-3.5-sonnet-20240620:beta",
729
+ "anthropic/claude-3.5-sonnet:beta",
730
+ "---EXTENDED---",
731
+ "gryphe/mythomax-l2-13b:extended",
732
+ "meta-llama/llama-3-8b-instruct:extended",
733
+ "neversleep/llama-3-lumimaid-8b:extended",
734
+ "openai/gpt-4o:extended",
735
+ "undi95/remm-slerp-l2-13b:extended",
736
+ "---AI21---",
737
+ "ai21/jamba-1-5-large",
738
+ "ai21/jamba-1-5-mini",
739
+ "ai21/jamba-instruct",
740
+ "---ANTHROPIC---",
741
+ "anthropic/claude-2",
742
+ "anthropic/claude-2.0",
743
+ "anthropic/claude-2.1",
744
+ "anthropic/claude-3-5-haiku",
745
+ "anthropic/claude-3-5-haiku-20241022",
746
+ "anthropic/claude-3-haiku",
747
+ "anthropic/claude-3-opus",
748
+ "anthropic/claude-3-sonnet",
749
+ "anthropic/claude-3.5-sonnet",
750
+ "anthropic/claude-3.5-sonnet-20240620",
751
+ "---COHERE---",
752
+ "cohere/command",
753
+ "cohere/command-r",
754
+ "cohere/command-r-03-2024",
755
+ "cohere/command-r-08-2024",
756
+ "cohere/command-r-plus",
757
+ "cohere/command-r-plus-04-2024",
758
+ "cohere/command-r-plus-08-2024",
759
+ "---GOOGLE---",
760
+ "google/gemini-exp-1114",
761
+ "google/gemini-flash-1.5",
762
+ "google/gemini-flash-1.5-8b",
763
+ "google/gemini-flash-1.5-8b-exp",
764
+ "google/gemini-flash-1.5-exp",
765
+ "google/gemini-pro",
766
+ "google/gemini-pro-1.5",
767
+ "google/gemini-pro-1.5-exp",
768
+ "google/gemini-pro-vision",
769
+ "google/gemma-2-27b-it",
770
+ "google/gemma-2-9b-it",
771
+ "google/palm-2-chat-bison",
772
+ "google/palm-2-chat-bison-32k",
773
+ "google/palm-2-codechat-bison",
774
+ "google/palm-2-codechat-bison-32k",
775
+ "---META-LLAMA---",
776
+ "meta-llama/llama-2-13b-chat",
777
+ "meta-llama/llama-3-70b-instruct",
778
+ "meta-llama/llama-3-8b-instruct",
779
+ "meta-llama/llama-3.1-405b",
780
+ "meta-llama/llama-3.1-405b-instruct",
781
+ "meta-llama/llama-3.1-70b-instruct",
782
+ "meta-llama/llama-3.1-8b-instruct",
783
+ "meta-llama/llama-3.2-11b-vision-instruct",
784
+ "meta-llama/llama-3.2-1b-instruct",
785
+ "meta-llama/llama-3.2-3b-instruct",
786
+ "meta-llama/llama-3.2-90b-vision-instruct",
787
+ "meta-llama/llama-guard-2-8b",
788
+ "---MICROSOFT---",
789
+ "microsoft/phi-3-medium-128k-instruct",
790
+ "microsoft/phi-3-mini-128k-instruct",
791
+ "microsoft/phi-3.5-mini-128k-instruct",
792
+ "microsoft/wizardlm-2-7b",
793
+ "microsoft/wizardlm-2-8x22b",
794
+ "---MISTRALAI---",
795
+ "mistralai/codestral-mamba",
796
+ "mistralai/ministral-3b",
797
+ "mistralai/ministral-8b",
798
+ "mistralai/mistral-7b-instruct",
799
+ "mistralai/mistral-7b-instruct-v0.1",
800
+ "mistralai/mistral-7b-instruct-v0.2",
801
+ "mistralai/mistral-7b-instruct-v0.3",
802
+ "mistralai/mistral-large",
803
+ "mistralai/mistral-large-2407",
804
+ "mistralai/mistral-large-2411",
805
+ "mistralai/mistral-medium",
806
+ "mistralai/mistral-nemo",
807
+ "mistralai/mistral-small",
808
+ "mistralai/mistral-tiny",
809
+ "mistralai/mixtral-8x22b-instruct",
810
+ "mistralai/mixtral-8x7b",
811
+ "mistralai/mixtral-8x7b-instruct",
812
+ "mistralai/pixtral-12b",
813
+ "mistralai/pixtral-large-2411",
814
+ "---NEVERSLEEP---",
815
+ "neversleep/llama-3-lumimaid-70b",
816
+ "neversleep/llama-3-lumimaid-8b",
817
+ "neversleep/llama-3.1-lumimaid-70b",
818
+ "neversleep/llama-3.1-lumimaid-8b",
819
+ "neversleep/noromaid-20b",
820
+ "---NOUSRESEARCH---",
821
+ "nousresearch/hermes-2-pro-llama-3-8b",
822
+ "nousresearch/hermes-3-llama-3.1-405b",
823
+ "nousresearch/hermes-3-llama-3.1-70b",
824
+ "nousresearch/nous-hermes-2-mixtral-8x7b-dpo",
825
+ "nousresearch/nous-hermes-llama2-13b",
826
+ "---OPENAI---",
827
+ "openai/chatgpt-4o-latest",
828
+ "openai/gpt-3.5-turbo",
829
+ "openai/gpt-3.5-turbo-0125",
830
+ "openai/gpt-3.5-turbo-0613",
831
+ "openai/gpt-3.5-turbo-1106",
832
+ "openai/gpt-3.5-turbo-16k",
833
+ "openai/gpt-3.5-turbo-instruct",
834
+ "openai/gpt-4",
835
+ "openai/gpt-4-0314",
836
+ "openai/gpt-4-1106-preview",
837
+ "openai/gpt-4-32k",
838
+ "openai/gpt-4-32k-0314",
839
+ "openai/gpt-4-turbo",
840
+ "openai/gpt-4-turbo-preview",
841
+ "openai/gpt-4-vision-preview",
842
+ "openai/gpt-4o",
843
+ "openai/gpt-4o-2024-05-13",
844
+ "openai/gpt-4o-2024-08-06",
845
+ "openai/gpt-4o-2024-11-20",
846
+ "openai/gpt-4o-mini",
847
+ "openai/gpt-4o-mini-2024-07-18",
848
+ "openai/o1-mini",
849
+ "openai/o1-mini-2024-09-12",
850
+ "openai/o1-preview",
851
+ "openai/o1-preview-2024-09-12",
852
+ "---PERPLEXITY---",
853
+ "perplexity/llama-3-sonar-large-32k-chat",
854
+ "perplexity/llama-3-sonar-large-32k-online",
855
+ "perplexity/llama-3-sonar-small-32k-chat",
856
+ "perplexity/llama-3.1-sonar-huge-128k-online",
857
+ "perplexity/llama-3.1-sonar-large-128k-chat",
858
+ "perplexity/llama-3.1-sonar-large-128k-online",
859
+ "perplexity/llama-3.1-sonar-small-128k-chat",
860
+ "perplexity/llama-3.1-sonar-small-128k-online",
861
+ "---QWEN---",
862
+ "qwen/qwen-2-72b-instruct",
863
+ "qwen/qwen-2-7b-instruct",
864
+ "qwen/qwen-2-vl-72b-instruct",
865
+ "qwen/qwen-2-vl-7b-instruct",
866
+ "qwen/qwen-2.5-72b-instruct",
867
+ "qwen/qwen-2.5-7b-instruct",
868
+ "qwen/qwen-2.5-coder-32b-instruct",
869
+ "---OTHERS---",
870
+ "01-ai/yi-large",
871
+ "alpindale/goliath-120b",
872
+ "alpindale/magnum-72b",
873
+ "anthracite-org/magnum-v4-72b",
874
+ "cognitivecomputations/dolphin-mixtral-8x22b",
875
+ "cognitivecomputations/dolphin-mixtral-8x7b",
876
+ "databricks/dbrx-instruct",
877
+ "deepseek/deepseek-chat",
878
+ "eva-unit-01/eva-qwen-2.5-32b",
879
+ "gryphe/mythomax-l2-13b",
880
+ "infermatic/mn-inferor-12b",
881
+ "inflection/inflection-3-pi",
882
+ "inflection/inflection-3-productivity",
883
+ "jondurbin/airoboros-l2-70b",
884
+ "liquid/lfm-40b",
885
+ "lizpreciatior/lzlv-70b-fp16-hf",
886
+ "mancer/weaver",
887
+ "nvidia/llama-3.1-nemotron-70b-instruct",
888
+ "openchat/openchat-7b",
889
+ "pygmalionai/mythalion-13b",
890
+ "raifle/sorcererlm-8x22b",
891
+ "sao10k/l3-euryale-70b",
892
+ "sao10k/l3.1-euryale-70b",
893
+ "sophosympatheia/midnight-rose-70b",
894
+ "teknium/openhermes-2.5-mistral-7b",
895
+ "thedrummer/rocinante-12b",
896
+ "thedrummer/unslopnemo-12b",
897
+ "undi95/remm-slerp-l2-13b",
898
+ "undi95/toppy-m-7b",
899
+ "x-ai/grok-beta",
900
+ "x-ai/grok-vision-beta",
901
+ "xwin-lm/xwin-lm-70b"
902
+ ]
903
+ fetch: false
904
+ dropParams: ["stop"]
905
+ titleConvo: true
906
+ titleModel: "gpt-3.5-turbo"
907
+ summarize: false
908
+ summaryModel: "gpt-3.5-turbo"
909
+ forcePrompt: false
910
+ modelDisplayLabel: "OpenRouter"
911
+
912
+ # Perplexity
913
+ # Model list: https://docs.perplexity.ai/docs/model-cards
914
+ - name: "Perplexity"
915
+ apiKey: "user_provided"
916
+ baseURL: "https://api.perplexity.ai/"
917
+ models:
918
+ default: [
919
+ "llama-3.1-sonar-small-128k-chat",
920
+ "llama-3.1-sonar-small-128k-online",
921
+ "llama-3.1-sonar-large-128k-chat",
922
+ "llama-3.1-sonar-large-128k-online",
923
+ "llama-3.1-sonar-huge-128k-online",
924
+ "llama-3.1-8b-instruct",
925
+ "llama-3.1-70b-instruct"
926
+ ]
927
+ fetch: false # fetching list of models is not supported
928
+ titleConvo: true
929
+ titleModel: "llama-3.1-sonar-small-128k-chat"
930
+ summarize: false
931
+ summaryModel: "llama-3.1-sonar-small-128k-chat"
932
+ forcePrompt: false
933
+ dropParams: ["stop", "frequency_penalty"]
934
+ modelDisplayLabel: "Perplexity"
935
+
936
+ # SambaNova
937
+ # https://cloud.sambanova.ai/apis
938
+ - name: "SambaNova"
939
+ apiKey: "user_provided"
940
+ baseURL: "https://api.sambanova.ai/v1/"
941
+ models:
942
+ default: [
943
+ "Meta-Llama-3.1-8B-Instruct",
944
+ "Meta-Llama-3.1-70B-Instruct",
945
+ "Meta-Llama-3.1-405B-Instruct",
946
+ "Meta-Llama-3.2-1B-Instruct",
947
+ "Meta-Llama-3.2-3B-Instruct",
948
+ "Llama-3.2-11B-Vision-Instruct",
949
+ "Llama-3.2-90B-Vision-Instruct",
950
+ ]
951
+ fetch: false
952
+ titleConvo: true
953
+ titleModel: "Meta-Llama-3.1-8B-Instruct"
954
+ modelDisplayLabel: "SambaNova"
955
+ iconURL: "https://global.discourse-cdn.com/sambanova/original/1X/f5ea7759d23daaad4f91a387079b8a8a71cae3f6.webp"
956
+
957
+ # ShuttleAI API
958
+ # Model list: https://shuttleai.com/models
959
+ - name: "ShuttleAI"
960
+ apiKey: "user_provided"
961
+ baseURL: "https://api.shuttleai.com/v1"
962
+ models:
963
+ default: [
964
+ "shuttleai/shuttle-3",
965
+ "shuttleai/shuttle-3-mini",
966
+ "shuttleai/s1",
967
+ "shuttleai/s1-mini",
968
+ "openai/o1-preview-2024-09-12",
969
+ "openai/o1-mini-2024-09-12",
970
+ "openai/gpt-4o-mini-2024-07-18",
971
+ "openai/chatgpt-4o-latest",
972
+ "openai/gpt-4o-2024-08-06",
973
+ "openai/gpt-4o-2024-05-13",
974
+ "openai/gpt-4-turbo-2024-04-09",
975
+ "openai/gpt-4-0125-preview",
976
+ "openai/gpt-4-1106-preview",
977
+ "openai/gpt-4-0613",
978
+ "openai/gpt-3.5-turbo-0125",
979
+ "openai/gpt-3.5-turbo-1106",
980
+ "anthropic/claude-3-5-sonnet-20240620",
981
+ "anthropic/claude-3-opus-20240229",
982
+ "anthropic/claude-3-haiku-20240307",
983
+ "google/gemini-1.5-pro",
984
+ "google/gemini-1.5-pro-exp-0827",
985
+ "google/gemini-1.5-flash",
986
+ "google/gemini-1.5-flash-exp-0827",
987
+ "google/gemini-1.5-flash-8b-exp-0924",
988
+ "meta-llama/meta-llama-3.2-90b-vision-instruct",
989
+ "meta-llama/meta-llama-3.1-405b-instruct",
990
+ "meta-llama/meta-llama-3.1-70b-instruct",
991
+ "meta-llama/meta-llama-3.1-8b-instruct",
992
+ "mattshumer/reflection-llama-3.1-70b",
993
+ "perplexity/llama-3.1-sonar-large-128k-online",
994
+ "perplexity/llama-3.1-sonar-small-128k-online",
995
+ "perplexity/llama-3.1-sonar-large-128k-chat",
996
+ "perplexity/llama-3.1-sonar-small-128k-chat",
997
+ "mistralai/mistral-nemo-instruct-2407",
998
+ "mistralai/codestral-2405",
999
+ "alibaba-cloud/qwen-2.5-72b-instruct",
1000
+ "alibaba-cloud/qwen-2.5-coder-7b",
1001
+ "alibaba-cloud/qwen-2.5-math-72b",
1002
+ "cohere/command-r-plus-08-2024",
1003
+ "cohere/command-r-plus",
1004
+ "cohere/command-r-08-2024",
1005
+ "cohere/command-r"
1006
+ ]
1007
+ fetch: true
1008
+ titleConvo: true
1009
+ titleModel: "shuttle-2.5-mini"
1010
+ summarize: false
1011
+ summaryModel: "shuttle-2.5-mini"
1012
+ forcePrompt: false
1013
+ dropParams: ["user", "frequency_penalty", "presence_penalty", "repetition_penalty"]
1014
+ modelDisplayLabel: "ShuttleAI"
1015
+
1016
+ # together.ai
1017
+ # https://api.together.ai/settings/api-keys
1018
+ # Model list: https://docs.together.ai/docs/inference-models
1019
+ - name: "together.ai"
1020
+ apiKey: "user_provided"
1021
+ baseURL: "https://api.together.xyz"
1022
+ models:
1023
+ default: [
1024
+ "Austism/chronos-hermes-13b",
1025
+ "Gryphe/MythoMax-L2-13b",
1026
+ "Gryphe/MythoMax-L2-13b-Lite",
1027
+ "HuggingFaceH4/zephyr-7b-beta",
1028
+ "NousResearch/Hermes-2-Theta-Llama-3-70B",
1029
+ "NousResearch/Nous-Capybara-7B-V1p9",
1030
+ "NousResearch/Nous-Hermes-2-Mistral-7B-DPO",
1031
+ "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
1032
+ "NousResearch/Nous-Hermes-2-Mixtral-8x7B-SFT",
1033
+ "NousResearch/Nous-Hermes-Llama2-13b",
1034
+ "NousResearch/Nous-Hermes-Llama2-70b",
1035
+ "NousResearch/Nous-Hermes-llama-2-7b",
1036
+ "Open-Orca/Mistral-7B-OpenOrca",
1037
+ "Qwen/Qwen1.5-0.5B-Chat",
1038
+ "Qwen/Qwen1.5-1.8B-Chat",
1039
+ "Qwen/Qwen1.5-14B-Chat",
1040
+ "Qwen/Qwen1.5-32B-Chat",
1041
+ "Qwen/Qwen1.5-4B-Chat",
1042
+ "Qwen/Qwen1.5-7B-Chat",
1043
+ "Qwen/Qwen2-1.5B-Instruct",
1044
+ "Qwen/Qwen2-72B-Instruct",
1045
+ "Qwen/Qwen2-7B-Instruct",
1046
+ "Qwen/Qwen2.5-72B-Instruct-Turbo",
1047
+ "Qwen/Qwen2.5-7B-Instruct-Turbo",
1048
+ "Qwen/Qwen2.5-Coder-32B-Instruct",
1049
+ "Snowflake/snowflake-arctic-instruct",
1050
+ "Undi95/ReMM-SLERP-L2-13B",
1051
+ "Undi95/Toppy-M-7B",
1052
+ "WizardLM/WizardLM-13B-V1.2",
1053
+ "allenai/OLMo-7B-Instruct",
1054
+ "carson/ml318br",
1055
+ "codellama/CodeLlama-13b-Instruct-hf",
1056
+ "codellama/CodeLlama-34b-Instruct-hf",
1057
+ "codellama/CodeLlama-70b-Instruct-hf",
1058
+ "codellama/CodeLlama-7b-Instruct-hf",
1059
+ "cognitivecomputations/dolphin-2.5-mixtral-8x7b",
1060
+ "databricks/dbrx-instruct",
1061
+ "deepseek-ai/deepseek-coder-33b-instruct",
1062
+ "deepseek-ai/deepseek-llm-67b-chat",
1063
+ "garage-bAInd/Platypus2-70B-instruct",
1064
+ "google/gemma-2-27b-it",
1065
+ "google/gemma-2-9b-it",
1066
+ "google/gemma-2b-it",
1067
+ "google/gemma-7b-it",
1068
+ "gradientai/Llama-3-70B-Instruct-Gradient-1048k",
1069
+ "llava-hf/llava-v1.6-mistral-7b-hf",
1070
+ "lmsys/vicuna-13b-v1.3",
1071
+ "lmsys/vicuna-13b-v1.5",
1072
+ "lmsys/vicuna-13b-v1.5-16k",
1073
+ "lmsys/vicuna-7b-v1.3",
1074
+ "lmsys/vicuna-7b-v1.5",
1075
+ "meta-llama/Llama-2-13b-chat-hf",
1076
+ "meta-llama/Llama-2-70b-chat-hf",
1077
+ "meta-llama/Llama-2-7b-chat-hf",
1078
+ "meta-llama/Llama-3-70b-chat-hf",
1079
+ "meta-llama/Llama-3-8b-chat-hf",
1080
+ "meta-llama/Llama-3.2-11B-Vision-Instruct-Turbo",
1081
+ "meta-llama/Llama-3.2-3B-Instruct-Turbo",
1082
+ "meta-llama/Llama-3.2-90B-Vision-Instruct-Turbo",
1083
+ "meta-llama/Llama-Vision-Free",
1084
+ "meta-llama/Meta-Llama-3-70B-Instruct",
1085
+ "meta-llama/Meta-Llama-3-70B-Instruct-Lite",
1086
+ "meta-llama/Meta-Llama-3-70B-Instruct-Turbo",
1087
+ "meta-llama/Meta-Llama-3-8B-Instruct",
1088
+ "meta-llama/Meta-Llama-3-8B-Instruct-Lite",
1089
+ "meta-llama/Meta-Llama-3.1-405B-Instruct-Lite-Pro",
1090
+ "meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo",
1091
+ "meta-llama/Meta-Llama-3.1-70B-Instruct-Reference",
1092
+ "meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo",
1093
+ "meta-llama/Meta-Llama-3.1-8B-Instruct-Reference",
1094
+ "meta-llama/Meta-Llama-3.1-8B-Instruct-Turbo",
1095
+ "microsoft/WizardLM-2-8x22B",
1096
+ "mistralai/Mistral-7B-Instruct-v0.1",
1097
+ "mistralai/Mistral-7B-Instruct-v0.2",
1098
+ "mistralai/Mistral-7B-Instruct-v0.3",
1099
+ "mistralai/Mixtral-8x22B-Instruct-v0.1",
1100
+ "mistralai/Mixtral-8x7B-Instruct-v0.1",
1101
+ "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
1102
+ "openchat/openchat-3.5-1210",
1103
+ "scb10x/scb10x-llama3-typhoon-v1-5-8b-instruct",
1104
+ "scb10x/scb10x-llama3-typhoon-v1-5x-4f316",
1105
+ "snorkelai/Snorkel-Mistral-PairRM-DPO",
1106
+ "teknium/OpenHermes-2-Mistral-7B",
1107
+ "teknium/OpenHermes-2p5-Mistral-7B",
1108
+ "test/test11",
1109
+ "togethercomputer/CodeLlama-13b-Instruct",
1110
+ "togethercomputer/CodeLlama-34b-Instruct",
1111
+ "togethercomputer/CodeLlama-7b-Instruct",
1112
+ "togethercomputer/Koala-13B",
1113
+ "togethercomputer/Koala-7B",
1114
+ "togethercomputer/Llama-3-8b-chat-hf-int4",
1115
+ "togethercomputer/Llama-3-8b-chat-hf-int8",
1116
+ "togethercomputer/SOLAR-10.7B-Instruct-v1.0-int4",
1117
+ "togethercomputer/alpaca-7b",
1118
+ "togethercomputer/guanaco-13b",
1119
+ "togethercomputer/guanaco-33b",
1120
+ "togethercomputer/guanaco-65b",
1121
+ "togethercomputer/guanaco-7b",
1122
+ "togethercomputer/llama-2-13b-chat",
1123
+ "togethercomputer/llama-2-70b-chat",
1124
+ "togethercomputer/llama-2-7b-chat",
1125
+ "upstage/SOLAR-10.7B-Instruct-v1.0",
1126
+ "zero-one-ai/Yi-34B-Chat"
1127
+ ]
1128
+ fetch: false
1129
+ titleConvo: true
1130
+ titleModel: "openchat/openchat-3.5-1210"
1131
+ summarize: false
1132
+ summaryModel: "openchat/openchat-3.5-1210"
1133
+ forcePrompt: false
1134
+ modelDisplayLabel: "together.ai"
1135
+
1136
+ # Unify
1137
+ # Model list: https://unify.ai/chat
1138
+ - name: "Unify"
1139
+ apiKey: "user_provided"
1140
+ baseURL: "https://api.unify.ai/v0/"
1141
+ models:
1142
+ default: [
1143
+ "router@q:1|c:2.12e-01|t:5.00e-04|i:2.78e-04",
1144
+ "chatgpt-4o-latest@openai",
1145
+ "gpt-3.5-turbo@openai",
1146
+ "gpt-4-turbo@openai",
1147
+ "gpt-4@openai",
1148
+ "gpt-4o-2024-05-13@openai",
1149
+ "gpt-4o-2024-08-06@openai",
1150
+ "gpt-4o-mini@openai",
1151
+ "gpt-4o@openai",
1152
+ "o1-mini@openai",
1153
+ "o1-preview@openai",
1154
+ "claude-3-haiku@anthropic",
1155
+ "claude-3-opus@anthropic",
1156
+ "claude-3-sonnet@anthropic",
1157
+ "claude-3.5-haiku@anthropic",
1158
+ "claude-3.5-sonnet-20240620@anthropic",
1159
+ "claude-3.5-sonnet@anthropic",
1160
+ "claude-3-haiku@aws-bedrock",
1161
+ "claude-3-opus@aws-bedrock",
1162
+ "claude-3-sonnet@aws-bedrock",
1163
+ "claude-3.5-haiku@aws-bedrock",
1164
+ "claude-3.5-sonnet-20240620@aws-bedrock",
1165
+ "claude-3.5-sonnet@aws-bedrock",
1166
+ "command-r-plus@aws-bedrock",
1167
+ "llama-3-70b-chat@aws-bedrock",
1168
+ "llama-3-8b-chat@aws-bedrock",
1169
+ "llama-3.1-405b-chat@aws-bedrock",
1170
+ "llama-3.1-70b-chat@aws-bedrock",
1171
+ "llama-3.1-8b-chat@aws-bedrock",
1172
+ "llama-3.2-1b-chat@aws-bedrock",
1173
+ "llama-3.2-3b-chat@aws-bedrock",
1174
+ "mistral-7b-instruct-v0.2@aws-bedrock",
1175
+ "mistral-large@aws-bedrock",
1176
+ "mixtral-8x7b-instruct-v0.1@aws-bedrock",
1177
+ "claude-3-haiku@vertex-ai",
1178
+ "claude-3-opus@vertex-ai",
1179
+ "claude-3-sonnet@vertex-ai",
1180
+ "claude-3.5-haiku@vertex-ai",
1181
+ "claude-3.5-sonnet-20240620@vertex-ai",
1182
+ "claude-3.5-sonnet@vertex-ai",
1183
+ "gemini-1.0-pro-001@vertex-ai",
1184
+ "gemini-1.0-pro-002@vertex-ai",
1185
+ "gemini-1.0-pro@vertex-ai",
1186
+ "gemini-1.5-flash-001@vertex-ai",
1187
+ "gemini-1.5-flash-002@vertex-ai",
1188
+ "gemini-1.5-flash@vertex-ai",
1189
+ "gemini-1.5-pro-001@vertex-ai",
1190
+ "gemini-1.5-pro-002@vertex-ai",
1191
+ "gemini-1.5-pro@vertex-ai",
1192
+ "llama-3.1-405b-chat@vertex-ai",
1193
+ "llama-3.1-70b-chat@vertex-ai",
1194
+ "llama-3.1-8b-chat@vertex-ai",
1195
+ "llama-3.2-11b-chat@vertex-ai",
1196
+ "llama-3.2-90b-chat@vertex-ai",
1197
+ "mistral-large@vertex-ai",
1198
+ "mistral-nemo@vertex-ai",
1199
+ "gemma-2-27b-it@deepinfra",
1200
+ "gemma-2-9b-it@deepinfra",
1201
+ "llama-3-70b-chat@deepinfra",
1202
+ "llama-3-8b-chat@deepinfra",
1203
+ "llama-3.1-405b-chat@deepinfra",
1204
+ "llama-3.1-70b-chat@deepinfra",
1205
+ "llama-3.1-8b-chat@deepinfra",
1206
+ "llama-3.1-nemotron-70b-chat@deepinfra",
1207
+ "llama-3.2-11b-chat@deepinfra",
1208
+ "llama-3.2-1b-chat@deepinfra",
1209
+ "llama-3.2-3b-chat@deepinfra",
1210
+ "llama-3.2-90b-chat@deepinfra",
1211
+ "mistral-7b-instruct-v0.3@deepinfra",
1212
+ "mistral-nemo@deepinfra",
1213
+ "mixtral-8x7b-instruct-v0.1@deepinfra",
1214
+ "qwen-2.5-72b-instruct@deepinfra",
1215
+ "gemma-2-27b-it@together-ai",
1216
+ "gemma-2-9b-it@together-ai",
1217
+ "llama-3-70b-chat@together-ai",
1218
+ "llama-3-8b-chat@together-ai",
1219
+ "llama-3.1-405b-chat@together-ai",
1220
+ "llama-3.1-70b-chat@together-ai",
1221
+ "llama-3.1-8b-chat@together-ai",
1222
+ "llama-3.2-11b-chat@together-ai",
1223
+ "llama-3.2-3b-chat@together-ai",
1224
+ "llama-3.2-90b-chat@together-ai",
1225
+ "mistral-7b-instruct-v0.3@together-ai",
1226
+ "mixtral-8x22b-instruct-v0.1@together-ai",
1227
+ "mixtral-8x7b-instruct-v0.1@together-ai",
1228
+ "qwen-2-72b-instruct@together-ai",
1229
+ "qwen-2.5-72b-instruct@together-ai",
1230
+ "qwen-2.5-7b-instruct@together-ai",
1231
+ "gemma-2-9b-it@groq",
1232
+ "gemma-7b-it@groq",
1233
+ "llama-3-70b-chat@groq",
1234
+ "llama-3-8b-chat@groq",
1235
+ "llama-3.1-70b-chat@groq",
1236
+ "llama-3.1-8b-chat@groq",
1237
+ "llama-3.2-1b-chat@groq",
1238
+ "llama-3.2-3b-chat@groq",
1239
+ "mixtral-8x7b-instruct-v0.1@groq",
1240
+ "gemma-2-9b-it@lepton-ai",
1241
+ "llama-3-70b-chat@lepton-ai",
1242
+ "llama-3-8b-chat@lepton-ai",
1243
+ "llama-3.1-405b-chat@lepton-ai",
1244
+ "llama-3.1-70b-chat@lepton-ai",
1245
+ "llama-3.1-8b-chat@lepton-ai",
1246
+ "llama-3.2-3b-chat@lepton-ai",
1247
+ "mistral-7b-instruct-v0.3@lepton-ai",
1248
+ "mistral-nemo@lepton-ai",
1249
+ "mixtral-8x7b-instruct-v0.1@lepton-ai",
1250
+ "qwen-2-72b-instruct@lepton-ai",
1251
+ "gpt-4o-2024-05-13@azure-ai",
1252
+ "gpt-4o-2024-08-06@azure-ai",
1253
+ "gpt-4o-mini@azure-ai",
1254
+ "gpt-4o@azure-ai",
1255
+ "llama-3.1-405b-chat@azure-ai",
1256
+ "llama-3.1-70b-chat@azure-ai",
1257
+ "llama-3.1-8b-chat@azure-ai",
1258
+ "llama-3.2-11b-chat@azure-ai",
1259
+ "llama-3.2-90b-chat@azure-ai",
1260
+ "mistral-large@azure-ai",
1261
+ "mistral-nemo@azure-ai",
1262
+ "llama-3-70b-chat@fireworks-ai",
1263
+ "llama-3-8b-chat@fireworks-ai",
1264
+ "llama-3.1-405b-chat@fireworks-ai",
1265
+ "llama-3.1-70b-chat@fireworks-ai",
1266
+ "llama-3.1-8b-chat@fireworks-ai",
1267
+ "llama-3.2-11b-chat@fireworks-ai",
1268
+ "llama-3.2-1b-chat@fireworks-ai",
1269
+ "llama-3.2-3b-chat@fireworks-ai",
1270
+ "llama-3.2-90b-chat@fireworks-ai",
1271
+ "mistral-nemo@fireworks-ai",
1272
+ "mixtral-8x22b-instruct-v0.1@fireworks-ai",
1273
+ "mixtral-8x7b-instruct-v0.1@fireworks-ai",
1274
+ "qwen-2.5-14b-instruct@fireworks-ai",
1275
+ "qwen-2.5-72b-instruct@fireworks-ai",
1276
+ "qwen-2.5-7b-instruct@fireworks-ai",
1277
+ "llama-3-70b-chat@replicate",
1278
+ "llama-3-8b-chat@replicate",
1279
+ "llama-3.1-405b-chat@replicate",
1280
+ "mixtral-8x7b-instruct-v0.1@replicate",
1281
+ "llama-3.1-70b-chat@perplexity-ai",
1282
+ "llama-3.1-8b-chat@perplexity-ai",
1283
+ "ministral-3b@mistral-ai",
1284
+ "ministral-8b@mistral-ai",
1285
+ "mistral-7b-instruct-v0.3@mistral-ai",
1286
+ "mistral-large@mistral-ai",
1287
+ "mistral-nemo@mistral-ai",
1288
+ "mistral-small@mistral-ai",
1289
+ "mixtral-8x22b-instruct-v0.1@mistral-ai",
1290
+ "mixtral-8x7b-instruct-v0.1@mistral-ai",
1291
+ ]
1292
+ fetch: false
1293
+ titleConvo: true
1294
+ titleModel: "router@q:1|c:2.12e-01|t:5.00e-04|i:2.78e-04"
1295
+ dropParams: ["stop", "user", "frequency_penalty", "presence_penalty"]
1296
+
1297
+ - name: "xai"
1298
+ apiKey: "user_provided"
1299
+ baseURL: "https://api.x.ai/v1"
1300
+ models:
1301
+ default: ["grok-beta"]
1302
+ fetch: false
1303
+ titleConvo: true
1304
+ titleMethod: "completion"
1305
+ titleModel: "grok-beta"
1306
+ summarize: false
1307
+ summaryModel: "grok-beta"
1308
+ forcePrompt: false
1309
+ modelDisplayLabel: "Grok"
1310
+
1311
+ # REVERSE PROXY
1312
+
1313
+ # ConvoAI
1314
+ - name: "ConvoAI"
1315
+ apiKey: "user_provided"
1316
+ baseURL: "https://api.convoai.tech/v1/"
1317
+ models:
1318
+ default: [
1319
+ "gpt-3.5-turbo",
1320
+ "gpt-3.5-turbo-1106",
1321
+ "gpt-3.5-turbo-0125",
1322
+ "gpt-3.5-turbo-16k",
1323
+ "gpt-4",
1324
+ "gpt-4-0613",
1325
+ "gpt-4-1106-preview",
1326
+ "gpt-4-0125-preview",
1327
+ "gpt-4-vision-preview",
1328
+ "gpt-4-turbo-2024-04-09",
1329
+ "convoai-pro",
1330
+ "mixtral-8x22b",
1331
+ "gpt-3.5-turbo-16k-0613",
1332
+ "gpt-3.5-turbo-0613",
1333
+ "gpt-4-32k",
1334
+ "gpt-4-1106-vision-preview",
1335
+ "claude-2",
1336
+ "claude-3-haiku",
1337
+ "claude-3-sonnet",
1338
+ "claude-3-opus",
1339
+ "claude-instant-1.2",
1340
+ "gemma-2b",
1341
+ "gemma-2b-it",
1342
+ "gemma-7b",
1343
+ "gemma-7b-it",
1344
+ "gemini-1.0-pro-001",
1345
+ "gemini-pro",
1346
+ "gemini-1.0-pro",
1347
+ "gemini-1.0-pro-latest",
1348
+ "gemini-1.5-pro",
1349
+ "gemini-1.5-pro-latest",
1350
+ "gemini-pro-vision",
1351
+ "mistral-7b",
1352
+ "mixtral-8x7b-Instruct-v0.1",
1353
+ "mistral-7b-instruct-v0.1",
1354
+ "mistral-7b-instruct-v0.2",
1355
+ "mixtral-8x7b",
1356
+ "dolphin-mixtral-8x7b",
1357
+ "mistral-tiny",
1358
+ "mistral-small",
1359
+ "mistral-medium",
1360
+ "mistral-large",
1361
+ "llama2-7b",
1362
+ "llama2-70b",
1363
+ "llama2-13b",
1364
+ "code-llama-7b",
1365
+ "code-llama-70b-instruct",
1366
+ "code-llama-13b",
1367
+ "code-llama-34b",
1368
+ "code-llama-34b-instruct",
1369
+ "openchat-3.5",
1370
+ "yi-34b-chat",
1371
+ "yi-34b-200k",
1372
+ "command-r-plus",
1373
+ "command-r-plus-4bit",
1374
+ "aya-101",
1375
+ "dbrx-instruct",
1376
+ ]
1377
+ fetch: false
1378
+ titleConvo: true
1379
+ titleModel: "gpt-3.5-turbo"
1380
+ summarize: false
1381
+ summaryModel: "gpt-3.5-turbo"
1382
+ forcePrompt: false
1383
+ modelDisplayLabel: "ConvoAI"
1384
+ iconURL: "https://raw.githubusercontent.com/LibreChat-AI/librechat-config-yaml/main/icons/ConvoAI.png"
1385
+
1386
+ # FreeGPT-4
1387
+ - name: "FreeGPT-4"
1388
+ apiKey: "user_provided"
1389
+ baseURL: "https://api.freegpt4.tech/v1/"
1390
+ models:
1391
+ default: [
1392
+ "gpt-3.5-turbo",
1393
+ "gpt-3.5-turbo-1106",
1394
+ "gpt-3.5-turbo-0125",
1395
+ "gpt-3.5-turbo-16k",
1396
+ "gpt-4",
1397
+ "gpt-4-1106-preview",
1398
+ "gpt-4-0125-preview",
1399
+ "claude",
1400
+ "gemini-pro"
1401
+ ]
1402
+ fetch: false
1403
+ titleConvo: true
1404
+ titleModel: "gpt-3.5-turbo"
1405
+ summarize: false
1406
+ summaryModel: "gpt-3.5-turbo"
1407
+ forcePrompt: false
1408
+ modelDisplayLabel: "FreeGPT-4"
1409
+ iconURL: "https://raw.githubusercontent.com/LibreChat-AI/librechat-config-yaml/main/icons/FreeGPT-4.png"
1410
+
1411
+ # Mandrill
1412
+ - name: "Mandrill"
1413
+ apiKey: "user_provided"
1414
+ baseURL: "https://api.mandrillai.tech/v1"
1415
+ models:
1416
+ default: [
1417
+ "gpt-4o",
1418
+ "gpt-4-turbo",
1419
+ "gpt-4-0125-preview",
1420
+ "gpt-4-1106-preview",
1421
+ "gpt-4",
1422
+ "gpt-3.5-turbo",
1423
+ "gpt-3.5-turbo-1106",
1424
+ "gpt-3.5-turbo-0613",
1425
+ "gpt-3.5-turbo-0301",
1426
+ "claude-3-opus",
1427
+ "gemini-pro",
1428
+ "gemini-pro-vision"
1429
+ ]
1430
+ fetch: false
1431
+ titleConvo: true
1432
+ titleModel: "gpt-3.5-turbo"
1433
+ summarize: false
1434
+ summaryModel: "gpt-3.5-turbo"
1435
+ forcePrompt: false
1436
+ modelDisplayLabel: "Mandrill"
1437
+ iconURL: "https://raw.githubusercontent.com/LibreChat-AI/librechat-config-yaml/main/icons/Mandrill.png"
1438
+
1439
+ # NagaAI
1440
+ - name: "NagaAI"
1441
+ apiKey: "user_provided"
1442
+ baseURL: "https://api.naga.ac/v1"
1443
+ models:
1444
+ default: [
1445
+ "gpt-4",
1446
+ "gpt-4-vision-preview",
1447
+ "gpt-4-turbo-preview",
1448
+ "gpt-4-0125-preview",
1449
+ "gpt-4-1106-preview",
1450
+ "gpt-4-0613",
1451
+ "mistral-large",
1452
+ "mistral-large-2402",
1453
+ "mistral-next",
1454
+ "mistral-small",
1455
+ "mistral-small-2402",
1456
+ "gpt-3.5-turbo",
1457
+ "gpt-3.5-turbo-0125",
1458
+ "gpt-3.5-turbo-1106",
1459
+ "gpt-3.5-turbo-0613",
1460
+ "claude-3-opus",
1461
+ "claude-3-opus-20240229",
1462
+ "claude-3-sonnet",
1463
+ "claude-3-sonnet-20240229",
1464
+ "claude-3-haiku",
1465
+ "claude-3-haiku-20240307",
1466
+ "claude-2.1",
1467
+ "claude-instant",
1468
+ "gemini-pro",
1469
+ "gemini-pro-vision",
1470
+ "llama-2-70b-chat",
1471
+ "llama-2-13b-chat",
1472
+ "llama-2-7b-chat",
1473
+ "mistral-7b",
1474
+ "mixtral-8x7b"
1475
+ ]
1476
+ fetch: false
1477
+ titleConvo: true
1478
+ titleModel: "gpt-3.5-turbo"
1479
+ summarize: false
1480
+ summaryModel: "gpt-3.5-turbo"
1481
+ forcePrompt: false
1482
+ modelDisplayLabel: "NagaAI"
1483
+ iconURL: "https://raw.githubusercontent.com/LibreChat-AI/librechat-config-yaml/main/icons/NagaAI.png"
1484
+
1485
+ # Pawan
1486
+ - name: "Pawan"
1487
+ apiKey: "user_provided"
1488
+ baseURL: "https://api.pawan.krd/pai-001-rp/v1"
1489
+ models:
1490
+ default: [
1491
+ "pai-001-rp"
1492
+ ]
1493
+ fetch: false
1494
+ titleConvo: true
1495
+ titleModel: "pai-001-rp"
1496
+ summarize: false
1497
+ summaryModel: "pai-001-rp"
1498
+ forcePrompt: false
1499
+ modelDisplayLabel: "Pawan"
1500
+ iconURL: "https://raw.githubusercontent.com/LibreChat-AI/librechat-config-yaml/main/icons/Pawan.png"
1501
+
1502
+ # Pawan light
1503
+ - name: "Pawan light"
1504
+ apiKey: "user_provided"
1505
+ baseURL: "https://api.pawan.krd/pai-001-light-rp/v1"
1506
+ models:
1507
+ default: [
1508
+ "pai-001-light-rp"
1509
+ ]
1510
+ fetch: false
1511
+ titleConvo: true
1512
+ titleModel: "pai-001-light-rp"
1513
+ summarize: false
1514
+ summaryModel: "pai-001-light-rp"
1515
+ forcePrompt: false
1516
+ modelDisplayLabel: "Pawan light"
1517
+ iconURL: "https://raw.githubusercontent.com/LibreChat-AI/librechat-config-yaml/main/icons/Pawan.png"
1518
+
1519
+ # Shard
1520
+ - name: "Shard"
1521
+ apiKey: "user_provided"
1522
+ baseURL: "https://api.shard-ai.xyz/v1/"
1523
+ models:
1524
+ default: [
1525
+ 'gpt-3.5-turbo-0301',
1526
+ 'gpt-3.5-turbo-0613',
1527
+ 'gpt-3.5-turbo',
1528
+ 'gpt-3.5-turbo-1106',
1529
+ 'gpt-3.5-turbo-0125',
1530
+ 'gpt-3.5-turbo-instruct',
1531
+ 'gpt-3.5-turbo-instruct-0914',
1532
+ 'gpt-3.5-turbo-16k',
1533
+ 'gpt-4-0613',
1534
+ 'gpt-4',
1535
+ 'gpt-4-turbo',
1536
+ 'gpt-4-turbo-2024-04-09',
1537
+ 'gpt-4-1106-preview',
1538
+ 'gpt-4-0125-preview',
1539
+ 'gpt-4-turbo-preview',
1540
+ 'gpt-4-vision-preview',
1541
+ 'command-r',
1542
+ 'command-r-plus',
1543
+ 'command-light-nightly',
1544
+ 'command',
1545
+ 'command-light',
1546
+ 'c4ai-aya',
1547
+ 'claude',
1548
+ 'claude-1.2',
1549
+ 'claude-2',
1550
+ 'claude-2.1',
1551
+ 'claude-3-haiku',
1552
+ 'claude-3-sonnet',
1553
+ 'claude-3-opus',
1554
+ 'claude-instant-v1',
1555
+ 'claude-instant-v1-100k',
1556
+ 'palm-2',
1557
+ 'dbrx-instruct',
1558
+ 'gemini-pro',
1559
+ 'gemini-1.5-pro',
1560
+ 'mixtral-8x7b-instruct',
1561
+ 'mixtral-8x7b',
1562
+ 'mixtral-8x22b',
1563
+ 'mixtral-8x22b-finetuned',
1564
+ 'zephyr-8x22b',
1565
+ 'zephyr-7b',
1566
+ 'mistral-tiny',
1567
+ 'mistral-small',
1568
+ 'mistral-medium',
1569
+ 'mistral-large',
1570
+ 'mistral-next',
1571
+ 'mistral-7b-instruct',
1572
+ 'yi-34b',
1573
+ 'gemma-2b',
1574
+ 'gemma-7b', 'gemma-1.1-7b',
1575
+ 'llamaguard-7b',
1576
+ 'llama-2-7b',
1577
+ 'llama-2-13b',
1578
+ 'llama-2-70b',
1579
+ 'llama-3-8b',
1580
+ 'llama-3-70b',
1581
+ 'openchat-3.5',
1582
+ 'phind-codellama-34b',
1583
+ 'llava-1.5',
1584
+ 'llava-1.6-34b',
1585
+ 'llava-1.6-7b',
1586
+ 'lzlv-70b',
1587
+ 'airoboros-70b',
1588
+ 'airoboros-70b-gpt4',
1589
+ 'cinematika-7b',
1590
+ 'toppy-7b',
1591
+ 'codellama-7b-instruct',
1592
+ 'codellama-13b-instruct',
1593
+ 'codellama-34b-instruct',
1594
+ 'codellama-70b-instruct',
1595
+ 'dolphine-mixtral',
1596
+ 'pi', 'mythomax-l2-13b',
1597
+ 'nous-capybara-7b',
1598
+ 'sonar-small-chat',
1599
+ 'sonar-medium-chat',
1600
+ 'sonar-small-online',
1601
+ 'sonar-medium-online',
1602
+ 'perplexity-related',
1603
+ 'hermes-2',
1604
+ 'hermes-2-pro',
1605
+ 'qwen-1.5-32b-chat'
1606
+ ]
1607
+ fetch: false
1608
+ titleConvo: true
1609
+ titleModel: "gpt-3.5-turbo"
1610
+ modelDisplayLabel: "Shard"
1611
+ iconURL: "https://raw.githubusercontent.com/LibreChat-AI/librechat-config-yaml/main/icons/shard.png"
1612
+
1613
+ # Zukijourney
1614
+ - name: "Zukijourney"
1615
+ apiKey: "user_provided"
1616
+ baseURL: "https://zukijourney.xyzbot.net/unf/"
1617
+ models:
1618
+ default: [
1619
+ "gpt-3.5-turbo",
1620
+ "gpt-3.5-turbo-1106",
1621
+ "gpt-3.5-turbo-0125",
1622
+ "gpt-3.5-turbo-instruct",
1623
+ "gpt-3.5-turbo-16k",
1624
+ "gpt-4",
1625
+ "gpt-4o",
1626
+ "gpt-4-32k",
1627
+ "gpt-4-1106-preview",
1628
+ "gpt-4-0125-preview",
1629
+ "gpt-4-vision-preview",
1630
+ "claude",
1631
+ "claude-2",
1632
+ "claude-2.1",
1633
+ "claude-instant-v1",
1634
+ "claude-instant-v1-100k",
1635
+ "claude-3-opus",
1636
+ "claude-3-sonnet",
1637
+ "claude-3.5-sonnet",
1638
+ "pplx-70b-online",
1639
+ "palm-2",
1640
+ "bard",
1641
+ "gemini-pro",
1642
+ "gemini-pro-vision",
1643
+ "mixtral-8x7b",
1644
+ "mixtral-8x7b-instruct",
1645
+ "mistral-tiny",
1646
+ "mistral-small",
1647
+ "mistral-medium",
1648
+ "mistral-7b-instruct",
1649
+ "codellama-7b-instruct",
1650
+ "llama-2-7b",
1651
+ "llama-2-70b-chat",
1652
+ "mythomax-l2-13b-8k",
1653
+ "sheep-duck-llama",
1654
+ "goliath-120b",
1655
+ "nous-llama",
1656
+ "yi-34b",
1657
+ "openchat",
1658
+ "solar10-7b",
1659
+ "pi"
1660
+ ]
1661
+ fetch: true
1662
+ titleConvo: true
1663
+ titleModel: "gpt-3.5-turbo"
1664
+ summarize: false
1665
+ summaryModel: "gpt-3.5-turbo"
1666
+ forcePrompt: false
1667
+ dropParams: ["stream"]
1668
+ iconURL: "https://raw.githubusercontent.com/LibreChat-AI/librechat-config-yaml/main/icons/zuki.png"
1669
  - name: "yi-lightning"
1670
+ apiKey: "user_provided"
1671
+ baseURL: "https://wispy-poetry-d145.baochao0912.workers.dev/web/v1"
1672
+ models:
1673
  default: ["yi-lightning"]
1674
+ titleConvo: false
1675
+ titleModel: "yi-lightning"
1676
+ modelDisplayLabel: "yi-lightning"
1677
  # addParams:
1678
  # safe_prompt: true # Mistral specific value for moderating messages
1679
  # NOTE: For Mistral, it is necessary to drop the following parameters or you will encounter a 422 Error:
1680
+