# LibreChat configuration (librechat.yaml)
# Reconstructed from a diff-page paste; values preserved from the "+" side.

# Cache settings: set to true to enable caching
cache: true

# Custom endpoint definitions
endpoints:
  custom:
    # OpenAI — routed through a reverse proxy (baseURL from env)
    - name: "openAI"
      apiKey: "${OPENAI_API_KEY}"
      baseURL: "${OPENAI_REVERSE_PROXY}"
      models:
        default: ["gpt-4o", "gpt-4o-mini"]
        fetch: true
      titleConvo: true
      titleModel: "gpt-4o-mini"
      summarize: false
      summaryModel: "gpt-4o-mini"
      modelDisplayLabel: "ChatGPT"
      dropParams: ["stop", "user", "frequency_penalty", "presence_penalty"]

    # Anthropic — routed through a reverse proxy (baseURL from env)
    # NOTE(review): endpoint is named "assistants" but uses Anthropic keys —
    # confirm the name is intentional.
    - name: "assistants"
      apiKey: "${ANTHROPIC_API_KEY}"
      baseURL: "${ANTHROPIC_REVERSE_PROXY}"
      models:
        default: ["claude-3-opus", "claude-3-sonnet", "claude-3-haiku"]
        fetch: true
      titleConvo: true
      modelDisplayLabel: "Claude"
      dropParams: ["stop", "user", "frequency_penalty", "presence_penalty"]

    # Google Gemini — routed through a reverse proxy (baseURL from env)
    - name: "google"
      apiKey: "${GOOGLE_KEY}"
      baseURL: "${GOOGLE_REVERSE_PROXY}"
      models:
        default: ["gemini-pro-1.5", "gemini-pro", "gemini-1.5-flash-latest"]
        fetch: true
      titleConvo: true
      modelDisplayLabel: "Gemini"
      dropParams: ["stop", "user", "frequency_penalty", "presence_penalty"]

    # HuggingFace Inference API
    - name: "HuggingFace"
      apiKey: "${HUGGINGFACE_TOKEN}"
      baseURL: "https://api-inference.huggingface.co/v1"
      models:
        default: [
          "codellama/CodeLlama-34b-Instruct-hf",
          "google/gemma-1.1-2b-it",
          # TODO(review): the diff view elided the list entries between
          # these two models (new-file lines 52-59) — restore them from
          # the full file before deploying.
          "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO",
        ]
        fetch: true
      titleConvo: true
      titleModel: "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO"
      dropParams: ["top_p"]
      modelDisplayLabel: "HuggingFace"

    # Cohere — direct API, model list pinned (fetch disabled)
    - name: "cohere"
      apiKey: "${COHERE_API_KEY}"
      baseURL: "https://api.cohere.ai/v1"
      models:
        default: ["command-r", "command-r-plus", "command-light", "command-light-nightly", "command", "command-nightly"]
        fetch: false
      modelDisplayLabel: "cohere"
      titleModel: "command"
      dropParams: ["stop", "user", "frequency_penalty", "presence_penalty", "temperature", "top_p"]