qitongwei committed on
Commit
edf9e4c
·
verified ·
1 Parent(s): 87af180

Update librechat.yaml

Browse files
Files changed (1) hide show
  1. librechat.yaml +20 -16
librechat.yaml CHANGED
@@ -83,22 +83,26 @@ endpoints:
83
  forcePrompt: false
84
  modelDisplayLabel: "URL2"
85
  iconURL: https://cdn-icons-png.flaticon.com/128/1240/1240979.png
86
-
87
- - name: "Gemini"
88
- label: "Gemini"
89
- default: false
90
- description: "Gemini 2.5 Pro"#iconURL: "https://example.com/icon.png"
91
- preset:
92
- endpoint: "google"
93
- model: "gemini-2.5-pro-exp-03-25"
94
- maxContextTokens: 35000 # Maximum context tokens
95
- max_tokens: 16000 # Maximum output tokens
96
- temperature: 1
97
- promptCache: true
98
- modelLabel: "Gemini"
99
- greeting: |
100
- Gemini 2.5 Pro access.
101
- promptPrefix: some_cool_prompt
 
 
 
 
102
 
103
  # See the Custom Configuration Guide for more information:
104
  # https://docs.librechat.ai/install/configuration/custom_config.html
 
83
  forcePrompt: false
84
  modelDisplayLabel: "URL2"
85
  iconURL: https://cdn-icons-png.flaticon.com/128/1240/1240979.png
86
+
87
+ modelSpecs:
88
+ # ... other modelSpecs fields
89
+ addedEndpoints:
90
+ - google
91
+ - name: "Gemini"
92
+ label: "Gemini"
93
+ default: false
94
+ description: "Gemini 2.5 Pro"  # iconURL: "https://example.com/icon.png"
95
+ preset:
96
+ endpoint: "google"
97
+ model: "gemini-2.5-pro-exp-03-25"
98
+ maxContextTokens: 35000 # Maximum context tokens
99
+ max_tokens: 16000 # Maximum output tokens
100
+ temperature: 1
101
+ promptCache: true
102
+ modelLabel: "Gemini"
103
+ greeting: |
104
+ Gemini 2.5 Pro access.
105
+ promptPrefix: some_cool_prompt
106
 
107
  # See the Custom Configuration Guide for more information:
108
  # https://docs.librechat.ai/install/configuration/custom_config.html