qitongwei committed
Commit 6fdb66a · verified · 1 Parent(s): 9da5a76

Update librechat.yaml

Files changed (1): librechat.yaml (+15 -15)
```diff
--- a/librechat.yaml
+++ b/librechat.yaml
@@ -1,5 +1,5 @@
 # Configuration version (required)
-version: 1.2.1
+version: 1.2.4
 
 # Cache settings: Set to true to enable caching
 cache: true
@@ -85,20 +85,20 @@ endpoints:
       iconURL: https://cdn-icons-png.flaticon.com/128/1240/1240979.png
 
     - name: "Gemini"
-      label: "Gemini"
-      default: false
-      description: "Gemini 2.5 Pro" # iconURL: "https://example.com/icon.png"
-      preset:
-        endpoint: "google"
-        model: "gemini-2.5-pro-exp-03-25"
-        maxContextTokens: 35000 # Maximum context tokens
-        max_tokens: 16000 # Maximum output tokens
-        temperature: 1
-        promptCache: true
-        modelLabel: "Gemini"
-        greeting: |
-          Gemini 2.5 Pro access.
-        promptPrefix: some_cool_prompt
+      label: "Gemini"
+      default: false
+      description: "Gemini 2.5 Pro" # iconURL: "https://example.com/icon.png"
+      preset:
+        endpoint: "google"
+        model: "gemini-2.5-pro-exp-03-25"
+        maxContextTokens: 35000 # Maximum context tokens
+        max_tokens: 16000 # Maximum output tokens
+        temperature: 1
+        promptCache: true
+        modelLabel: "Gemini"
+        greeting: |
+          Gemini 2.5 Pro access.
+        promptPrefix: some_cool_prompt
 
 # See the Custom Configuration Guide for more information:
 # https://docs.librechat.ai/install/configuration/custom_config.html
```
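
For reference, a minimal sketch of how the Gemini entry would sit in the file after this commit. The diff does not show the surrounding structure; in LibreChat's custom config, entries of this shape (`name`/`label`/`preset`) normally live under `modelSpecs.list`, so that wrapper and the exact indentation are assumptions here, not part of the commit:

```yaml
# librechat.yaml — sketch of the post-commit state (wrapper keys assumed)

# Configuration version (required)
version: 1.2.4            # bumped from 1.2.1 in this commit

# Cache settings: Set to true to enable caching
cache: true

modelSpecs:               # assumed wrapper; the diff starts inside this section
  list:
    - name: "Gemini"
      label: "Gemini"
      default: false
      description: "Gemini 2.5 Pro"
      # iconURL: "https://example.com/icon.png"   # optional icon, left commented out
      preset:
        endpoint: "google"
        model: "gemini-2.5-pro-exp-03-25"
        maxContextTokens: 35000 # Maximum context tokens
        max_tokens: 16000       # Maximum output tokens
        temperature: 1
        promptCache: true
        modelLabel: "Gemini"
        greeting: |
          Gemini 2.5 Pro access.
        promptPrefix: some_cool_prompt
```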