Create librechat.yaml
librechat.yaml  ADDED  (+164 -0)
@@ -0,0 +1,164 @@
# For more information, see the Configuration Guide:
# https://www.librechat.ai/docs/configuration/librechat_yaml

# Configuration version (required)
version: 1.1.5

# Cache settings: Set to true to enable caching
cache: true

# Custom interface configuration
interface:
  # Privacy policy settings
  privacyPolicy:
    externalUrl: 'https://librechat.ai/privacy-policy'
    openNewTab: true

  # Terms of service
  termsOfService:
    externalUrl: 'https://librechat.ai/tos'
    openNewTab: true

# Example Registration Object Structure (optional)
registration:
  socialLogins: ['github', 'google', 'discord', 'openid', 'facebook']
  # allowedDomains:
  #   - "gmail.com"

# speech:
#   tts:
#     openai:
#       url: ''
#       apiKey: '${TTS_API_KEY}'
#       model: ''
#       voice: ''
#
#   stt:
#     openai:
#       url: ''
#       apiKey: '${STT_API_KEY}'
#       model: ''

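# A filled-in sketch of the speech block above, assuming OpenAI-hosted services;
# the model and voice names are illustrative examples, not values taken from this file:
#   speech:
#     tts:
#       openai:
#         apiKey: '${TTS_API_KEY}'
#         model: 'tts-1'
#         voice: 'alloy'
#     stt:
#       openai:
#         apiKey: '${STT_API_KEY}'
#         model: 'whisper-1'
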
# rateLimits:
#   fileUploads:
#     ipMax: 100
#     ipWindowInMinutes: 60 # Rate limit window for file uploads per IP
#     userMax: 50
#     userWindowInMinutes: 60 # Rate limit window for file uploads per user
#   conversationsImport:
#     ipMax: 100
#     ipWindowInMinutes: 60 # Rate limit window for conversation imports per IP
#     userMax: 50
#     userWindowInMinutes: 60 # Rate limit window for conversation imports per user

# Definition of custom endpoints
endpoints:
  # assistants:
  #   disableBuilder: false # Disable Assistants Builder Interface by setting to `true`
  #   pollIntervalMs: 3000 # Polling interval for checking assistant updates
  #   timeoutMs: 180000 # Timeout for assistant operations
  #   # Should only be one or the other, either `supportedIds` or `excludedIds`
  #   supportedIds: ["asst_supportedAssistantId1", "asst_supportedAssistantId2"]
  #   # excludedIds: ["asst_excludedAssistantId"]
  #   # Only show assistants that the user created or that were created externally (e.g. in Assistants playground).
  #   # privateAssistants: false # Does not work with `supportedIds` or `excludedIds`
  #   # (optional) Models that support retrieval, will default to latest known OpenAI models that support the feature
  #   retrievalModels: ["gpt-4-turbo-preview"]
  #   # (optional) Assistant Capabilities available to all users. Omit the ones you wish to exclude. Defaults to list below.
  #   capabilities: ["code_interpreter", "retrieval", "actions", "tools", "image_vision"]
  custom:
    # Groq Example
    - name: 'groq'
      apiKey: '${GROQ_API_KEY}'
      baseURL: 'https://mxrkai-api.hf.space/v1/'
      models:
        default:
          [
            'gpt-3.5-turbo',
            'gpt-4o-mini',
          ]
        fetch: false
      titleConvo: true
      titleModel: 'gpt-3.5-turbo'
      modelDisplayLabel: 'groq'

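    # The `${...}` values in this file are read from environment variables that you
    # define for the server (typically in your `.env`). A minimal sketch covering only
    # the keys referenced here, with placeholder values:
    #   GROQ_API_KEY=key-for-the-endpoint-above
    #   MISTRAL_API_KEY=your-mistral-key
    #   OPENROUTER_KEY=your-openrouter-key
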
    # Mistral AI Example
    - name: 'Mistral' # Unique name for the endpoint
      # For `apiKey` and `baseURL`, you can use environment variables that you define.
      # recommended environment variables:
      apiKey: '${MISTRAL_API_KEY}'
      baseURL: 'https://api.mistral.ai/v1'

      # Models configuration
      models:
        # List of default models to use. At least one value is required.
        default: ['mistral-tiny', 'mistral-small', 'mistral-medium']
        # Fetch option: Set to true to fetch models from API.
        fetch: true # Defaults to false.

      # Optional configurations

      # Title Conversation setting
      titleConvo: true # Set to true to enable title conversation

      # Title Method: Choose between "completion" or "functions".
      # titleMethod: "completion" # Defaults to "completion" if omitted.

      # Title Model: Specify the model to use for titles.
      titleModel: 'mistral-tiny' # Defaults to "gpt-3.5-turbo" if omitted.

      # Summarize setting: Set to true to enable summarization.
      # summarize: false

      # Summary Model: Specify the model to use if summarization is enabled.
      # summaryModel: "mistral-tiny" # Defaults to "gpt-3.5-turbo" if omitted.

      # Force Prompt setting: If true, sends a `prompt` parameter instead of `messages`.
      # forcePrompt: false

      # The label displayed for the AI model in messages.
      modelDisplayLabel: 'Mistral' # Default is "AI" when not set.

      # Add additional parameters to the request. Default params will be overwritten.
      # addParams:
      #   safe_prompt: true # This field is specific to Mistral AI: https://docs.mistral.ai/api/

      # Drop default parameters from the request. See the default params in the guide linked below.
      # NOTE: For Mistral, it is necessary to drop the following parameters or you will encounter a 422 Error:
      dropParams: ['stop', 'user', 'frequency_penalty', 'presence_penalty']

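    # Illustration only (the exact default payload is an assumption, not pulled from
    # this repo): with the `dropParams` above, outgoing chat requests to Mistral are
    # sent without `stop`, `user`, `frequency_penalty`, and `presence_penalty`;
    # uncommenting `addParams` would additionally merge `safe_prompt: true` into the
    # request body.
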
    # OpenRouter Example
    - name: 'OpenRouter'
      # For `apiKey` and `baseURL`, you can use environment variables that you define.
      # recommended environment variables:
      # Known issue: you should not use `OPENROUTER_API_KEY` as it will then override the `openAI` endpoint to use OpenRouter as well.
      apiKey: '${OPENROUTER_KEY}'
      baseURL: 'https://openrouter.ai/api/v1'
      models:
        default: ['meta-llama/llama-3-70b-instruct']
        fetch: true
      titleConvo: true
      titleModel: 'meta-llama/llama-3-70b-instruct'
      # Recommended: Drop the stop parameter from the request as OpenRouter models use a variety of stop tokens.
      dropParams: ['stop']
      modelDisplayLabel: 'OpenRouter'

# fileConfig:
#   endpoints:
#     assistants:
#       fileLimit: 5
#       fileSizeLimit: 10 # Maximum size for an individual file in MB
#       totalSizeLimit: 50 # Maximum total size for all files in a single request in MB
#       supportedMimeTypes:
#         - "image/.*"
#         - "application/pdf"
#     openAI:
#       disabled: true # Disables file uploading to the OpenAI endpoint
#     default:
#       totalSizeLimit: 20
#     YourCustomEndpointName:
#       fileLimit: 2
#       fileSizeLimit: 5
#   serverFileSizeLimit: 100 # Global server file size limit in MB
#   avatarSizeLimit: 2 # Limit for user avatar image size in MB
# See the Custom Configuration Guide for more information on Assistants Config:
# https://www.librechat.ai/docs/configuration/librechat_yaml/object_structure/assistants_endpoint
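
# Loading this file: with a Docker-based deployment, it is commonly bind-mounted into
# the container. A sketch assuming the standard compose service name `api` and the
# default in-container path (adjust both for your setup), e.g. in docker-compose.override.yml:
#   services:
#     api:
#       volumes:
#         - type: bind
#           source: ./librechat.yaml
#           target: /app/librechat.yaml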