Delete config.toml

config.toml (DELETED, +0 −168)

[General]
# If you deploy this service on a remote server,
# it is recommended to set mode to "remote",
# and you **should** set host to "0.0.0.0", or you can't access the service from the outside
mode = "remote"
port = 3000
host = "0.0.0.0"
# If there are any problems, you can set debug & logger to true
debug = false
# Fastify Logger
logger = false
# If you want the service to watch the configuration file for changes and reload automatically,
# you can set watch to true
watch = false

[General.Https]
# If you want to use HTTPS, you can set the following configuration
# enabled = true
# # You can specify the host for the certificate file (auto-generate mode)
# host = '192.168.110.254'
# # You can also specify the paths to your existing certificate files
# key = "path"
# cert = "path"
# ca = "path"

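# For illustration, a minimal sketch of an enabled HTTPS setup that points at
# existing certificate files; the paths below are hypothetical placeholders:
# [General.Https]
# enabled = true
# key = "/etc/ssl/private/server.key"
# cert = "/etc/ssl/certs/server.crt"
# ca = "/etc/ssl/certs/ca.crt"
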
[AI]
default = "openai"
# If the parameter is not set in the specific AI service,
# this value will be used
# For example:
# If I don't set the temperature parameter in AI.OpenAI, this value will be used
# But if I set the temperature parameter in AI.Gemini, the temperature parameter in AI.Gemini will be used
# temperature = 0.5
# max_tokens = 100

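# For example, with the (illustrative) values below, OpenAI inherits the
# global temperature while Gemini overrides it:
# [AI]
# temperature = 0.5   # fallback for services that don't set their own
# [AI.Gemini]
# temperature = 0.9   # takes priority over the global value for Gemini only
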
[AI.Functions]
# You should enter the plugin IDs that you want to enable here.
# The following plugins are supported:
# serp, web_search
# You can go to src/features/ai/functionsCall to see more details.
plugins = [
    'serp',
    'web_search'
]

[AI.Functions.Serp]
apiHub_api_key = "APY04V154epVA7X87TSe6OmYnPTMr58CyTExbPzUAnaFg7t2YcY46YqRMfNPDZAJQ0V5CQbuGilK6r"
# tavily_ai_api_key = "" # Tavily is not supported yet.

[AI.OpenAI]
# If the default model is not set,
# or...
# if the default model is set,
# but the specific AI service's model is not set,
# the default model written in the code will be used
# default = "gpt-3.5-turbo-16k.legacy"
# You can edit the base_url if you want to use a custom OpenAI server
base_url = "https://api.smgc.cc/v1"
api_key = "sk-uFCx2OYWLKJaHtfy7b0cBc517f3c4cF5A1279c855f9f6aE6"

# If you'd like to use Azure OpenAI:
# is_azure = true
# base_url = "https://<resource_name>.openai.azure.com"
# azure_deployment_name = "YOUR_AZURE_DEPLOYMENT_ID" # if not provided, req.body.model is used

# If a parameter is set in the specific AI service,
# that value will be used, and it has the highest priority
# temperature = 0.5
# max_tokens = 100

# Custom OpenAI Model
# You can add your own OpenAI model just like the following:
# # [NOTICE] You shouldn't use a dot in the model name. It will be parsed as a section.
[AI.OpenAI.Models.GPT4P]
id = "openai-gpt-4-preview" # if it's not provided, it will be generated from the Object key. For example, here it would be "gpt4p"
model = "gpt-4-preview" # if it's not provided, it will be generated from the Object key.
name = "GPT-4 Preview" # if it's not provided, it will be generated from the Object key.
description = "GPT-4 Preview from OpenAI has a big context window that fits hundreds of pages of text, making it a great choice for workloads that involve longer prompts.\n"
# speed = 3 # if it's not provided, the default value will be used.
# intelligence = 3 # if it's not provided, the default value will be used.
# context = 8 # if it's not provided, the default value will be used.
# status = "beta"
# [AI.OpenAI.Models.GPT4P.Capabilities] # Features control
# image_generation = true # Not supported yet
# web_search = true # The premise is that the model needs to support Function Call.

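# A minimal variant of the same idea: with everything else omitted, id, model,
# and name are all derived from the Object key. The key below is hypothetical:
# [AI.OpenAI.Models.MyModel]
# description = "A custom model whose id, model, and name come from the key.\n"
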
[AI.OpenAI.Models.GPT4T]
id = "openai-gpt-4-turbo" # if it's not provided, it will be generated from the Object key. For example, here it would be "gpt4t"
model = "gpt-4-turbo" # if it's not provided, it will be generated from the Object key.
name = "GPT-4 Turbo" # if it's not provided, it will be generated from the Object key.
description = "GPT-4 Turbo from OpenAI has a big context window that fits hundreds of pages of text, making it a great choice for workloads that involve longer prompts.\n"
# speed = 3 # if it's not provided, the default value will be used.
# intelligence = 3 # if it's not provided, the default value will be used.
# context = 8 # if it's not provided, the default value will be used.
# status = "beta"
# [AI.OpenAI.Models.GPT4T.Capabilities] # Features control
# image_generation = true # Not supported yet
# web_search = true # The premise is that the model needs to support Function Call.

[AI.OpenAI.Models.GPT4o]
id = "openai-gpt-4o" # if it's not provided, it will be generated from the Object key. For example, here it would be "gpt4o"
model = "gpt-4o" # if it's not provided, it will be generated from the Object key.
name = "GPT-4o" # if it's not provided, it will be generated from the Object key.
description = "GPT-4o is the most advanced and fastest model from OpenAI, making it a great choice for complex everyday problems and deeper conversations.\n"
# speed = 3 # if it's not provided, the default value will be used.
# intelligence = 3 # if it's not provided, the default value will be used.
# context = 8 # if it's not provided, the default value will be used.
# status = "beta"
# [AI.OpenAI.Models.GPT4o.Capabilities] # Features control
# image_generation = true # Not supported yet
# web_search = true # The premise is that the model needs to support Function Call.

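# For example, to let GPT-4o call the web_search function (the model must
# support Function Call for this to take effect):
# [AI.OpenAI.Models.GPT4o.Capabilities]
# web_search = true
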
[AI.Groq]
# refresh_token = '<your refresh token>'
# temperature = 0.5
# max_tokens = 100

[AI.Gemini]
api_key = ""
# temperature = 0.5
# max_tokens = 100

[AI.Cohere]
email = ""
password = ""

[Translate]
# You can choose the default translation service from the following:
# shortcut, deeplx, ai, libretranslate
# Default: deeplx
default = "ai"

# Maybe one day there will be a [Translate.Shortcuts] configuration here...
# [Translate.Shortcuts]

[Translate.DeepLX]
# proxy_endpoint = ""
# access_token = ""

[Translate.AI]
# If the default model is not set,
# or...
# if the default model is set,
# but the specific AI service's model is not set,
# the default model written in the code will be used
# Default: openai
default = "openai"
# The model used by the AI service
# (only effective for openai, groq)
# Default: gpt-3.5-turbo
model = "gpt-3.5-turbo"

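# For example, to route translation through Groq instead (the model name is
# illustrative and depends on what Groq currently serves):
# [Translate.AI]
# default = "groq"
# model = "llama3-8b-8192"
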
[Translate.LibreTranslate]
base_url = "https://libretranslate.com"
# You can choose the type from the following:
# reserve, api
# Default: reserve
type = "reserve"
# If you choose api, you should set the api_key
api_key = ""
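
# For example, to switch from the "reserve" type to the "api" type
# (the key below is a placeholder):
# [Translate.LibreTranslate]
# type = "api"
# api_key = "<your LibreTranslate API key>"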

# The following is for the legacy configuration
# They will be removed in the future

[Sync]
# The location of the sync file; the default is "icloud", and you can also set it to "local"
# # The iCloud storage solution is only effective when deployed on the macOS client,
# # where it is the default option
type = "icloud" # icloud / local
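
# For example, when the service is not deployed on the macOS client, use local
# storage instead:
# [Sync]
# type = "local"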