Create config.toml
config.toml ADDED (+45 -0)
@@ -0,0 +1,45 @@
+[core]
+workspace_base = "C:/Users/rog_j/OneDrive/Pulpit/GIT/react/ui-dashboard/workspace/frontend"
+default_agent = "CodeActAgent"
+
+[llm]
+custom_llm_provider = "huggingface"
+
+
+#model = "huggingface/Qwen/Qwen2.5-Coder-32B-Instruct"
+#base_url = "https://api-inference.huggingface.co/models/Qwen/Qwen2.5-Coder-32B/v1/chat/completions"
+
+#model = "deepseek-ai/DeepSeek-Coder-V2-Instruct"
+#base_url = "https://api-inference.huggingface.co/models/deepseek-ai/DeepSeek-Coder-V2-Instruct/v1/chat/completions"
+
+#model = "hf.space/qwen2.5-coder-32b-instruct."
+#base_url = "https://ojciectadeusz.fastapi-inference-qwen2.5-coder-32b-instruct.hf.space/v1/chat/completions"
+
+model = "huggingface/Qwen/Qwen2.5-Coder-32B-Instruct"
+base_url = "https://ojciectadeusz-fastapi-inference-qwen2-5-coder-32-a0ab504.hf.space/v1/chat/completions"
+
+#model = "openrouter/google/gemini-pro-1.5-exp"
+#api_key = "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"
+#base_url = "https://openrouter.ai/api/v1"
+
+temperature = 0.1
+# Changed top_p to be within valid range (e.g., 0.85)
+top_p = 0.85
+max_output_tokens = 200000
+
+# Disable function calling
+function_calling = false
+
+# Additional error handling settings
+num_retries = 8
+retry_min_wait = 15
+retry_max_wait = 120
+retry_multiplier = 2.0
+
+[agent.CodeActAgent]
+llm_config = "llm"
+memory_enabled = false
+
+
+
+[auth]
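
A minimal sketch (not part of this commit) for sanity-checking the [llm] block before pointing OpenHands at it: it loads config.toml, checks the ranges the in-file comments call out, and sends a tiny request to base_url. It assumes Python 3.11+ for tomllib and the third-party requests package, and it assumes the Space exposes an OpenAI-compatible /v1/chat/completions endpoint, which the base_url suggests but the commit itself does not confirm.

import tomllib

import requests

# Load the committed config; tomllib needs the file opened in binary mode.
with open("config.toml", "rb") as f:
    cfg = tomllib.load(f)

llm = cfg["llm"]

# Range checks mirroring the comments in the file.
assert 0.0 <= llm["top_p"] <= 1.0, "top_p must be within [0.0, 1.0]"
assert llm["retry_min_wait"] <= llm["retry_max_wait"], "retry window is inverted"

# Smoke-test the inference endpoint with a tiny prompt. The "huggingface/"
# prefix on the model name is a LiteLLM-style provider prefix; a raw
# OpenAI-compatible server may expect the bare model id instead, so it is
# passed through as-is here only for illustration.
resp = requests.post(
    llm["base_url"],
    json={
        "model": llm["model"],
        "messages": [{"role": "user", "content": "ping"}],
        "max_tokens": 8,
        "temperature": llm["temperature"],
    },
    timeout=30,
)
resp.raise_for_status()
print(resp.json()["choices"][0]["message"]["content"])

On the retry settings: assuming the usual exponential backoff (the retry implementation itself is not part of this commit), num_retries = 8 with retry_min_wait = 15, retry_multiplier = 2.0 and retry_max_wait = 120 would give waits of roughly 15 s, 30 s, 60 s, and then 120 s for each remaining attempt.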