Update mcp_hub/config.py
Change to a non-reasoning model for code generation
mcp_hub/config.py  (+1 −1)
@@ -56,7 +56,7 @@ class ModelConfig:
     # Default models (Nebius/HuggingFace compatible)
     question_enhancer_model: str = "Qwen/Qwen3-4B-fast"
     llm_processor_model: str = "meta-llama/Meta-Llama-3.1-8B-Instruct"
-    code_generator_model: str = "
+    code_generator_model: str = "nvidia/Llama-3_3-Nemotron-Super-49B-v1"
     orchestrator_model: str = "Qwen/Qwen3-32B-fast"

     def get_model_for_provider(self, task: str, provider: str) -> str:
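For context, here is a minimal sketch of how this config might look and be used after the change. Only the field defaults and the `get_model_for_provider` signature are visible in the diff; the `@dataclass` decorator, the task-to-field mapping, and the provider handling below are illustrative assumptions, not the actual implementation.

```python
from dataclasses import dataclass


@dataclass
class ModelConfig:
    # Default models (Nebius/HuggingFace compatible)
    question_enhancer_model: str = "Qwen/Qwen3-4B-fast"
    llm_processor_model: str = "meta-llama/Meta-Llama-3.1-8B-Instruct"
    code_generator_model: str = "nvidia/Llama-3_3-Nemotron-Super-49B-v1"
    orchestrator_model: str = "Qwen/Qwen3-32B-fast"

    def get_model_for_provider(self, task: str, provider: str) -> str:
        # Hypothetical lookup: resolve a task name to its configured model id.
        # Provider-specific overrides are omitted in this sketch.
        models = {
            "question_enhancer": self.question_enhancer_model,
            "llm_processor": self.llm_processor_model,
            "code_generator": self.code_generator_model,
            "orchestrator": self.orchestrator_model,
        }
        return models[task]


# The code-generation task now resolves to the non-reasoning Nemotron model.
config = ModelConfig()
print(config.get_model_for_provider("code_generator", "nebius"))
# -> nvidia/Llama-3_3-Nemotron-Super-49B-v1
```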