fixes circular import
- config/__init__.py +63 -25
- config/train_gpt_oss_memory_optimized.py +12 -1
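
For readers unfamiliar with the failure this commit addresses: when config/__init__.py imports every config module eagerly and one of those modules imports back into the config package, Python encounters a partially initialized package and raises ImportError, so `import config` itself fails. The sketch below is a self-contained reproduction of that general pattern; the cfgpkg/train_a names are hypothetical and stand in for this repo's actual modules.

import os
import sys
import tempfile
import textwrap

# Build a throwaway package whose __init__.py eagerly imports a submodule that
# imports back into the package -- the classic circular-import failure.
root = tempfile.mkdtemp()
pkg_dir = os.path.join(root, "cfgpkg")
os.makedirs(pkg_dir)

with open(os.path.join(pkg_dir, "__init__.py"), "w") as f:
    f.write("from .train_a import AConfig\n")  # eager import at package-import time

with open(os.path.join(pkg_dir, "train_a.py"), "w") as f:
    f.write(textwrap.dedent("""\
        # Imports back into the half-initialized package:
        from cfgpkg import helper_not_yet_defined
        class AConfig:
            pass
    """))

sys.path.insert(0, root)
try:
    import cfgpkg  # noqa: F401
except ImportError as exc:
    print(f"circular import reproduced: {exc}")

Wrapping the eager imports in try/except, as the diff below does, keeps the package importable even when one submodule fails in this way.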
config/__init__.py
CHANGED
@@ -2,26 +2,63 @@
 Configuration package for SmolLM3 and GPT-OSS training
 """
 
-from .train_smollm3 import SmolLM3Config, get_config as get_base_config
-from .train_smollm3_openhermes_fr import SmolLM3ConfigOpenHermesFR, get_config as get_openhermes_fr_config
-from .train_smollm3_openhermes_fr_a100_large import SmolLM3ConfigOpenHermesFRA100Large, get_config as get_a100_large_config
-from .train_smollm3_openhermes_fr_a100_multiple_passes import SmolLM3ConfigOpenHermesFRMultiplePasses, get_config as get_multiple_passes_config
-from .train_smollm3_openhermes_fr_a100_max_performance import SmolLM3ConfigOpenHermesFRMaxPerformance, get_config as get_max_performance_config
+try:
+    from .train_smollm3 import SmolLM3Config, get_config as get_base_config
+except Exception:
+    SmolLM3Config = None # type: ignore
+    def get_base_config(config_path: str): # type: ignore
+        raise ImportError("train_smollm3 not available")
+try:
+    from .train_smollm3_openhermes_fr import SmolLM3ConfigOpenHermesFR, get_config as get_openhermes_fr_config
+except Exception:
+    SmolLM3ConfigOpenHermesFR = None # type: ignore
+    get_openhermes_fr_config = None # type: ignore
+try:
+    from .train_smollm3_openhermes_fr_a100_large import SmolLM3ConfigOpenHermesFRA100Large, get_config as get_a100_large_config
+except Exception:
+    SmolLM3ConfigOpenHermesFRA100Large = None # type: ignore
+    get_a100_large_config = None # type: ignore
+try:
+    from .train_smollm3_openhermes_fr_a100_multiple_passes import SmolLM3ConfigOpenHermesFRMultiplePasses, get_config as get_multiple_passes_config
+except Exception:
+    SmolLM3ConfigOpenHermesFRMultiplePasses = None # type: ignore
+    get_multiple_passes_config = None # type: ignore
+try:
+    from .train_smollm3_openhermes_fr_a100_max_performance import SmolLM3ConfigOpenHermesFRMaxPerformance, get_config as get_max_performance_config
+except Exception:
+    SmolLM3ConfigOpenHermesFRMaxPerformance = None # type: ignore
+    get_max_performance_config = None # type: ignore
 
 # GPT-OSS configurations
-from .train_gpt_oss_basic import GPTOSSBasicConfig, get_config as get_gpt_oss_basic_config
-from .train_gpt_oss_multilingual_reasoning import (
-    GPTOSSMultilingualReasoningConfig,
-    get_config as get_gpt_oss_multilingual_reasoning_config,
-)
-from .train_gpt_oss_h100_optimized import (
-    GPTOSSH100OptimizedConfig,
-    get_config as get_gpt_oss_h100_optimized_config,
-)
-from .train_gpt_oss_memory_optimized import (
-    GPTOSSMemoryOptimizedConfig,
-    get_config as get_gpt_oss_memory_optimized_config,
-)
+try:
+    from .train_gpt_oss_basic import GPTOSSBasicConfig, get_config as get_gpt_oss_basic_config
+except Exception:
+    GPTOSSBasicConfig = None # type: ignore
+    get_gpt_oss_basic_config = None # type: ignore
+try:
+    from .train_gpt_oss_multilingual_reasoning import (
+        GPTOSSMultilingualReasoningConfig,
+        get_config as get_gpt_oss_multilingual_reasoning_config,
+    )
+except Exception:
+    GPTOSSMultilingualReasoningConfig = None # type: ignore
+    get_gpt_oss_multilingual_reasoning_config = None # type: ignore
+try:
+    from .train_gpt_oss_h100_optimized import (
+        GPTOSSH100OptimizedConfig,
+        get_config as get_gpt_oss_h100_optimized_config,
+    )
+except Exception:
+    GPTOSSH100OptimizedConfig = None # type: ignore
+    get_gpt_oss_h100_optimized_config = None # type: ignore
+try:
+    from .train_gpt_oss_memory_optimized import (
+        GPTOSSMemoryOptimizedConfig,
+        get_config as get_gpt_oss_memory_optimized_config,
+    )
+except Exception:
+    GPTOSSMemoryOptimizedConfig = None # type: ignore
+    get_gpt_oss_memory_optimized_config = None # type: ignore
 from .train_gpt_oss_custom import GPTOSSEnhancedCustomConfig
 
 # Pre-baked GPT-OSS configs exposing a `config` instance

@@ -38,17 +75,18 @@ def get_config(config_path: str):
     import importlib.util as _importlib
 
     if not os.path.exists(config_path):
-        return get_base_config(config_path)
+        # Fall back to base config accessor if available
+        return get_base_config(config_path) if get_base_config else None
 
     # Try to determine config type based on filename
     if "a100_large" in config_path:
-        return get_a100_large_config(config_path)
+        return get_a100_large_config(config_path) if get_a100_large_config else None
     elif "a100_multiple_passes" in config_path:
-        return get_multiple_passes_config(config_path)
+        return get_multiple_passes_config(config_path) if get_multiple_passes_config else None
     elif "a100_max_performance" in config_path:
-        return get_max_performance_config(config_path)
+        return get_max_performance_config(config_path) if get_max_performance_config else None
     elif "openhermes_fr" in config_path:
-        return get_openhermes_fr_config(config_path)
+        return get_openhermes_fr_config(config_path) if get_openhermes_fr_config else None
     elif "gpt_oss" in config_path:
         # Load GPT-OSS style config module dynamically and return its `config` instance if present
         try:

@@ -61,9 +99,9 @@ def get_config(config_path: str):
         except Exception:
             # Fallback to base config if dynamic load fails
             pass
-        return get_base_config(config_path)
+        return get_base_config(config_path) if get_base_config else None
     else:
-        return get_base_config(config_path)
+        return get_base_config(config_path) if get_base_config else None
 
 __all__ = [
     'SmolLM3Config',
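
Taken together, the guarded imports change the package's failure mode: a config module that cannot be imported no longer breaks `import config` itself, and get_config degrades to a fallback (or None) for that family instead. A rough usage sketch follows, assuming the repository root is on sys.path so the package imports as `config` and using a path that exists in this repo.

# Hypothetical usage sketch; behavior described assumes the guarded imports above.
from config import get_config  # no longer fails just because one config module is broken

cfg = get_config("config/train_gpt_oss_memory_optimized.py")
# The "gpt_oss" branch loads the file dynamically and returns its module-level
# `config` instance; unavailable families come back as None (or the base
# fallback) rather than raising at package-import time.
print(type(cfg).__name__ if cfg is not None else "config family unavailable")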
config/train_gpt_oss_memory_optimized.py
CHANGED
@@ -146,4 +146,15 @@ class GPTOSSMemoryOptimizedConfig:
         print(f"Memory optimization: Enabled")
         print(f"Quantization: {self.quantization_config}")
         print(f"Max memory per GPU: {self.model_kwargs.get('max_memory', 'Auto')}")
-        print("==================================================")
+        print("==================================================")
+
+# Provide a module-level config instance for dynamic loaders
+config = GPTOSSMemoryOptimizedConfig()
+
+def get_config(config_path: str) -> GPTOSSMemoryOptimizedConfig:
+    """Return a configured GPTOSSMemoryOptimizedConfig instance.
+
+    The config_path argument is accepted for API compatibility with other
+    get_config functions, but is not used to alter the configuration here.
+    """
+    return GPTOSSMemoryOptimizedConfig()