Spaces:
Sleeping
Sleeping
TeleologyHI
committed on
Commit
·
54b77be
1
Parent(s):
c227032
up
Browse files- app.py +1 -1
- config/__init__.py +2 -0
- config/environment_config.py +13 -17
- config/model_config.py +19 -35
app.py
CHANGED
@@ -9,7 +9,7 @@ def initialize_model():
|
|
9 |
model_config = HIMConfig()
|
10 |
env_config = EnvironmentConfig()
|
11 |
device = torch.device("cuda" if torch.cuda.is_available() and env_config.device == "cuda" else "cpu")
|
12 |
-
model = HIMModel(model_config
|
13 |
return model
|
14 |
|
15 |
async def chat(message: str,
|
|
|
9 |
model_config = HIMConfig()
|
10 |
env_config = EnvironmentConfig()
|
11 |
device = torch.device("cuda" if torch.cuda.is_available() and env_config.device == "cuda" else "cpu")
|
12 |
+
model = HIMModel(model_config).to(device) # Pass model_config directly instead of model_config.to_dict()
|
13 |
return model
|
14 |
|
15 |
async def chat(message: str,
|
config/__init__.py
ADDED
@@ -0,0 +1,2 @@
|
|
|
|
|
|
|
1 |
+
from .model_config import HIMConfig
|
2 |
+
from .environment_config import EnvironmentConfig
|
config/environment_config.py
CHANGED
@@ -3,21 +3,17 @@ from typing import Optional
|
|
3 |
|
4 |
@dataclass
|
5 |
class EnvironmentConfig:
|
6 |
-
|
7 |
-
|
8 |
-
|
|
|
|
|
9 |
|
10 |
-
|
11 |
-
|
12 |
-
|
13 |
-
|
14 |
-
|
15 |
-
|
16 |
-
|
17 |
-
|
18 |
-
wandb_project: str = "HIM-self"
|
19 |
-
|
20 |
-
# API configuration
|
21 |
-
api_host: str = "0.0.0.0"
|
22 |
-
api_port: int = 7860
|
23 |
-
enable_cors: bool = True
|
|
|
3 |
|
4 |
@dataclass
|
5 |
class EnvironmentConfig:
|
6 |
+
def __init__(self):
|
7 |
+
self.device = "cuda" # or "cpu"
|
8 |
+
self.log_level = "INFO"
|
9 |
+
self.api_port = 8000
|
10 |
+
self.threads = 4
|
11 |
|
12 |
+
def to_dict(self):
|
13 |
+
"""Convert config to dictionary format"""
|
14 |
+
return {
|
15 |
+
"device": self.device,
|
16 |
+
"log_level": self.log_level,
|
17 |
+
"api_port": self.api_port,
|
18 |
+
"threads": self.threads
|
19 |
+
}
|
|
|
|
|
|
|
|
|
|
|
|
config/model_config.py
CHANGED
@@ -3,39 +3,23 @@ from typing import Dict, List
|
|
3 |
|
4 |
@dataclass
|
5 |
class HIMConfig:
|
6 |
-
|
7 |
-
|
8 |
-
|
9 |
-
|
10 |
-
|
11 |
-
|
|
|
|
|
12 |
|
13 |
-
|
14 |
-
|
15 |
-
|
16 |
-
|
17 |
-
|
18 |
-
|
19 |
-
|
20 |
-
|
21 |
-
|
22 |
-
|
23 |
-
|
24 |
-
|
25 |
-
# Training configuration
|
26 |
-
batch_size: int = 8
|
27 |
-
learning_rate: float = 2e-5
|
28 |
-
num_train_epochs: int = 3
|
29 |
-
gradient_accumulation_steps: int = 1
|
30 |
-
warmup_steps: int = 500
|
31 |
-
|
32 |
-
# Architecture configuration
|
33 |
-
hidden_size: int = 768
|
34 |
-
intermediate_size: int = 3072
|
35 |
-
num_hidden_layers: int = 12
|
36 |
-
num_attention_heads: int = 12
|
37 |
-
|
38 |
-
# Memory configuration
|
39 |
-
memory_size: int = 1024
|
40 |
-
context_length: int = 2048
|
41 |
-
cache_size: int = 512
|
|
|
3 |
|
4 |
@dataclass
|
5 |
class HIMConfig:
|
6 |
+
def __init__(self):
|
7 |
+
self.base_model = "gpt2-medium"
|
8 |
+
self.max_length = 1024
|
9 |
+
self.temperature = 0.7
|
10 |
+
self.top_p = 0.95
|
11 |
+
self.consciousness_integration_factor = 0.8
|
12 |
+
self.emotional_intelligence_weight = 0.6
|
13 |
+
self.semiotic_processing_depth = 3
|
14 |
|
15 |
+
def to_dict(self):
|
16 |
+
"""Convert config to dictionary format"""
|
17 |
+
return {
|
18 |
+
"base_model": self.base_model,
|
19 |
+
"max_length": self.max_length,
|
20 |
+
"temperature": self.temperature,
|
21 |
+
"top_p": self.top_p,
|
22 |
+
"consciousness_integration_factor": self.consciousness_integration_factor,
|
23 |
+
"emotional_intelligence_weight": self.emotional_intelligence_weight,
|
24 |
+
"semiotic_processing_depth": self.semiotic_processing_depth
|
25 |
+
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|