TeleologyHI committed on
Commit
54b77be
·
1 Parent(s): c227032
app.py CHANGED
@@ -9,7 +9,7 @@ def initialize_model():
9
  model_config = HIMConfig()
10
  env_config = EnvironmentConfig()
11
  device = torch.device("cuda" if torch.cuda.is_available() and env_config.device == "cuda" else "cpu")
12
- model = HIMModel(model_config.to_dict()).to(device)
13
  return model
14
 
15
  async def chat(message: str,
 
9
  model_config = HIMConfig()
10
  env_config = EnvironmentConfig()
11
  device = torch.device("cuda" if torch.cuda.is_available() and env_config.device == "cuda" else "cpu")
12
+ model = HIMModel(model_config).to(device) # Pass model_config directly instead of model_config.to_dict()
13
  return model
14
 
15
  async def chat(message: str,
config/__init__.py ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ from .model_config import HIMConfig
2
+ from .environment_config import EnvironmentConfig
config/environment_config.py CHANGED
@@ -3,21 +3,17 @@ from typing import Optional
3
 
4
  @dataclass
5
  class EnvironmentConfig:
6
- # Hugging Face configuration
7
- hf_model_path: str = "TeleologyHI/HIM-self"
8
- hf_token: Optional[str] = None
 
 
9
 
10
- # Hardware configuration
11
- device: str = "cuda"
12
- num_gpus: int = 1
13
- mixed_precision: bool = True
14
-
15
- # Logging configuration
16
- log_level: str = "INFO"
17
- enable_wandb: bool = False
18
- wandb_project: str = "HIM-self"
19
-
20
- # API configuration
21
- api_host: str = "0.0.0.0"
22
- api_port: int = 7860
23
- enable_cors: bool = True
 
3
 
4
  @dataclass
5
  class EnvironmentConfig:
6
+ def __init__(self):
7
+ self.device = "cuda" # or "cpu"
8
+ self.log_level = "INFO"
9
+ self.api_port = 8000
10
+ self.threads = 4
11
 
12
+ def to_dict(self):
13
+ """Convert config to dictionary format"""
14
+ return {
15
+ "device": self.device,
16
+ "log_level": self.log_level,
17
+ "api_port": self.api_port,
18
+ "threads": self.threads
19
+ }
 
 
 
 
 
 
config/model_config.py CHANGED
@@ -3,39 +3,23 @@ from typing import Dict, List
3
 
4
  @dataclass
5
  class HIMConfig:
6
- # Base model configuration
7
- model_name: str = "HIM-self"
8
- base_model: str = "gpt2"
9
- max_length: int = 512
10
- temperature: float = 0.7
11
- top_p: float = 0.95
 
 
12
 
13
- # Consciousness parameters
14
- self_awareness_level: float = 0.8
15
- ethical_reasoning_weight: float = 0.9
16
- symbolic_interpretation_capacity: float = 0.85
17
- consciousness_dimension: int = 768
18
- attention_heads: int = 12
19
-
20
- # Teleological parameters
21
- purpose_driven_bias: float = 0.75
22
- spiritual_awareness: float = 0.8
23
- meaning_dimension: int = 256
24
-
25
- # Training configuration
26
- batch_size: int = 8
27
- learning_rate: float = 2e-5
28
- num_train_epochs: int = 3
29
- gradient_accumulation_steps: int = 1
30
- warmup_steps: int = 500
31
-
32
- # Architecture configuration
33
- hidden_size: int = 768
34
- intermediate_size: int = 3072
35
- num_hidden_layers: int = 12
36
- num_attention_heads: int = 12
37
-
38
- # Memory configuration
39
- memory_size: int = 1024
40
- context_length: int = 2048
41
- cache_size: int = 512
 
3
 
4
  @dataclass
5
  class HIMConfig:
6
+ def __init__(self):
7
+ self.base_model = "gpt2-medium"
8
+ self.max_length = 1024
9
+ self.temperature = 0.7
10
+ self.top_p = 0.95
11
+ self.consciousness_integration_factor = 0.8
12
+ self.emotional_intelligence_weight = 0.6
13
+ self.semiotic_processing_depth = 3
14
 
15
+ def to_dict(self):
16
+ """Convert config to dictionary format"""
17
+ return {
18
+ "base_model": self.base_model,
19
+ "max_length": self.max_length,
20
+ "temperature": self.temperature,
21
+ "top_p": self.top_p,
22
+ "consciousness_integration_factor": self.consciousness_integration_factor,
23
+ "emotional_intelligence_weight": self.emotional_intelligence_weight,
24
+ "semiotic_processing_depth": self.semiotic_processing_depth
25
+ }