Spaces:
Sleeping
Sleeping
TeleologyHI
committed on
Commit
·
da4d45a
1
Parent(s):
b2d45c4
Update to async processing in app and HIMModel
Browse files
- app.py +3 -2
- src/model/him_model.py +3 -2
app.py
CHANGED
@@ -1,5 +1,6 @@
|
|
1 |
import gradio as gr
|
2 |
import torch
|
|
|
3 |
from src.model.him_model import HIMModel
|
4 |
from config.model_config import HIMConfig
|
5 |
from config.environment_config import EnvironmentConfig
|
@@ -11,7 +12,7 @@ def initialize_model():
|
|
11 |
model = HIMModel(model_config).to(device)
|
12 |
return model
|
13 |
|
14 |
-
def chat(message: str,
|
15 |
system_message: str = "You are a friendly Chatbot.",
|
16 |
max_tokens: int = 512,
|
17 |
temperature: float = 0.7,
|
@@ -28,7 +29,7 @@ def chat(message: str,
|
|
28 |
}
|
29 |
|
30 |
with torch.no_grad():
|
31 |
-
result = model.generate_response(input_data)
|
32 |
return result["response"]
|
33 |
|
34 |
model = initialize_model()
|
|
|
1 |
import gradio as gr
|
2 |
import torch
|
3 |
+
import asyncio
|
4 |
from src.model.him_model import HIMModel
|
5 |
from config.model_config import HIMConfig
|
6 |
from config.environment_config import EnvironmentConfig
|
|
|
12 |
model = HIMModel(model_config).to(device)
|
13 |
return model
|
14 |
|
15 |
+
async def chat(message: str,
|
16 |
system_message: str = "You are a friendly Chatbot.",
|
17 |
max_tokens: int = 512,
|
18 |
temperature: float = 0.7,
|
|
|
29 |
}
|
30 |
|
31 |
with torch.no_grad():
|
32 |
+
result = await model.generate_response(input_data)
|
33 |
return result["response"]
|
34 |
|
35 |
model = initialize_model()
|
src/model/him_model.py
CHANGED
@@ -1,6 +1,7 @@
|
|
1 |
import torch
|
2 |
import torch.nn as nn
|
3 |
from typing import Dict, Any
|
|
|
4 |
from ..core.consciousness_kernel import ConsciousnessKernel
|
5 |
from ..core.emotional_intelligence import EmotionalProcessor
|
6 |
from ..core.theory_of_mind import TheoryOfMind
|
@@ -15,11 +16,11 @@ class HIMModel(nn.Module):
|
|
15 |
self.theory_of_mind = TheoryOfMind()
|
16 |
self.semiotic_processor = SemioticProcessor()
|
17 |
|
18 |
-
def generate_response(self, input_data: Dict[str, Any]) -> Dict[str, Any]:
|
19 |
consciousness_state = self.consciousness_kernel.process_consciousness_cycle(input_data)
|
20 |
emotional_context = self.emotional_processor.process_emotional_context(input_data)
|
21 |
social_understanding = self.theory_of_mind.model_agent_mind(input_data)
|
22 |
-
semiotic_analysis = self.semiotic_processor.
|
23 |
|
24 |
return self._integrate_outputs(
|
25 |
consciousness_state,
|
|
|
1 |
import torch
|
2 |
import torch.nn as nn
|
3 |
from typing import Dict, Any
|
4 |
+
import asyncio
|
5 |
from ..core.consciousness_kernel import ConsciousnessKernel
|
6 |
from ..core.emotional_intelligence import EmotionalProcessor
|
7 |
from ..core.theory_of_mind import TheoryOfMind
|
|
|
16 |
self.theory_of_mind = TheoryOfMind()
|
17 |
self.semiotic_processor = SemioticProcessor()
|
18 |
|
19 |
+
async def generate_response(self, input_data: Dict[str, Any]) -> Dict[str, Any]:
|
20 |
consciousness_state = self.consciousness_kernel.process_consciousness_cycle(input_data)
|
21 |
emotional_context = self.emotional_processor.process_emotional_context(input_data)
|
22 |
social_understanding = self.theory_of_mind.model_agent_mind(input_data)
|
23 |
+
semiotic_analysis = await self.semiotic_processor.process(input_data)
|
24 |
|
25 |
return self._integrate_outputs(
|
26 |
consciousness_state,
|