ThreatLevelD
committed on
Commit
·
712720b
1
Parent(s):
603561f
Refactor MEC MVP pipeline: Integrate Codex Informer, HEIInference, and confidence routing logic
Browse files- core/codex_informer.py +43 -7
- core/confidence_gate.py +0 -25
- core/eil_processor.py +42 -9
- core/eris_reasoner.py +11 -4
- core/esil_inference.py +34 -5
- core/hei_inference.py +29 -6
- core/input_preprocessor.py +0 -21
- main.py +14 -20
core/codex_informer.py
CHANGED
@@ -3,20 +3,56 @@
|
|
3 |
import yaml
|
4 |
|
5 |
class CodexInformer:
|
6 |
-
|
7 |
-
|
8 |
-
|
9 |
-
|
10 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
11 |
|
12 |
def load_yaml(self, path):
|
|
|
13 |
with open(path, 'r', encoding='utf-8') as f:
|
14 |
return yaml.safe_load(f)
|
15 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
16 |
def map_tokens_to_codex(self, token_stream):
|
17 |
codex_hits = []
|
18 |
for token in token_stream:
|
19 |
-
for trigger in self.sal_triggers
|
20 |
if trigger['trigger'].lower() in token.lower():
|
21 |
codex_hits.append({
|
22 |
'type': 'SAL_TRIGGER',
|
@@ -27,7 +63,7 @@ class CodexInformer:
|
|
27 |
return codex_hits
|
28 |
|
29 |
def lookup_meta_mapping(self, meta_marker):
|
30 |
-
for meta in self.meta_mappings
|
31 |
if meta['marker'] == meta_marker:
|
32 |
return meta
|
33 |
return None
|
|
|
3 |
import yaml
|
4 |
|
5 |
class CodexInformer:
    """Process-wide singleton serving codex lookup data loaded from YAML config.

    All pipeline stages share one instance so the YAML files are parsed once.
    """

    _instance = None  # the single shared instance, created lazily on first call

    def __new__(cls):
        # Lazily create the shared instance. The instance is cached only AFTER
        # load_data() succeeds; the original cached it first, so a failed YAML
        # load left a permanently broken singleton that every later caller got.
        if cls._instance is None:
            instance = super().__new__(cls)
            instance.load_data()
            cls._instance = instance
        return cls._instance

    def load_data(self):
        """Load emotion families, SAL triggers, and META mappings from config YAML.

        Each document is unwrapped to its payload list via its top-level key.
        """
        self.emotion_families = self.load_yaml('config/emotion_families.yaml')['emotion_families']
        self.sal_triggers = self.load_yaml('config/sal_triggers.yaml')['sal_triggers']
        self.meta_mappings = self.load_yaml('config/meta_mappings.yaml')['meta_mappings']
|
20 |
|
21 |
def load_yaml(self, path):
    """Parse the YAML file at *path* and return its deserialized contents."""
    # safe_load avoids executing arbitrary tags from the config file.
    with open(path, 'r', encoding='utf-8') as handle:
        return yaml.safe_load(handle)
|
25 |
|
26 |
+
def get_emotion_family(self, emotion_name):
    """Resolve an emotion name (e.g. "joy") to its family code (e.g. "FAM-JOY").

    The match is case-insensitive; "Unknown" is returned when no family
    entry carries the given name.
    """
    wanted = emotion_name.lower()
    return next(
        (entry['code'] for entry in self.emotion_families
         if entry['name'].lower() == wanted),
        "Unknown",
    )
|
32 |
+
|
33 |
+
def get_arc(self, emotion_code):
    """Return the arc for *emotion_code*.

    NOTE(review): currently just delegates to get_emotion_family as a
    placeholder — a real arc mapping keyed on the emotion family is TODO.
    """
    return self.get_emotion_family(emotion_code)
|
36 |
+
|
37 |
+
def get_resonance(self, emotion_code):
    """Return the resonance pattern for *emotion_code*.

    NOTE(review): placeholder that delegates to get_emotion_family; a real
    resonance-pattern mapping is still to be implemented.
    """
    return self.get_emotion_family(emotion_code)
|
40 |
+
|
41 |
+
def map_emotion_to_codex(self, emotion):
    """Map a plain emotion word to its Codex family key (e.g. 'anger' -> 'FAM-ANG').

    Lookup is case-insensitive; unmapped emotions yield "Unknown".
    """
    codex_keys = {
        "joy": "FAM-JOY",
        "anger": "FAM-ANG",
        "fear": "FAM-FEA",
        "sadness": "FAM-SAD",
        "love": "FAM-LOV",
    }
    return codex_keys.get(emotion.lower(), "Unknown")
|
51 |
+
|
52 |
def map_tokens_to_codex(self, token_stream):
|
53 |
codex_hits = []
|
54 |
for token in token_stream:
|
55 |
+
for trigger in self.sal_triggers:
|
56 |
if trigger['trigger'].lower() in token.lower():
|
57 |
codex_hits.append({
|
58 |
'type': 'SAL_TRIGGER',
|
|
|
63 |
return codex_hits
|
64 |
|
65 |
def lookup_meta_mapping(self, meta_marker):
    """Return the META mapping entry whose 'marker' equals *meta_marker*, else None."""
    return next(
        (entry for entry in self.meta_mappings if entry['marker'] == meta_marker),
        None,
    )
|
core/confidence_gate.py
DELETED
@@ -1,25 +0,0 @@
|
|
1 |
-
# core/confidence_gate.py
|
2 |
-
# Master Emotional Core (MEC) - Confidence Gate
|
3 |
-
|
4 |
-
class ConfidenceGate:
    """Decides the next pipeline stage for an ESIL packet by confidence score."""

    def __init__(self, confidence_threshold=0.65):
        # Scores at or above this threshold are routed straight to ERIS.
        self.confidence_threshold = confidence_threshold

    def evaluate_confidence(self, esil_packet):
        """Build and return a routing packet for *esil_packet*.

        Routing bands:
          score >= threshold -> "proceed_to_eris"
          score >= 0.50      -> "trigger_llm_assist"
          otherwise          -> "escalate_to_hei"
        """
        score = esil_packet.get("confidence_score", 0.0)

        if score >= self.confidence_threshold:
            decision = "proceed_to_eris"
        elif score >= 0.50:
            decision = "trigger_llm_assist"
        else:
            decision = "escalate_to_hei"

        routing_packet = {
            "confidence_score": score,
            "routing_decision": decision,
            "source_metadata": esil_packet.get("source_metadata", {}),
        }

        print(f"[ConfidenceGate] Packet: {routing_packet}")
        return routing_packet
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
core/eil_processor.py
CHANGED
@@ -1,26 +1,59 @@
|
|
1 |
# core/eil_processor.py
|
2 |
-
|
|
|
3 |
|
4 |
class EILProcessor:
|
5 |
-
def __init__(self):
|
6 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
7 |
|
8 |
def process_eil(self, preprocessing_packet):
|
|
|
9 |
normalized_text = preprocessing_packet.get("normalized_text", "")
|
10 |
|
11 |
-
#
|
12 |
-
#
|
13 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
14 |
|
|
|
15 |
eil_packet = {
|
16 |
"phrases": phrases,
|
17 |
"emotion_candidates": [
|
18 |
{"phrase": p.strip(), "candidate_emotion": "Pending"} for p in phrases if p.strip()
|
19 |
],
|
20 |
"metadata": {
|
21 |
-
"source": "InputPreprocessor"
|
22 |
-
}
|
|
|
|
|
|
|
23 |
}
|
24 |
|
25 |
-
print(f"[EILProcessor] Packet: {eil_packet}")
|
26 |
return eil_packet
|
|
|
1 |
# core/eil_processor.py
|
2 |
+
|
3 |
+
from core.codex_informer import CodexInformer
|
4 |
|
5 |
class EILProcessor:
    """Emotion Input Layer: normalizes raw text and builds the EIL packet.

    Absorbs the former InputPreprocessor duties (preprocess_text) and
    enriches the packet with codex lookups via the shared CodexInformer.
    """

    def __init__(self, signal_floor=0.2, max_token_length=1024, enable_noise_filter=True):
        self.signal_floor = signal_floor
        self.max_token_length = max_token_length
        self.enable_noise_filter = enable_noise_filter
        # Shared codex lookup service (process-wide singleton).
        self.codex_informer = CodexInformer()

    def preprocess_text(self, raw_text):
        """Trim and lower-case *raw_text*, returning a preprocessing packet."""
        cleaned = raw_text.strip().lower()
        packet = {
            "normalized_text": cleaned,
            "length": len(cleaned)
        }
        print(f"[EILProcessor] Preprocessing Packet: {packet}")
        return packet

    def process_eil(self, preprocessing_packet):
        """Turn a preprocessing packet into an EIL packet with codex emotion data."""
        text = preprocessing_packet.get("normalized_text", "")

        # Naive phrase segmentation on sentence periods.
        segments = text.split(".")

        # Placeholder: a fixed sample emotion until real text analysis exists.
        code = "joy"

        family = self.codex_informer.get_emotion_family(code)
        arc = self.codex_informer.get_arc(code)
        resonance = self.codex_informer.get_resonance(code)

        # Fall back to the hidden-emotion state when the codex has no match.
        if family == "Unknown":
            family = "Hidden Emotion Detected"

        packet = {
            "phrases": segments,
            "emotion_candidates": [
                {"phrase": seg.strip(), "candidate_emotion": "Pending"}
                for seg in segments if seg.strip()
            ],
            "metadata": {
                "source": "InputPreprocessor + EILProcessor"
            },
            "emotion_family": family,
            "arc": arc,
            "resonance": resonance
        }

        print(f"[EILProcessor] EIL Packet: {packet}")
        return packet
|
core/eris_reasoner.py
CHANGED
@@ -1,10 +1,12 @@
|
|
1 |
import hashlib
|
2 |
import time
|
|
|
3 |
from core.hei_inference import HEIInference
|
4 |
|
5 |
class ERISReasoner:
|
6 |
def __init__(self):
|
7 |
-
|
|
|
8 |
|
9 |
def reason_emotion_state(self, esil_packet):
|
10 |
# Generate EmID (using user_id + primary_emotion + timestamp)
|
@@ -14,19 +16,24 @@ class ERISReasoner:
|
|
14 |
primary_emotion_code = esil_packet.get("primary_emotion_code", "UNK")
|
15 |
emotion_family = esil_packet.get("emotion_family", "UNK")
|
16 |
|
|
|
|
|
|
|
|
|
17 |
# Start building the UESP Packet
|
18 |
uesp_packet = {
|
19 |
"Primary Emotion": primary_emotion,
|
20 |
"Primary Emotion Code": primary_emotion_code,
|
21 |
-
"Emotion Arc Trajectory":
|
22 |
-
"Resonance Pattern":
|
23 |
-
"HEART Compliance Flags": ["HVC-000"], #
|
24 |
"Empathy First Response": esil_packet.get("response", "Emotion being processed..."),
|
25 |
"emotion_family": emotion_family # Always include this
|
26 |
}
|
27 |
|
28 |
# If the packet is empty or has no emotion_family, we call HEI for fallback
|
29 |
if emotion_family == "UNK" or not emotion_family:
|
|
|
30 |
hei = HEIInference()
|
31 |
uesp_packet = hei.detect_low_signal(uesp_packet) # HEI will generate a valid packet if needed
|
32 |
|
|
|
1 |
import hashlib
|
2 |
import time
|
3 |
+
from core.codex_informer import CodexInformer
|
4 |
from core.hei_inference import HEIInference
|
5 |
|
6 |
class ERISReasoner:
|
7 |
def __init__(self):
    """Attach the shared Codex Informer lookup service to ERIS."""
    self.codex_informer = CodexInformer()
|
10 |
|
11 |
def reason_emotion_state(self, esil_packet):
|
12 |
# Generate EmID (using user_id + primary_emotion + timestamp)
|
|
|
16 |
primary_emotion_code = esil_packet.get("primary_emotion_code", "UNK")
|
17 |
emotion_family = esil_packet.get("emotion_family", "UNK")
|
18 |
|
19 |
+
# Retrieve emotion arc and resonance from Codex Informer (shared service)
|
20 |
+
arc = self.codex_informer.get_arc(primary_emotion_code)
|
21 |
+
resonance = self.codex_informer.get_resonance(primary_emotion_code)
|
22 |
+
|
23 |
# Start building the UESP Packet
|
24 |
uesp_packet = {
|
25 |
"Primary Emotion": primary_emotion,
|
26 |
"Primary Emotion Code": primary_emotion_code,
|
27 |
+
"Emotion Arc Trajectory": arc, # From Codex Informer
|
28 |
+
"Resonance Pattern": resonance, # From Codex Informer
|
29 |
+
"HEART Compliance Flags": ["HVC-000"], # Placeholder, can be updated based on conditions
|
30 |
"Empathy First Response": esil_packet.get("response", "Emotion being processed..."),
|
31 |
"emotion_family": emotion_family # Always include this
|
32 |
}
|
33 |
|
34 |
# If the packet is empty or has no emotion_family, we call HEI for fallback
|
35 |
if emotion_family == "UNK" or not emotion_family:
|
36 |
+
print("[ERIS] No valid emotion family found — triggering HEI fallback...")
|
37 |
hei = HEIInference()
|
38 |
uesp_packet = hei.detect_low_signal(uesp_packet) # HEI will generate a valid packet if needed
|
39 |
|
core/esil_inference.py
CHANGED
@@ -1,10 +1,15 @@
|
|
1 |
# core/esil_inference.py
|
2 |
# Master Emotional Core (MEC) - ESIL Inference
|
3 |
|
|
|
|
|
4 |
class ESILInference:
|
5 |
-
def __init__(self, enable_gradient_blending=True, blend_maximum=3):
|
6 |
self.enable_gradient_blending = enable_gradient_blending
|
7 |
self.blend_maximum = blend_maximum
|
|
|
|
|
|
|
8 |
|
9 |
def infer_esil(self, eil_packet):
|
10 |
phrases = eil_packet.get("phrases", [])
|
@@ -12,21 +17,45 @@ class ESILInference:
|
|
12 |
|
13 |
# Trigger HEI if vague phrases detected:
|
14 |
low_conf_phrases = ["meh", "...", "idk", "whatever", "fine"]
|
15 |
-
|
|
|
16 |
if any(lp in phrases for lp in low_conf_phrases):
|
17 |
confidence_score = 0.3
|
18 |
else:
|
19 |
confidence_score = 0.85
|
20 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
21 |
esil_packet = {
|
22 |
"blend_weights": [
|
23 |
{"emotion": "Pending", "weight": 0.8}
|
24 |
],
|
25 |
"trajectory": "Stable",
|
26 |
"confidence_score": confidence_score,
|
27 |
-
"
|
|
|
|
|
|
|
|
|
28 |
}
|
29 |
|
30 |
-
|
31 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
32 |
return esil_packet
|
|
|
1 |
# core/esil_inference.py
|
2 |
# Master Emotional Core (MEC) - ESIL Inference
|
3 |
|
4 |
+
from core.codex_informer import CodexInformer
|
5 |
+
|
6 |
class ESILInference:
|
7 |
+
def __init__(self, enable_gradient_blending=True, blend_maximum=3, confidence_threshold=0.65):
    """Configure blending options and the confidence routing threshold."""
    self.enable_gradient_blending = enable_gradient_blending
    self.blend_maximum = blend_maximum
    # Scores at or above this threshold route directly to ERIS.
    self.confidence_threshold = confidence_threshold
    # Shared codex lookup service (process-wide singleton).
    self.codex_informer = CodexInformer()
|
13 |
|
14 |
def infer_esil(self, eil_packet):
|
15 |
phrases = eil_packet.get("phrases", [])
|
|
|
17 |
|
18 |
# Trigger HEI if vague phrases detected:
|
19 |
low_conf_phrases = ["meh", "...", "idk", "whatever", "fine"]
|
20 |
+
|
21 |
+
# Check if any low-confidence phrases are present
|
22 |
if any(lp in phrases for lp in low_conf_phrases):
|
23 |
confidence_score = 0.3
|
24 |
else:
|
25 |
confidence_score = 0.85
|
26 |
|
27 |
+
# Retrieve emotion family, arc, and resonance from Codex Informer
|
28 |
+
primary_emotion_code = eil_packet.get("primary_emotion_code", "UNK")
|
29 |
+
emotion_family = self.codex_informer.get_emotion_family(primary_emotion_code)
|
30 |
+
arc = self.codex_informer.get_arc(primary_emotion_code)
|
31 |
+
resonance = self.codex_informer.get_resonance(primary_emotion_code)
|
32 |
+
|
33 |
+
# If no emotion family is found, flag it as a "hidden emotion"
|
34 |
+
if emotion_family == "Unknown":
|
35 |
+
emotion_family = "Hidden Emotion Detected" # Fallback for hidden emotion logic
|
36 |
+
|
37 |
+
# Build ESIL packet with updated emotion data from Codex Informer
|
38 |
esil_packet = {
|
39 |
"blend_weights": [
|
40 |
{"emotion": "Pending", "weight": 0.8}
|
41 |
],
|
42 |
"trajectory": "Stable",
|
43 |
"confidence_score": confidence_score,
|
44 |
+
"emotion_family": emotion_family, # From Codex Informer
|
45 |
+
"arc": arc, # From Codex Informer
|
46 |
+
"resonance": resonance, # From Codex Informer
|
47 |
+
"source_metadata": eil_packet.get("metadata", {}),
|
48 |
+
"tokens": phrases
|
49 |
}
|
50 |
|
51 |
+
# Confidence routing logic: Directly to ERIS if confidence is high
|
52 |
+
if confidence_score >= self.confidence_threshold:
|
53 |
+
routing_decision = "proceed_to_eris"
|
54 |
+
# Trigger HEI if confidence is low and unresolved
|
55 |
+
elif confidence_score < self.confidence_threshold:
|
56 |
+
routing_decision = "escalate_to_hei"
|
57 |
+
|
58 |
+
esil_packet['routing_decision'] = routing_decision
|
59 |
+
print(f"[ESILInference] ESIL Packet with Routing Decision: {esil_packet}")
|
60 |
+
|
61 |
return esil_packet
|
core/hei_inference.py
CHANGED
@@ -2,24 +2,30 @@
|
|
2 |
|
3 |
import yaml
|
4 |
import datetime
|
|
|
5 |
|
6 |
class HEIInference:
|
7 |
def __init__(self):
|
8 |
-
# Load config files
|
9 |
self.sal_triggers = self.load_yaml('config/sal_triggers.yaml')
|
10 |
self.meta_mappings = self.load_yaml('config/meta_mappings.yaml')
|
|
|
|
|
|
|
11 |
|
12 |
def load_yaml(self, path):
|
13 |
with open(path, 'r', encoding='utf-8') as f:
|
14 |
return yaml.safe_load(f)
|
15 |
|
16 |
def lookup_meta_mapping(self, meta_marker):
|
17 |
-
|
|
|
18 |
if meta['marker'] == meta_marker:
|
19 |
return meta
|
20 |
return None
|
21 |
|
22 |
def detect_low_signal(self, failed_esil_packet):
|
|
|
23 |
pseudo_esil_packet = {
|
24 |
'pseudo_esil_packet_id': f"PSUESP-{datetime.datetime.now().isoformat()}",
|
25 |
'detected_tokens': failed_esil_packet.get('tokens', []),
|
@@ -31,10 +37,11 @@ class HEIInference:
|
|
31 |
|
32 |
# Check for SAL triggers in tokens
|
33 |
for token in pseudo_esil_packet['detected_tokens']:
|
34 |
-
for trigger in self.sal_triggers['sal_triggers']:
|
35 |
-
if trigger
|
36 |
-
|
37 |
-
|
|
|
38 |
|
39 |
# Check for META markers if present
|
40 |
if 'meta_marker' in failed_esil_packet:
|
@@ -47,12 +54,28 @@ class HEIInference:
|
|
47 |
}
|
48 |
pseudo_esil_packet['reasoning_flags'].append(f"META_INFER-{meta['marker']}")
|
49 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
50 |
# Log for audit
|
51 |
self.log_audit(pseudo_esil_packet)
|
52 |
|
53 |
return pseudo_esil_packet
|
54 |
|
55 |
def log_audit(self, packet):
|
|
|
56 |
print("---- HEI Inference Audit Log ----")
|
57 |
print(f"Packet ID: {packet['pseudo_esil_packet_id']}")
|
58 |
print(f"Timestamp: {packet['timestamp']}")
|
|
|
2 |
|
3 |
import yaml
|
4 |
import datetime
|
5 |
+
from core.codex_informer import CodexInformer # Ensure CodexInformer is imported
|
6 |
|
7 |
class HEIInference:
|
8 |
def __init__(self):
    """Load SAL trigger / META mapping config and attach the Codex Informer."""
    # NOTE(review): these keep the WHOLE parsed YAML document (no key
    # extraction), unlike CodexInformer.load_data — callers must unwrap
    # the payload lists themselves (e.g. self.sal_triggers['sal_triggers']).
    self.sal_triggers = self.load_yaml('config/sal_triggers.yaml')
    self.meta_mappings = self.load_yaml('config/meta_mappings.yaml')

    # Persistent shared codex lookup service.
    self.codex_informer = CodexInformer()
|
15 |
|
16 |
def load_yaml(self, path):
    """Open the YAML file at *path* and return its parsed contents."""
    # safe_load keeps untrusted config from instantiating arbitrary objects.
    with open(path, 'r', encoding='utf-8') as stream:
        return yaml.safe_load(stream)
|
19 |
|
20 |
def lookup_meta_mapping(self, meta_marker):
    """Return the META mapping entry whose 'marker' equals *meta_marker*, else None.

    Bug fix: __init__ stores the WHOLE parsed YAML document in
    self.meta_mappings (it does not unwrap the 'meta_mappings' key, in
    contrast to how self.sal_triggers['sal_triggers'] is used elsewhere).
    Iterating the document directly walks its top-level dict keys (strings),
    so meta['marker'] can never match — iterate the payload list instead.
    """
    for meta in self.meta_mappings.get('meta_mappings', []):
        if meta['marker'] == meta_marker:
            return meta
    return None
|
26 |
|
27 |
def detect_low_signal(self, failed_esil_packet):
|
28 |
+
# Generate a pseudo ESIL packet when low signal is detected
|
29 |
pseudo_esil_packet = {
|
30 |
'pseudo_esil_packet_id': f"PSUESP-{datetime.datetime.now().isoformat()}",
|
31 |
'detected_tokens': failed_esil_packet.get('tokens', []),
|
|
|
37 |
|
38 |
# Check for SAL triggers in tokens
|
39 |
for token in pseudo_esil_packet['detected_tokens']:
|
40 |
+
for trigger in self.sal_triggers['sal_triggers']: # Make sure this is a list of dictionaries
|
41 |
+
if isinstance(trigger, dict) and 'trigger' in trigger:
|
42 |
+
if trigger['trigger'].lower() in token.lower():
|
43 |
+
pseudo_esil_packet['sal_trigger_flags'].append(trigger['flag'])
|
44 |
+
pseudo_esil_packet['reasoning_flags'].append(f"SAL_MATCH-{trigger['flag']}")
|
45 |
|
46 |
# Check for META markers if present
|
47 |
if 'meta_marker' in failed_esil_packet:
|
|
|
54 |
}
|
55 |
pseudo_esil_packet['reasoning_flags'].append(f"META_INFER-{meta['marker']}")
|
56 |
|
57 |
+
# Retrieve emotion data (family, arc, resonance) from Codex Informer
|
58 |
+
primary_emotion_code = failed_esil_packet.get("primary_emotion_code", "UNK")
|
59 |
+
emotion_family = self.codex_informer.get_emotion_family(primary_emotion_code)
|
60 |
+
arc = self.codex_informer.get_arc(primary_emotion_code)
|
61 |
+
resonance = self.codex_informer.get_resonance(primary_emotion_code)
|
62 |
+
|
63 |
+
# If no emotion family is found, flag it as a "hidden emotion"
|
64 |
+
if emotion_family == "Unknown":
|
65 |
+
emotion_family = "Hidden Emotion Detected" # Fallback for hidden emotion logic
|
66 |
+
|
67 |
+
# Add the emotion family, arc, and resonance to the pseudo ESIL packet
|
68 |
+
pseudo_esil_packet['emotion_family'] = emotion_family
|
69 |
+
pseudo_esil_packet['arc'] = arc
|
70 |
+
pseudo_esil_packet['resonance'] = resonance
|
71 |
+
|
72 |
# Log for audit
|
73 |
self.log_audit(pseudo_esil_packet)
|
74 |
|
75 |
return pseudo_esil_packet
|
76 |
|
77 |
def log_audit(self, packet):
|
78 |
+
# Log audit for HEI inference process
|
79 |
print("---- HEI Inference Audit Log ----")
|
80 |
print(f"Packet ID: {packet['pseudo_esil_packet_id']}")
|
81 |
print(f"Timestamp: {packet['timestamp']}")
|
core/input_preprocessor.py
DELETED
@@ -1,21 +0,0 @@
|
|
1 |
-
# core/input_preprocessor.py
|
2 |
-
# Master Emotional Core (MEC) - Input Preprocessor
|
3 |
-
|
4 |
-
class InputPreprocessor:
    """Normalizes raw user text into a preprocessing packet for the MEC pipeline."""

    def __init__(self, signal_floor=0.2, max_token_length=1024, enable_noise_filter=True):
        self.signal_floor = signal_floor
        self.max_token_length = max_token_length
        self.enable_noise_filter = enable_noise_filter

    def preprocess_text(self, raw_text):
        """Trim and lower-case *raw_text*; return the preprocessing packet."""
        cleaned = raw_text.strip().lower()
        packet = {
            "normalized_text": cleaned,
            "length": len(cleaned)
        }
        print(f"[InputPreprocessor] Packet: {packet}")
        return packet
|
20 |
-
|
21 |
-
# Audio and video preprocessing would be added here in full system
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
main.py
CHANGED
@@ -1,7 +1,5 @@
|
|
1 |
-
from core.input_preprocessor import InputPreprocessor
|
2 |
from core.eil_processor import EILProcessor
|
3 |
from core.esil_inference import ESILInference
|
4 |
-
from core.confidence_gate import ConfidenceGate
|
5 |
from core.eris_reasoner import ERISReasoner
|
6 |
from core.hei_inference import HEIInference
|
7 |
from core.fec_controller import FECController
|
@@ -10,50 +8,46 @@ def run_pipeline(user_input_text, force_hei=False):
|
|
10 |
print("\n--- MEC MVP Test Run ---")
|
11 |
print(f"[Main] Pipeline Input: {user_input_text}")
|
12 |
|
13 |
-
# 1️⃣
|
14 |
-
pre = InputPreprocessor()
|
15 |
-
pre_packet = pre.preprocess_text(user_input_text)
|
16 |
-
print(f"[Main] Preprocessing Packet Output: {pre_packet}")
|
17 |
-
|
18 |
-
# 2️⃣ EIL Processor
|
19 |
eil = EILProcessor()
|
|
|
|
|
|
|
20 |
eil_packet = eil.process_eil(pre_packet)
|
21 |
print(f"[Main] EIL Packet Output: {eil_packet}")
|
22 |
|
23 |
-
#
|
24 |
esil = ESILInference()
|
25 |
esil_packet = esil.infer_esil(eil_packet)
|
26 |
|
27 |
-
#
|
28 |
if force_hei:
|
29 |
print("\n[Main] FORCE HEI MODE ENABLED — Routing to HEI Inference")
|
30 |
esil_packet['confidence_score'] = 0.40 # force low confidence to trigger HEI
|
31 |
|
32 |
print(f"[Main] ESIL Packet Output: {esil_packet}")
|
33 |
|
34 |
-
# 4️⃣ Confidence Gate
|
35 |
-
gate = ConfidenceGate()
|
36 |
-
route = gate.evaluate_confidence(esil_packet)
|
37 |
-
print(f"[Main] Confidence Gate Output: {route}")
|
38 |
-
|
39 |
# Routing logic:
|
40 |
-
if
|
41 |
print("\n[Main] Routing: proceed_to_eris")
|
|
|
42 |
eris = ERISReasoner()
|
43 |
final_uesp = eris.reason_emotion_state(esil_packet)
|
44 |
print(f"[Main] ERIS Packet Output: {final_uesp}")
|
45 |
|
|
|
46 |
fec = FECController()
|
47 |
fusion_prompt = fec.generate_prompt(final_uesp)
|
48 |
print(f"[Main] Final Fusion Prompt:\n{fusion_prompt}")
|
49 |
|
50 |
-
elif
|
51 |
print("\n[Main] Routing: escalate_to_hei")
|
|
|
52 |
hei = HEIInference()
|
53 |
pseudo_esil = hei.detect_low_signal(esil_packet)
|
54 |
print(f"[Main] Pseudo-ESIL Output:\n{pseudo_esil}")
|
55 |
|
56 |
-
#
|
57 |
print("\n[Main] Continuing to ERIS Reasoner (Post-HEI Path)")
|
58 |
eris = ERISReasoner()
|
59 |
final_uesp = eris.reason_emotion_state(pseudo_esil)
|
@@ -68,7 +62,7 @@ def run_pipeline(user_input_text, force_hei=False):
|
|
68 |
|
69 |
if __name__ == "__main__":
|
70 |
# Example input that matches a SAL Trigger:
|
71 |
-
test_input = "I
|
72 |
|
73 |
# Run in FORCE HEI mode → set to True to test Symbolic Layer
|
74 |
-
run_pipeline(test_input, force_hei=
|
|
|
|
|
1 |
from core.eil_processor import EILProcessor
|
2 |
from core.esil_inference import ESILInference
|
|
|
3 |
from core.eris_reasoner import ERISReasoner
|
4 |
from core.hei_inference import HEIInference
|
5 |
from core.fec_controller import FECController
|
|
|
8 |
print("\n--- MEC MVP Test Run ---")
|
9 |
print(f"[Main] Pipeline Input: {user_input_text}")
|
10 |
|
11 |
+
# 1️⃣ EIL Processor (handles both preprocessing and emotion processing)
|
|
|
|
|
|
|
|
|
|
|
12 |
eil = EILProcessor()
|
13 |
+
# Preprocess the input text first
|
14 |
+
pre_packet = eil.preprocess_text(user_input_text)
|
15 |
+
# Now pass the preprocessing packet to process_eil
|
16 |
eil_packet = eil.process_eil(pre_packet)
|
17 |
print(f"[Main] EIL Packet Output: {eil_packet}")
|
18 |
|
19 |
+
# 2️⃣ ESIL Inference
|
20 |
esil = ESILInference()
|
21 |
esil_packet = esil.infer_esil(eil_packet)
|
22 |
|
23 |
+
# 3️⃣ Forced HEI Mode: Ensure it forces the low confidence path if True
|
24 |
if force_hei:
|
25 |
print("\n[Main] FORCE HEI MODE ENABLED — Routing to HEI Inference")
|
26 |
esil_packet['confidence_score'] = 0.40 # force low confidence to trigger HEI
|
27 |
|
28 |
print(f"[Main] ESIL Packet Output: {esil_packet}")
|
29 |
|
|
|
|
|
|
|
|
|
|
|
30 |
# Routing logic:
|
31 |
+
if esil_packet['confidence_score'] >= 0.65:
|
32 |
print("\n[Main] Routing: proceed_to_eris")
|
33 |
+
# 4️⃣ ERIS Reasoning (Final UESP Creation)
|
34 |
eris = ERISReasoner()
|
35 |
final_uesp = eris.reason_emotion_state(esil_packet)
|
36 |
print(f"[Main] ERIS Packet Output: {final_uesp}")
|
37 |
|
38 |
+
# 5️⃣ FEC Controller (Final Fusion Prompt)
|
39 |
fec = FECController()
|
40 |
fusion_prompt = fec.generate_prompt(final_uesp)
|
41 |
print(f"[Main] Final Fusion Prompt:\n{fusion_prompt}")
|
42 |
|
43 |
+
elif esil_packet['confidence_score'] < 0.65:
|
44 |
print("\n[Main] Routing: escalate_to_hei")
|
45 |
+
# 6️⃣ Trigger HEI Inference (Fallback for Low Confidence)
|
46 |
hei = HEIInference()
|
47 |
pseudo_esil = hei.detect_low_signal(esil_packet)
|
48 |
print(f"[Main] Pseudo-ESIL Output:\n{pseudo_esil}")
|
49 |
|
50 |
+
# 7️⃣ Post-HEI Path: Continue to ERIS → UESP → FEC
|
51 |
print("\n[Main] Continuing to ERIS Reasoner (Post-HEI Path)")
|
52 |
eris = ERISReasoner()
|
53 |
final_uesp = eris.reason_emotion_state(pseudo_esil)
|
|
|
62 |
|
63 |
if __name__ == "__main__":
|
64 |
# Example input that matches a SAL Trigger:
|
65 |
+
test_input = "I'm okay, really"
|
66 |
|
67 |
# Run in FORCE HEI mode → set to True to test Symbolic Layer
|
68 |
+
run_pipeline(test_input, force_hei=True)
|