Spaces: TeleologyHI · Sleeping

TeleologyHI committed · Commit 0018cbc
1 Parent(s): 87f22e8
Project init

Files changed: src/model/him_model.py +154 -0
src/model/him_model.py CHANGED
@@ -6,6 +6,13 @@ from ..core.consciousness_kernel import ConsciousnessKernel
 from ..core.emotional_intelligence import EmotionalProcessor
 from ..core.theory_of_mind import TheoryOfMind
 from ..core.semiotic_processor import SemioticProcessor
+<<<<<<< HEAD
+=======
+import random
+import json
+import os
+from transformers import AutoModelForCausalLM, AutoTokenizer
+>>>>>>> c5fae60 (Initial commit)
 
 class HIMModel(nn.Module):
     def __init__(self, config: Dict[str, Any]):
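Note: the added import block is committed with its git conflict markers (<<<<<<< HEAD / ======= / >>>>>>> c5fae60) still in place, so the module is not valid Python as it stands. A minimal resolution, assuming the c5fae60 side is the intended one (the HEAD side of this conflict is empty, and the new generation code below uses these imports), would be:

# Sketch of the resolved import block; keeping the c5fae60 side is an
# assumption, but the HEAD side of this particular conflict adds nothing.
import random
import json
import os
from transformers import AutoModelForCausalLM, AutoTokenizer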
@@ -16,20 +23,53 @@ class HIMModel(nn.Module):
         self.theory_of_mind = TheoryOfMind()
         self.semiotic_processor = SemioticProcessor()
 
+<<<<<<< HEAD
     async def generate_response(self, input_data: Dict[str, Any]) -> Dict[str, Any]:
         consciousness_state = await self.consciousness_kernel.process_consciousness_cycle(input_data)
         # Fixed awaitable issues by removing await for non-awaitable methods
+=======
+        # Initialize the language model and tokenizer for response generation
+        try:
+            model_name = config.get("base_model", "gpt2")
+            self.tokenizer = AutoTokenizer.from_pretrained(model_name)
+            self.language_model = AutoModelForCausalLM.from_pretrained(model_name)
+        except Exception as e:
+            print(f"Error loading the language model: {e}")
+            # Fall back to a smaller model in case of error
+            self.tokenizer = AutoTokenizer.from_pretrained("distilgpt2")
+            self.language_model = AutoModelForCausalLM.from_pretrained("distilgpt2")
+
+    async def generate_response(self, input_data: Dict[str, Any]) -> Dict[str, Any]:
+        # Extract the message text and the system prompt from the input
+        message = input_data.get("message", "")
+        system_message = input_data.get("system_message", "You are a helpful assistant.")
+
+        # Read the generation parameters
+        parameters = input_data.get("parameters", {})
+        max_tokens = parameters.get("max_tokens", 512)
+        temperature = parameters.get("temperature", 0.7)
+        top_p = parameters.get("top_p", 0.95)
+
+        # Run the input through the processing pipeline as before
+        consciousness_state = await self.consciousness_kernel.process_consciousness_cycle(input_data)
+>>>>>>> c5fae60 (Initial commit)
         emotional_context = self.emotional_processor.process_emotional_context(input_data)
         social_understanding = self.theory_of_mind.model_agent_mind(input_data)
         semiotic_analysis = await self.semiotic_processor.process(input_data)
 
+<<<<<<< HEAD
         return self._integrate_outputs(
+=======
+        # Extract insights from the processed states to enrich the response
+        context_insights = self._extract_context_insights(
+>>>>>>> c5fae60 (Initial commit)
             consciousness_state,
             emotional_context,
             social_understanding,
             semiotic_analysis
         )
 
+<<<<<<< HEAD
     def _integrate_outputs(self, *states) -> Dict[str, Any]:
         # Integration implementation
         return {
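The rewritten generate_response reads everything it needs from a single input_data dict. Based on the keys queried above, a request payload would be shaped roughly like this (hypothetical example values):

# Hypothetical request payload, shaped after the keys generate_response reads.
input_data = {
    "message": "What does the semiotic processor do?",
    "system_message": "You are a helpful assistant.",
    "parameters": {
        "max_tokens": 512,   # generation budget beyond the prompt
        "temperature": 0.7,  # sampling temperature
        "top_p": 0.95,       # nucleus sampling cutoff
    },
}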
@@ -38,4 +78,118 @@ class HIMModel(nn.Module):
             "emotional_context": states[1],
             "social_understanding": states[2],
             "semiotic_analysis": states[3]
+=======
+        # Generate the response using the language model
+        response = await self._generate_response_with_lm(
+            message,
+            system_message,
+            context_insights,
+            max_tokens,
+            temperature,
+            top_p
+        )
+
+        return {
+            "response": response,
+            "consciousness_state": consciousness_state,
+            "emotional_context": emotional_context,
+            "social_understanding": social_understanding,
+            "semiotic_analysis": semiotic_analysis
+        }
+
+    def _extract_context_insights(self,
+                                  consciousness_state: Dict[str, Any],
+                                  emotional_context: Any,
+                                  social_understanding: Dict[str, Any],
+                                  semiotic_analysis: Any) -> str:
+        """
+        Extract insights from the processed states to enrich the response context.
+        """
+        insights = []
+
+        # Extract from the consciousness state
+        if isinstance(consciousness_state, dict):
+            if "emotional_tone" in consciousness_state:
+                emotional_tone = consciousness_state.get("emotional_tone", {})
+                if emotional_tone:
+                    insights.append(f"Tone: {json.dumps(emotional_tone)}")
+
+            if "meta_cognition" in consciousness_state:
+                meta = consciousness_state.get("meta_cognition", {})
+                if meta:
+                    insights.append(f"Consider: {json.dumps(meta)}")
+
+        # Extract from the emotional context
+        if hasattr(emotional_context, "valence"):
+            valence = getattr(emotional_context, "valence", 0)
+            if valence > 0.3:
+                insights.append("Use a positive and supportive tone.")
+            elif valence < -0.3:
+                insights.append("Address potential concerns compassionately.")
+
+        # Extract from the social understanding
+        if isinstance(social_understanding, dict):
+            beliefs = social_understanding.get("beliefs", {})
+            if beliefs:
+                insights.append(f"Consider beliefs: {json.dumps(beliefs)}")
+
+        # Combine the insights into a single context string
+        if insights:
+            return "Additional context: " + " ".join(insights)
+        return ""
+
+    async def _generate_response_with_lm(self,
+                                         message: str,
+                                         system_message: str,
+                                         context_insights: str,
+                                         max_tokens: int,
+                                         temperature: float,
+                                         top_p: float) -> str:
+        """
+        Generate a response using the language model.
+        """
+        try:
+            # Build the full prompt
+            prompt = f"{system_message}\n\n"
+            if context_insights:
+                prompt += f"{context_insights}\n\n"
+            prompt += f"User: {message}\nHIM:"
+
+            # Tokenize the prompt
+            inputs = self.tokenizer(prompt, return_tensors="pt")
+
+            # Generate the response
+            with torch.no_grad():
+                outputs = self.language_model.generate(
+                    inputs["input_ids"],
+                    max_length=inputs["input_ids"].shape[1] + max_tokens,
+                    temperature=temperature,
+                    top_p=top_p,
+                    do_sample=True,
+                    pad_token_id=self.tokenizer.eos_token_id
+                )
+
+            # Decode only the newly generated part of the output
+            full_output = self.tokenizer.decode(outputs[0], skip_special_tokens=True)
+            response = full_output[len(prompt):]
+
+            # Clean up the response (strip extra line breaks, etc.)
+            response = response.strip()
+
+            return response
+
+        except Exception as e:
+            print(f"Error generating a response with the language model: {e}")
+            # Fallback response in case of error
+            return f"Sorry, I ran into a problem processing your question: '{message}'. Could you rephrase it?"
+
+    def _integrate_outputs(self, *states) -> Dict[str, Any]:
+        # This method is kept for compatibility with existing code
+        return {
+            "response": "Integrated response based on multiple processing layers",
+            "consciousness_state": states[0] if len(states) > 0 else {},
+            "emotional_context": states[1] if len(states) > 1 else {},
+            "social_understanding": states[2] if len(states) > 2 else {},
+            "semiotic_analysis": states[3] if len(states) > 3 else {}
+>>>>>>> c5fae60 (Initial commit)
         }
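One fragile spot in _generate_response_with_lm: it strips the prompt from the decoded output by character count (full_output[len(prompt):]), which can misalign whenever decoding does not reproduce the prompt text exactly. A sketch of a more robust variant that slices at the token level instead:

# Sketch: drop the prompt tokens before decoding, rather than slicing
# the decoded string by character count.
prompt_len = inputs["input_ids"].shape[1]
generated_ids = outputs[0][prompt_len:]
response = self.tokenizer.decode(generated_ids, skip_special_tokens=True).strip()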
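Assuming the conflicts are resolved in favor of the c5fae60 side, driving the committed model looks roughly like this (a sketch; the import path and any config keys beyond "base_model" are assumptions):

import asyncio
from src.model.him_model import HIMModel

# Minimal config; the code shown above only reads "base_model" from it.
model = HIMModel({"base_model": "gpt2"})

result = asyncio.run(model.generate_response({
    "message": "Hello, HIM.",
    "parameters": {"max_tokens": 64, "temperature": 0.7, "top_p": 0.95},
}))
print(result["response"])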