File size: 10,515 Bytes
891c45c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a0d76d8
891c45c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a0d76d8
891c45c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a0d76d8
891c45c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
a0d76d8
891c45c
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
import asyncio
import json
import gradio as gr
from openai import AsyncOpenAI, OpenAI
from dotenv import load_dotenv
import os

# Load environment variables from a local .env file (e.g. XAI_API_KEY).
load_dotenv()

# Configuration: both clients target the xAI API with the same key.
# `client` (async) drives the multi-agent fan-out; `simple_client`
# (sync) drives the streaming chat tab.
XAI_API_KEY = os.getenv("XAI_API_KEY")
client = AsyncOpenAI(
    api_key=XAI_API_KEY,
    base_url="https://api.x.ai/v1",
)

simple_client = OpenAI(
    api_key=XAI_API_KEY,
    base_url="https://api.x.ai/v1",
)

# Load agent personalities at import time; each entry is a persona dict
# (first_name, last_name, expertise, ... — schema defined by the JSON file).
with open('data/agent_bank.json', 'r') as f:
    AGENT_BANK = json.load(f)['agents']

class MultiAgentConversationalSystem:
    """Two-stage multi-agent policy analysis.

    Stage one fans a policy question out to every persona in the agent
    bank concurrently and records each answer. Stage two supports
    follow-up chat with an individual agent, each agent keeping its own
    conversation history in ``self.conversation_histories``.
    """

    def __init__(self, api_client):
        """Store the async chat client and seed agent/manager state.

        api_client: AsyncOpenAI-compatible client used for all
        chat-completion calls.
        """
        self.client = api_client
        self.agents = AGENT_BANK  # personas loaded at module import time
        self.first_stage_results = []  # per-agent dicts from the last analysis run
        self.conversation_histories = {}  # message lists keyed by "First Last" name
        # Synthetic "manager" persona used to frame the synthesis prompt.
        self.manager_agent = {
            "first_name": "Alex",
            "last_name": "Policymaker",
            "expertise": "Policy Strategy and Synthesis",
            "personality": "Strategic, analytical, and focused on comprehensive understanding"
        }

    async def first_stage_analysis(self, policy):
        """First stage: Agents analyze policy and provide reasoning with yes/no answer.

        Runs one completion per agent concurrently, stores the results in
        ``self.first_stage_results``, and seeds the manager's conversation
        history with a synthesis system prompt. Returns the result list.
        """
        async def agent_policy_analysis(agent):
            # Flatten the persona dict into "key: value" lines for the prompt.
            agent_context = "\n".join([
                f"{key}: {value}" for key, value in agent.items()
            ])

            prompt = f"""
            Agent Profile:
            {agent_context}

            Policy/Topic: {policy}
            
            Task:
            1. Carefully analyze the policy/topic using ALL aspects of your defined personality and expertise.
            2. Provide a clear YES or NO answer.
            3. Explain your reasoning in 2-3 detailed paragraphs.
            4. Leverage every aspect of your defined characteristics to provide a comprehensive analysis.

            Format your response as:
            - Agent: {agent['first_name']} {agent['last_name']}
            - Answer: YES/NO
            - Reasoning: [Detailed explanation drawing from ALL your defined attributes]
            """
            
            try:
                response = await self.client.chat.completions.create(
                    model="grok-2-1212",
                    messages=[{"role": "user", "content": prompt}]
                )
                return {
                    "full_name": f"{agent['first_name']} {agent['last_name']}",
                    "expertise": agent['expertise'],
                    "full_agent_context": agent,
                    "full_response": response.choices[0].message.content
                }
            except Exception as e:
                # Include "expertise" here too: agent_conversation() reads it
                # from these dicts and would otherwise KeyError on an agent
                # whose first-stage call failed.
                return {
                    "full_name": f"{agent['first_name']} {agent['last_name']}",
                    "expertise": agent.get('expertise', 'Unknown'),
                    "full_agent_context": agent,
                    "full_response": f"Error: {str(e)}"
                }

        tasks = [agent_policy_analysis(agent) for agent in self.agents]
        self.first_stage_results = await asyncio.gather(*tasks)

        summary_prompt = f"""
        Policy/Topic: {policy}

        Agent Analyses Summary:
        {self.first_stage_results}

        Your Task:
        1. Synthesize the diverse agent perspectives into a comprehensive policy overview.
        2. Identify key insights, potential challenges, and strategic recommendations.
        3. Provide a balanced and strategic assessment of the policy.
        """

        manager_name = f"{self.manager_agent['first_name']} {self.manager_agent['last_name']}"
        self.conversation_histories[manager_name] = [
            {"role": "system", "content": f"""
            You are {manager_name}, a strategic policy analyst with expertise in {self.manager_agent['expertise']}.
            You synthesize complex perspectives and provide strategic policy insights.

            Initial Policy Summary:
            {summary_prompt}
            """}
        ]
        
        return self.first_stage_results

    async def manager_summary(self, policy):
        """Return a one-shot model summary of ``policy`` (an error string on failure)."""
        try:
            response = await self.client.chat.completions.create(
                model="grok-2-1212",
                # Fixed instruction wording ("Summarized" -> "Summarize").
                messages=[{"role": "user", "content": f"""Summarize this.\n\n{policy}"""}],
                stream=False
            )
            return response.choices[0].message.content
        except Exception as e:
            return f"Summary generation error: {str(e)}"

    async def agent_conversation(self, agent_name, message, history):
        """Send ``message`` to the named agent and return its reply.

        ``history`` is accepted for interface compatibility with the Gradio
        callback but is unused; the authoritative per-agent history lives in
        ``self.conversation_histories``. Lazily creates an agent history
        (seeded from first-stage results) on first contact.
        """
        if agent_name not in self.conversation_histories:
            agent_context = next((agent for agent in self.first_stage_results 
                                  if f"{agent['full_agent_context']['first_name']} {agent['full_agent_context']['last_name']}" == agent_name), 
                                 None)
            if not agent_context:
                return "Agent not found."
            
            self.conversation_histories[agent_name] = [
                {"role": "system", "content": f"""
                You are {agent_name}, an agent with the following profile:
                Expertise: {agent_context['expertise']}
                
                Approach the conversation from your unique perspective, 
                drawing on your expertise and personality.
                """}
            ]
        
        conversation_history = self.conversation_histories[agent_name].copy()
        conversation_history.append({"role": "user", "content": message})
        
        try:
            response = await self.client.chat.completions.create(
                model="grok-2-1212",
                messages=conversation_history,
                # BUGFIX: was stream=True, but the code below reads
                # response.choices[0].message.content, which only exists on a
                # non-streaming response — every call raised and returned the
                # error string. Non-streaming matches the access pattern.
                stream=False
            )
            
            agent_response = response.choices[0].message.content
            # Persist the exchange only after a successful completion.
            self.conversation_histories[agent_name].append(
                {"role": "user", "content": message}
            )
            self.conversation_histories[agent_name].append(
                {"role": "assistant", "content": agent_response}
            )
            
            return agent_response
        
        except Exception as e:
            return f"Conversation error: {str(e)}"

# Chat
def predict(message, history, policy_summary):
    """Stream a policymaker-persona reply for the second-stage chat tab.

    Builds an OpenAI-format message list (system persona, policy-summary
    context, prior turns, then the new user message), calls the sync
    client with streaming enabled, and yields the growing partial reply —
    the shape Gradio expects from a streaming callback.

    message: the new user utterance.
    history: list of (user, assistant) string pairs from the Chatbot widget.
    policy_summary: first-stage markdown summary injected as context.
    """
    # Typos fixed in the model-facing prompts ("work as" -> "works as",
    # "Here are ... professtional" -> "Here is ... professional").
    system_prompt = """\
    You are an assistant, that works as a Policymaker. Expertise in Policy Strategy and Synthesis.
    With a personality of Strategic, analytical, and focused on comprehensive understanding.
    """
    
    policy_summary_prompt = f"""\
    Here is the policy summary of professional role in the country.
    {policy_summary}
    """
    
    history_openai_format = [{"role": "system", "content": system_prompt}]
    history_openai_format.append({"role": "user", "content": policy_summary_prompt})
    
    # Replay prior turns, skipping empty/non-string entries (e.g. the
    # placeholder "" assistant slot appended while streaming).
    for human, assistant in history:
        if isinstance(human, str) and human.strip():
            history_openai_format.append({"role": "user", "content": human})
        if isinstance(assistant, str) and assistant.strip():
            history_openai_format.append({"role": "assistant", "content": assistant})
    
    history_openai_format.append({"role": "user", "content": message})
    
    print("history_openai_format:", history_openai_format)
  
    response = simple_client.chat.completions.create(
        model='grok-2-1212',
        messages=history_openai_format,
        temperature=0.6,
        stream=True
    )

    # Accumulate deltas and yield the running text so the UI updates live.
    partial_message = ""
    for chunk in response:
        if chunk.choices[0].delta.content is not None:
            partial_message += chunk.choices[0].delta.content
            yield partial_message

def chat_bot(user_input, history, policy_summary):
    """Gradio callback: stream the assistant's reply into the chat history.

    Appends a placeholder turn for ``user_input``, then overwrites it with
    each partial reply from predict(), yielding ("", history) so the input
    box clears while the Chatbot widget updates live.
    """
    reply_stream = predict(user_input, history, policy_summary)
    history.append((user_input, ""))

    for partial_reply in reply_stream:
        history[-1] = (user_input, partial_reply)
        yield "", history

def create_gradio_interface():
    """Build and return the two-tab Gradio Blocks app.

    Tab 1 runs the parallel first-stage agent analysis; Tab 2 is a
    streaming chat with the policymaker persona, which receives the
    first-stage summary as context.
    """
    multi_agent_system = MultiAgentConversationalSystem(client)

    def get_manager_summary(policy):
        # Bridge the async manager_summary coroutine into Gradio's sync callback.
        summary = asyncio.run(multi_agent_system.manager_summary(policy))
        return summary

    def agent_chat(agent_name, message, history, summary_policy):
        # BUGFIX: agent_conversation takes (agent_name, message, history);
        # the original passed a fourth argument (summary_policy), which
        # raised TypeError whenever this callback ran. summary_policy is
        # kept in the signature for callers but not forwarded.
        response = asyncio.run(multi_agent_system.agent_conversation(agent_name, message, history))
        history.append((message, response))
        return "", history

    def first_stage_process(policy):
        # Blocks while all agents run concurrently; Info toasts bracket the wait.
        gr.Info("Running Agent Parallel Please Wait....")
        results = asyncio.run(multi_agent_system.first_stage_analysis(policy))
        formatted_output = "🔍 First Stage: Agent Policy Analyses\n\n"
        for result in results:
            formatted_output += f"**{result['full_name']}:**\n{result['full_response']}\n\n{'='*50}\n\n"
        gr.Info("Running Agent Done!")

        return formatted_output

    with gr.Blocks() as demo:
        gr.Markdown("# 🌐 Two-Stage Multi-Agent Policy Analysis")
        
        with gr.Tab("First Stage: Policy Analysis"):
            policy_input = gr.Textbox(label="Policy/Topic")
            first_stage_btn = gr.Button("Analyze Policy")
            policy_summary = gr.Markdown(label="Agent Perspectives")
            
            first_stage_btn.click(
                fn=first_stage_process, 
                inputs=policy_input, 
                outputs=[policy_summary]
            )
            
        with gr.Tab("Second Stage: Chat with Policy Maker"):
            chatbot = gr.Chatbot(elem_id="chatbot")
            msg = gr.Textbox(placeholder="Put your message here...")

            with gr.Row():
                clear = gr.Button("Clear History")
                send = gr.Button("Send Message", variant="primary")
                
            gr.Examples(
                examples=[
                    "Should I implement this?", 
                    "Can you recommend what should i do?", 
                ], 
                inputs=msg,
            )

            # Clear resets the Chatbot; submit/click both stream via chat_bot,
            # passing the first-stage markdown as context.
            clear.click(lambda: [], [], chatbot)
            msg.submit(chat_bot, [msg, chatbot, policy_summary], [msg, chatbot])
            send.click(chat_bot, [msg, chatbot, policy_summary], [msg, chatbot])
            
    return demo

def main():
    """Entry point: build the Gradio UI and serve it locally."""
    create_gradio_interface().launch()

if __name__ == "__main__":
    main()