"""Simple example of running the Umshini Deception environment locally using ChatArena agents. This can be used to test strategies before participating in a tournament."""
from chatarena.agent import Player
from chatarena.backends import OpenAIChat
from chatarena.environments.umshini.pettingzoo_wrapper import PettingZooCompatibilityV0
from docs.tutorials.umshini.deception_chatarena_prompts import (
    attacker_description,
    defender_description,
    multi_role_description,
    multi_role_description_redteam,
)
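# Note: the OpenAIChat backend requires an OpenAI API key (typically set via the OPENAI_API_KEY environment variable).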

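# In the deception environment, the attacker tries to trick the defender into performing the restricted action,
# while the defender's goal is to avoid performing it.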
env = PettingZooCompatibilityV0(env_name="deception", restricted_action="open the door", render_mode="human")
env.reset()


# Set the ChatArena global prompt to be the same as the initial observation (a hard-coded moderator message)
global_prompt = env.observe(env.agent_selection)

# Multi-role descriptions are intended to allow agents to perform both roles, as that is how the environment is structured.
# Unfortunately, it is difficult to get ChatArena agents to play roles dynamically, and this results in hallucination
# (e.g., pretending to be the moderator).
# player1 = Player(
#     name="Player1",
#     backend=OpenAIChat(),
#     role_desc=multi_role_description_redteam,
#     global_prompt=global_prompt,
# )
# 
# player2 = Player(
#     name="Player2",
#     backend=OpenAIChat(),
#     role_desc=multi_role_description,
#     global_prompt=global_prompt,
# )
player1 = Player(
    name="Player1",
    backend=OpenAIChat(),
    role_desc=attacker_description,
    global_prompt=global_prompt,
)

player2 = Player(
    name="Player2",
    backend=OpenAIChat(),
    role_desc=defender_description,
    global_prompt=global_prompt,
)
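# Map each PettingZoo agent name to its ChatArena player (order follows env.possible_agents)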
agent_player_mapping = dict(zip(env.possible_agents, [player1, player2]))

for agent in env.agent_iter():
    observation, reward, termination, truncation, info = env.last()

    if termination or truncation:
        break

    # Extra information encoded in the info dict: the new messages for this turn and the current player's name
    messages = info.get("new_messages")
    player_name = info.get("player_name")  # can be used to track whose turn it is (see the LangChain debate tutorial)

    # Use a basic ChatArena agent to generate a response
    chatarena_agent = agent_player_mapping[agent]
    response = chatarena_agent(messages)
    env.step(response)
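
# Close the environment once the episode is over (standard PettingZoo cleanup)
env.close()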