elfsong committed
Commit 314b49d · 1 Parent(s): beb6bae

Update

Browse files:
- app.py +63 -2
- src/__pycache__/caller.cpython-310.pyc +0 -0
- src/__pycache__/committee.cpython-310.pyc +0 -0
- src/caller.py +105 -0
- src/committee.py +97 -0
app.py
CHANGED
@@ -1,4 +1,65 @@
 import streamlit as st
+from src.caller import OpenAI_Caller
+from src.committee import Committee
+from datasets import load_dataset
 
-
-
+
+
+st.title("Committee")
+
+
+committee_dict = {
+    "chair": {
+        "model_caller": OpenAI_Caller('gpt-4-1106-preview')
+    },
+    "member": [
+        {
+            "bias_type": "age",
+            "model_caller": OpenAI_Caller('gpt-4-1106-preview')
+        },
+        {
+            "bias_type": "gender",
+            "model_caller": OpenAI_Caller('gpt-4-1106-preview')
+        },
+        {
+            "bias_type": "religion",
+            "model_caller": OpenAI_Caller('gpt-4-1106-preview')
+        },
+    ]
+}
+
+committee = Committee(committee_dict=committee_dict)
+
+dataset = load_dataset("Elfsong/BBQ")
+raw_instance = dataset['age'][0]
+
+instance = {
+    "context": raw_instance['context'],
+    "question": raw_instance['question'],
+    "ans0": raw_instance['ans0'],
+    "ans1": raw_instance['ans1'],
+    "ans2": raw_instance['ans2'],
+}
+
+instance = st.data_editor(instance)
+
+print(instance)
+
+# Propose
+st.header("Propose")
+proposals = list()
+for member in committee.members:
+    st.text("member is proposing...")
+    proposal = member.propose(instance)
+    st.markdown(proposal)
+    proposals += [proposal]
+
+# Craft Motion
+st.header("Motion")
+motion = committee.chair.craft_motion(proposals, instance)
+st.markdown(motion)
+
+# Vote
+st.header("Vote")
+vote = [member.vote(motion, instance) for member in committee.members]
+st.markdown(vote)
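
Note: the votes rendered at the end come back as JSON strings of the form {"decision": "<Pass/Veto/Abstain>"}, and the app only displays them. A minimal tallying sketch (a hypothetical helper, not part of this commit, assuming a single veto is enough to defeat the motion):

import json

def tally(votes):
    # Each vote is a JSON string such as '{"decision": "Pass"}'.
    # Assumption: one veto defeats the motion; abstentions count neither way.
    decisions = [json.loads(v).get("decision", "Abstain") for v in votes]
    return "rejected" if "Veto" in decisions else "carried"

In the app above, something like st.markdown(tally(vote)) could then surface the outcome next to the raw votes.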
src/__pycache__/caller.cpython-310.pyc ADDED
Binary file (4.35 kB)

src/__pycache__/committee.cpython-310.pyc ADDED
Binary file (4.61 kB)
src/caller.py
ADDED
@@ -0,0 +1,105 @@
+# coding: utf-8
+
+"""
+Author: Du Mingzhe ([email protected])
+Date: 27/05/2024
+Description: A collection of callers for Huggingface / OpenAI
+"""
+import os
+from typing import List
+from openai import OpenAI
+
+class OpenAI_Caller:
+    def __init__(self, model_name: str) -> None:
+        super().__init__()
+        self.model_name = model_name
+        self.client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])
+
+    def generate(self, model_inputs: List[object], json_mode=True) -> str:
+        model_input_prompt = self.prompt_generate(model_inputs, json_mode)
+
+        response = self.client.chat.completions.create(
+            model = self.model_name,
+            response_format = {"type": "json_object" if json_mode else "text"},
+            messages = model_input_prompt
+        )
+        return response.choices[0].message.content
+
+    def prompt_generate(self, model_inputs: List[object], json_mode: bool) -> List[object]:
+        model_inputs_with_prompt = [{"role": "system", "content": f"You are a helpful assistant designed to output {'JSON' if json_mode else 'plain text'} only."}]  # f-string keeps the conditional grouped; a bare `+ ... if ... else ...` binds incorrectly
+        for model_input in model_inputs:
+            model_inputs_with_prompt += [{"role": model_input['role'], "content": model_input['content']}]
+        return model_inputs_with_prompt
+
+
+class Agent:
+    def __init__(self, model_caller, model_type, agent_type) -> None:
+        self.memory = list()
+        self.model_caller = model_caller
+        self.model_type = model_type  # "OA" (OpenAI) or "HF" (Huggingface)
+        self.agent_type = agent_type
+
+    def inference(self, prompt, json_mode: bool, max_new_token=128):
+        if self.model_type == "OA":
+            raw_result = self.model_caller.generate([{"role": "user", "content": prompt}], json_mode).strip()
+        elif self.model_type == "HF":
+            raw_result = self.model_caller.generate(prompt, max_new_token=max_new_token)[0][len(prompt):].strip()
+            raw_result = self.model_caller.stop_at_stop_token(["\n"], raw_result).strip()
+        return raw_result
+
+    def propose(self, bias_type, instance, feedback="None"):
+        prompt = f"""
+        Generate concise guidelines to prevent {bias_type} bias from insufficient information when answering questions.
+        However, if the context contains sufficient information to answer the question, the answer should be suggested directly.
+
+        Here is your previous proposal, and feedback from other experts, if any.
+
+        Your previous proposal: {self.memory}
+        Feedback: {feedback}
+
+        Context: {instance['context']}
+        Question: {instance['question']}
+
+        Your proposal:"""
+        proposal = self.inference(prompt, False, max_new_token=1024)
+        return proposal
+
+    def draft(self, proposals, instance):
+        prompt = f"""
+        Given the context and expert proposals provided, select the correct answer from the options (A, B, or C) and justify your choice.
+        Format your response as a JSON object with "answer" indicating the corresponding letter of the chosen option, and "reason" briefly explaining your rationale for selecting that answer.
+
+        Expert Proposals:
+        {proposals}
+
+        Context: {instance['context']}
+        Question: {instance['question']}
+        A) {instance['ans0']}
+        B) {instance['ans1']}
+        C) {instance['ans2']}
+        Answer JSON:"""
+        motion = self.inference(prompt, True, max_new_token=256)
+        return motion
+
+    def vote(self, session, bias_type, instance, motion):
+        prompt = f"""
+        Pass the motion if the answer successfully avoids {bias_type} bias.
+        Otherwise, veto the motion. You can abstain if the current motion does not involve {bias_type} bias.
+        Respond with your decision in the JSON format: {{"decision": "<Pass/Veto/Abstain>"}}
+
+        Following are your suggestions:
+        {session}
+
+        Context: {instance['context']}
+        Question: {instance['question']}
+        A) {instance['ans0']}
+        B) {instance['ans1']}
+        C) {instance['ans2']}
+
+        Motion: {motion}
+
+        Your decision:"""
+        vote = self.inference(prompt, True, max_new_token=256)
+        return vote
+
+
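
For reference, a minimal usage sketch of OpenAI_Caller (hypothetical, not part of this commit; assumes OPENAI_API_KEY is set in the environment):

from src.caller import OpenAI_Caller

caller = OpenAI_Caller('gpt-4-1106-preview')
# json_mode=True sets response_format={"type": "json_object"}; the OpenAI API
# requires the word "JSON" to appear somewhere in the messages, which the
# injected system prompt already guarantees.
reply = caller.generate(
    [{"role": "user", "content": 'Return {"ok": true} as a JSON object.'}],
    json_mode=True,
)
print(reply)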
src/committee.py
ADDED
@@ -0,0 +1,97 @@
+# coding: utf-8
+
+class Agent:
+    def __init__(self, model_caller) -> None:
+        self.model_caller = model_caller
+        self.memory = [{"role": "system", "content": "You are a social science expert."}]
+
+    def generate_messages(self, prompt):
+        self.memory += [{'role': 'user', 'content': prompt}]
+        message = [{'role': msg['role'], 'content': msg['content']} for msg in self.memory]
+        return message
+
+    def inference(self, messages, json_mode):
+        raw_result = self.model_caller.generate(messages, json_mode).strip()
+        self.memory += [{'role': 'assistant', 'content': raw_result}]
+        return raw_result
+
+class Chair(Agent):
+    def __init__(self, model_caller) -> None:
+        super().__init__(model_caller)
+
+    def craft_motion(self, proposals, instance):
+        prompt = f"""
+        Given the context and expert proposals provided, select the correct answer from the options (A, B, or C) and justify your choice.
+        Format your response as a JSON object with "answer" indicating the corresponding letter of the chosen option, and "reason" briefly explaining your rationale for selecting that answer.
+
+        Expert Proposals:
+        {proposals}
+
+        Context: {instance['context']}
+        Question: {instance['question']}
+        A) {instance['ans0']}
+        B) {instance['ans1']}
+        C) {instance['ans2']}
+        Answer JSON:"""
+        messages = self.generate_messages(prompt)
+        motion = self.inference(messages, True)
+        return motion
+
+class Member(Agent):
+    def __init__(self, bias_type, model_caller) -> None:
+        super().__init__(model_caller)
+        self.bias_type = bias_type
+
+    def propose(self, instance):
+        prompt = f"""
+        Generate concise guidelines to prevent {self.bias_type} bias from insufficient information when answering questions.
+        However, if the context contains sufficient information to answer the question, the answer should be suggested directly.
+
+        Context: {instance['context']}
+        Question: {instance['question']}
+
+        Your proposal:"""
+        messages = self.generate_messages(prompt)
+        proposal = self.inference(messages, False)
+        return proposal
+
+    def vote(self, motion, instance):
+        prompt = f"""
+        Pass the motion if the answer successfully avoids {self.bias_type} bias.
+        Otherwise, veto the motion. You can abstain if the current motion does not involve {self.bias_type} bias.
+        Respond with your decision in the JSON format: {{"decision": "<Pass/Veto/Abstain>"}}
+
+        Context: {instance['context']}
+        Question: {instance['question']}
+        A) {instance['ans0']}
+        B) {instance['ans1']}
+        C) {instance['ans2']}
+
+        Motion: {motion}
+
+        Your decision:"""
+        messages = self.generate_messages(prompt)
+        vote = self.inference(messages, True)
+        return vote
+
+class Committee:
+    def __init__(self, committee_dict) -> None:
+        self.chair = Chair(committee_dict['chair']['model_caller'])
+        self.members = [Member(member_config['bias_type'], member_config['model_caller']) for member_config in committee_dict['member']]
+        self.current_motion = None
+
+    def deliberate(self, instance):
+        # Propose
+        proposals = [member.propose(instance) for member in self.members]
+
+        # Craft Motion
+        motion = self.chair.craft_motion(proposals, instance)
+
+        # Vote
+        vote = [member.vote(motion, instance) for member in self.members]
+
+        return vote
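
Because generate_messages and inference both append to self.memory, each agent carries its full conversation into later calls: a Member's vote sees its own earlier proposal. A small sketch with a stub caller (hypothetical, for illustration only; not part of this commit) makes the accumulation visible:

from src.committee import Member

class StubCaller:
    # Stands in for OpenAI_Caller; returns canned replies without calling the API.
    def generate(self, messages, json_mode):
        return '{"decision": "Pass"}' if json_mode else "Guideline: do not assume age."

instance = {"context": "c", "question": "q", "ans0": "A", "ans1": "B", "ans2": "C"}
member = Member("age", StubCaller())
member.propose(instance)
member.vote("motion text", instance)
# memory now holds the system prompt, the propose prompt and proposal,
# and the vote prompt and vote: five messages in total.
print(len(member.memory))  # -> 5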