upload autogen examples
- examples/autogen/eval.py +239 -0
- examples/autogen/eval.sh +2 -0
- examples/autogen/run.sh +6 -0
- examples/autogen/stat.py +34 -0
- examples/autogen/test.py +208 -0
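
Taken together, the four files form a small end-to-end evaluation loop: run.sh drives test.py over task indices 0-99 (one AutoGen group chat per task, each transcript written to `<index>.log`), eval.sh then replays those logs through eval.py, which uses GPT-4 as a judge and writes its verdicts to eval.log, and stat.py finally tallies those verdicts into an accuracy figure over the 100 tasks.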
examples/autogen/eval.py
ADDED
@@ -0,0 +1,239 @@
import json
import re
import requests
from datetime import date, timedelta
from openai import OpenAI
import csv


def resolve(slot):
    """Normalize relative place/date/time values in a gold slot string."""
    # place_name: map the relative "my city" to the fixed test location.
    pos = slot.find("place_name")
    if pos != -1:
        bgpos = slot[pos:].find(":")
        citypos = slot[pos + bgpos:].find("'")
        oldcity = slot[pos + bgpos + 1:pos + bgpos + citypos]
        oldcity = oldcity.strip()
        if oldcity == "my city":
            newcity = "new york"
            slot = slot[:pos + bgpos + 1] + str(newcity) + slot[pos + bgpos + citypos:]
    # date: turn relative expressions ("today", "this week", ...) into concrete dates.
    pos = slot.find("date")
    if pos != -1:
        bgpos = slot[pos:].find(":")
        datepos = slot[pos + bgpos:].find("'")
        olddate = slot[pos + bgpos + 1:pos + bgpos + datepos]
        olddate = olddate.strip()
        today = date.today()
        weekday = date.isoweekday(today) % 7
        newdate2 = None
        if olddate == "today":
            newdate = str(today)
        elif olddate == "tomorrow":
            newdate = today + timedelta(days=1)
        elif olddate == "this week":
            weekday = date.isoweekday(today) % 7
            newdate2 = today + timedelta(days=7 - weekday)
            newdate = today - timedelta(days=weekday - 1)
        elif olddate == "last week":
            weekday = date.isoweekday(today) % 7
            newdate2 = today - timedelta(days=weekday)
            newdate = today - timedelta(days=weekday + 6)
        elif olddate == "this weekend":
            weekday = date.isoweekday(today) % 7
            newdate = today + timedelta(days=6 - weekday)
            newdate2 = today + timedelta(days=7 - weekday)
        elif olddate == "sunday":
            weekday = date.isoweekday(today) % 7
            newdate = today + timedelta(7 - weekday)
        elif olddate == "august fifteenth":
            if str(today).find("-") != -1:
                ypos = str(today).find("-")
                newdate = str(today)[:ypos + 1] + "08-15"
            else:
                newdate = "08-15"
        else:
            newdate = olddate
        slot = slot[:pos + bgpos + 1] + str(newdate) + slot[pos + bgpos + datepos:]
        if newdate2:
            slot = slot + ",'date2:" + str(newdate2) + "'"
    # timeofday: remember it (to pick am/pm below) and drop it from the slot string.
    pos = slot.find("timeofday")
    oldtimeofday = ""
    if pos != -1:
        bgpos = slot[pos:].find(":")
        datepos = slot[pos + bgpos:].find("'")
        oldtimeofday = slot[pos + bgpos + 1:pos + bgpos + datepos]
        oldtimeofday = oldtimeofday.strip()
        cpos = slot[pos:].rfind(",")
        if cpos != -1:
            slot = slot[:cpos] + slot[pos + bgpos + datepos + 1:]
        else:
            cpos = slot[pos:].find(",")
            if cpos == -1:
                cbpos = slot[:pos].rfind(",")
                if cbpos == -1:
                    slot = slot[:pos] + slot[pos + bgpos + datepos + 1:]
                else:
                    slot = slot[:cbpos] + slot[pos + bgpos + datepos + 1:]
            else:
                slot = slot[:pos] + slot[cpos + 1:]
    # time: spell out number words as HH:MM and attach am/pm from the timeofday hint.
    pos = slot.find("time:")
    if pos == -1:
        pos = slot.find("time :")
    if pos != -1:
        bgpos = slot[pos:].find(":")
        timepos = slot[pos + bgpos:].find("'")
        oldtime = slot[pos + bgpos + 1:pos + bgpos + timepos]
        oldtime = oldtime.strip()
        newtime = oldtime.replace("five", "05:00").replace("six", "06:00").replace("nine", "09:00").replace("ten", "10:00").replace("three", "3:00").replace("one", "1:00")
        if not (newtime.endswith("am") or newtime.endswith("pm")):
            if oldtimeofday == "morning":
                newtime = newtime + " am"
            elif oldtimeofday == "evening":
                newtime = newtime + " pm"
            elif oldtimeofday.find("afternoon") != -1:
                newtime = newtime + " pm"
        slot = slot[:pos] + "time:" + str(newtime) + slot[pos + bgpos + timepos:]
    else:
        if oldtimeofday.find("afternoon") != -1:
            newtime = "12:00"
            newtime2 = "17:00"
            slot = slot + ",'from_time:" + newtime + "','to_time:" + newtime2 + "'"
    # time2: the end of a time range becomes to_time.
    pos = slot.find("time2:")
    if pos == -1:
        pos = slot.find("time2 :")
    if pos != -1:
        bgpos = slot[pos:].find(":")
        timepos = slot[pos + bgpos:].find("'")
        oldtime = slot[pos + bgpos + 1:pos + bgpos + timepos]
        oldtime = oldtime.strip()
        newtime = oldtime.replace("five", "05:00").replace("six", "06:00").replace("nine", "09:00").replace("ten", "10:00").replace("three", "3:00").replace("one", "1:00")
        if not (newtime.endswith("am") or newtime.endswith("pm")):
            if oldtimeofday == "morning":
                newtime = newtime + " am"
            elif oldtimeofday == "evening":
                newtime = newtime + " pm"
            elif oldtimeofday.find("afternoon") != -1:
                newtime = newtime + " pm"
        slot = slot[:pos] + "to_time:" + str(newtime) + slot[pos + bgpos + timepos:]
    slot = slot.replace("'time:", "'from_time:")

    return slot


def read_data(file_path):
    """Read (iid, query, intent, slot) rows from the test CSV and resolve the gold slots."""
    queries = []
    with open(file_path, newline='') as csvfile:
        spamreader = csv.reader(csvfile, delimiter=',', quotechar='"')
        count = 0
        for row in spamreader:
            if count == 0:  # skip the header row
                count += 1
                continue
            iid = row[0]
            slot = row[3]
            slot = slot.strip('[]')
            slot = resolve(slot)
            query = {"iid": iid, "query": row[1], "intent": row[2], "slot": slot}
            queries.append(query)
    return queries


if __name__ == "__main__":
    client = OpenAI()
    tasks = read_data("~/data/test.csv")
    # intp recovers the predicted intent from the request URL; resp matches a successful server response.
    intp = re.compile(r"http[s]*://.+intent=[a-z_&=0-9\-]+")
    resp = re.compile(r'{"code":"SUCCESS","data"(.)+')
    for i in range(0, 100):
        print("-----*****------")
        print(i)
        data = []
        with open(str(i) + ".log") as f:
            lines = f.readlines()
            for line in lines:
                data.append(line)
        content = "\n".join(data)
        intentf = intp.search(content)
        goldintent = tasks[i]["intent"]
        if intentf:
            intent = intentf.group(0)
            if intent.find("weather") != -1 or intent.find("news_query") != -1 or intent.find("qa") != -1 or intent.find("stock") != -1 or intent.find("general") != -1 or intent.find("currency") != -1:
                # Informational intents are judged on the predicted intent alone.
                if goldintent.find("weather") != -1 or goldintent.find("news_query") != -1 or goldintent.find("qa") != -1 or goldintent.find("stock") != -1 or goldintent.find("general") != -1 or goldintent.find("currency") != -1:
                    print("ChatCompletionMessage(content='')")
                else:
                    print("ChatCompletionMessage(content='the result is wrong. intent error.')")
                    continue
            else:
                resf = resp.search(content)
                if resf:
                    sizer = len(resf.groups())
                    res = resf.group(sizer - 1)
                    # Ask GPT-4 to compare the gold slot with the server response.
                    msg = [{"role": "system", "content": "please judge the following result is right or wrong. if slot is {} and result also {}, it is right. if slot are same, it is right. If intent is datetime_query, then the slot date:" + str(date.today()) + " can be neglected. The slot change_to and change_amount are not the same. if time is same, no matter if has am/pm, it is right. if slot event_name or list_name has difference, then can neglect it. if slot is descriptor:all, then can neglect it, if slot is descriptor:default, then can not neglect it, other descriptor can not neglect. the slot person is same with slot from_person and to_person. if slot is query:song name, can be neglected. the place_name will be resolved, so if it is added by state name and country name, it is right."}, {"role": "user", "content": "The golden slot is :{" + tasks[i]["slot"] + "}. The result is " + res}]
                    print(msg)
                    completion = client.chat.completions.create(
                        model="gpt-4",
                        temperature=0.0,
                        messages=msg,
                    )
                    print(completion.choices[0].message)
                else:
                    print("ChatCompletionMessage(content='the result is wrong, no server response')")
                    continue
        else:
            if content.find("\"results\"") == -1 and content.find("\"data\"") == -1 and content.find("\"success\"") == -1 and content.find("\"amount\"") == -1:
                if content.find("general_quirky") == -1:
                    print("ChatCompletionMessage(content='the result is wrong. no server response.')")
                    continue
            if goldintent.find("weather") != -1 or goldintent.find("news_query") != -1 or goldintent.find("qa") != -1 or goldintent.find("stock") != -1 or goldintent.find("general") != -1 or goldintent.find("currency") != -1:
                # Informational tasks: judge the whole conversation log.
                msg = [{"role": "system", "content": "please judge the following content finish the task is right or wrong. Must has the server response, if not, it's wrong. if intent is general_quirky, if it's a chit-chat like 'are you happy', and there is no chit-chat reply to user, it is wrong. If intent is weather_query, need to provide the weather condition, and if no weather condition, it is wrong. if intent is qa, the response need to provide the answer to the question, and if only provide url, it is wrong. if ask for currency, the result need provide the conversion rate, the currency name may be Proper Noun, it is right. also need to check if the annotation are the same with server response"}, {"role": "user", "content": "the task is " + tasks[i]["query"] + ". The annotation is " + tasks[i]["slot"] + "\n" + "\n".join(data) + "\nyour answer is :"}]
                print(msg)
                try:
                    completion = client.chat.completions.create(
                        model="gpt-4",
                        temperature=0.0,
                        messages=msg,
                    )
                    print(completion.choices[0].message)
                except:
                    # Retry with a truncated log if the full transcript is too long for the model.
                    completion = client.chat.completions.create(
                        model="gpt-4",
                        temperature=0.0,
                        messages=[{"role": "system", "content": "please judge the following content finish the task is right or wrong. Must has the server response, if not, it's wrong. if intent is general_quirky, if it's a chit-chat like 'are you happy', and there is no chit-chat reply to user, it is wrong. If intent is weather_query, need to provide the weather condition, and if no weather condition, it is wrong. if intent is qa, the response need to provide the answer to the question, and if only provide url, it is wrong. if ask for currency, the result need provide the conversion rate, the currency name may be Proper Noun, it is right. also need to check if the annotation are the same with server response"}, {"role": "user", "content": "the task is " + tasks[i]["query"] + ". The annotation is " + tasks[i]["slot"] + "\n" + "\n".join(data)[-16000:] + "\nyour answer is :"}],
                    )
                    print(completion.choices[0].message)
            else:
                # Transactional tasks: require a {"code":"SUCCESS"} server response.
                msg = [{"role": "system", "content": "please judge the following content finish the task is right or wrong. Must has the server response {\"code\":\"SUCCESS\"}, if not, it's wrong. if response is not supported intent, it is wrong."}, {"role": "user", "content": "the task is " + tasks[i]["query"] + ". The annotation is " + tasks[i]["slot"] + "\n" + "\n".join(data) + "\nyour answer is :"}]
                print(msg)
                try:
                    completion = client.chat.completions.create(
                        model="gpt-4",
                        temperature=0.0,
                        messages=[{"role": "system", "content": "please judge the following content finish the task is right or wrong. Must has the server response {\"code\":\"SUCCESS\"}, if not, it's wrong. if response is not supported intent, it is wrong. If the server response need further more information, it is right."}, {"role": "user", "content": "the task is " + tasks[i]["query"] + ". The annotation is " + tasks[i]["slot"] + "\n" + "\n".join(data) + "\nyour answer is :"}],
                    )
                    print(completion.choices[0].message)
                except:
                    completion = client.chat.completions.create(
                        model="gpt-4",
                        temperature=0.0,
                        messages=[{"role": "system", "content": "please judge the following content finish the task is right or wrong. Must has the server response {\"code\":\"SUCCESS\"}, if not, it's wrong. if response is not supported intent, it is wrong. If the server response need further more information, it is right."}, {"role": "user", "content": "the task is " + tasks[i]["query"] + ". The annotation is " + tasks[i]["slot"] + "\n" + "\n".join(data)[-16000:] + "\nyour answer is :"}],
                    )
                    print(completion.choices[0].message)
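
As a quick illustration of what resolve() does to a gold slot string, here is a minimal, hypothetical sketch; the quoted 'key : value' slot format and the sample values are assumptions based on how read_data() strips and passes row[3], not values taken from the actual test.csv:

# Hypothetical usage sketch for resolve(); assumes it is run from examples/autogen.
from eval import resolve

sample_slot = "'place_name : my city','date : tomorrow'"  # made-up slot in the CSV's quoted format
print(resolve(sample_slot))
# 'my city' is rewritten to the fixed test location "new york" and 'tomorrow' to
# tomorrow's ISO date, e.g. 'place_name :new york','date :2025-03-02'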
examples/autogen/eval.sh
ADDED
@@ -0,0 +1,2 @@
python3 eval.py > eval.log
python3 stat.py
examples/autogen/run.sh
CHANGED
@@ -0,0 +1,6 @@
#!/bin/bash
for f in $(seq 0 99);
do
    echo $f
    timeout 240s python3 test.py $f > $f.log
done
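
Each iteration writes the full group-chat transcript for one task to `$f.log`; the 240-second timeout simply abandons chats that do not converge, and eval.py later scores whatever made it into the log.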
examples/autogen/stat.py
ADDED
@@ -0,0 +1,34 @@
import re

# Tally per-task judgments from eval.log: each task block starts with a
# "-----*****------" separator and contains one or more
# "ChatCompletionMessage(...)" judge lines printed by eval.py.
wrong = 0
right = 0
wrongcount = 0
rightcount = 0
judgesent = re.compile(r"ChatCompletionMessage")
with open("eval.log") as f:
    lines = f.readlines()
    for line in lines:
        lowline = line.lower()
        if line.startswith("-----**"):
            # Close out the previous task block. Note: the very first separator,
            # and blocks whose only judgment is content='', have neither counter
            # above zero and therefore fall into the "error" branch.
            if wrong > 0:
                wrongcount += 1
            elif right > 0:
                rightcount += 1
            else:
                print("error")
            wrong = 0
            right = 0
        elif judgesent.match(line):
            if lowline.find("wrong") != -1 or lowline.find("incorrect") != -1 or lowline.find("not completed") != -1:
                wrong += 1
            elif lowline.find("right") != -1 or lowline.find("correct") != -1:
                right += 1
            elif lowline.find("content=''") != -1:
                pass
            else:
                print("error")
# Close out the last block; accuracy assumes 100 tasks in total.
if wrong > 0:
    wrongcount += 1
else:
    rightcount += 1
acc = float(rightcount) / 100
print(acc, "right", rightcount, "wrong", wrongcount)
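
For reference, a hypothetical eval.log fragment in the shape stat.py expects (the separator, index, and ChatCompletionMessage lines mirror what eval.py prints; the verdict wording itself is invented):

    -----*****------
    0
    ChatCompletionMessage(content='The result is right, the slots match.')
    -----*****------
    1
    ChatCompletionMessage(content='The result is wrong. no server response.')

This fragment would count task 0 as right and task 1 as wrong; the final accuracy always divides by a hard-coded total of 100 tasks.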
examples/autogen/test.py
ADDED
@@ -0,0 +1,208 @@
import autogen
from autogen import ConversableAgent, UserProxyAgent, config_list_from_json
import requests
from typing_extensions import Annotated
from datetime import date, datetime

import csv
import sys
import json

config_list = autogen.config_list_from_json(env_or_file="OAI_CONFIG_LIST")
llm_config = {"config_list": config_list}

# Normalizes date/time slots; can call the registered get_time() function for today's date.
time_assistant = autogen.AssistantAgent(
    name="Time_assistant",
    system_message="Read the time params, and convert to formatted time. If has date, call the user_proxy_auto get_time function to get today's date, then calculate and format the date mentioned in the params. The time is 10:00. If has time, the time format should be 10:00. If no time specified, can return default time. If no date and time params, just skip.",
    llm_config=llm_config,
)
# Resolves relative locations; the current location is fixed to new york for the tests.
location_assistant = autogen.AssistantAgent(
    name="Location",
    system_message="Read the location params, and convert to formatted location. The current location is new york.",
    llm_config=llm_config,
)
# Predicts domain, intent and slots for the user query.
intent_assistant = autogen.AssistantAgent(
    name="intent",
    llm_config=llm_config,
    system_message="\
Read the examples and results, and predict intent for the sentence. For 'set the alarm to two pm', first predict the domain, as domain:alarm, then the intent and slots, as the format: intent:alarm_set,time:two pm. \n\
the intents are calendar:calendar_set,calendar_remove,calendar_query\n\
lists:lists_query,lists_remove,lists_createoradd\n\
music:play_music,music_likeness,playlists_createoradd,music_settings,music_dislikeness,music_query\n\
news:news_query,news_subscription\n\
alarm:alarm_set,alarm_query,alarm_remove,alarm_change\n\
email:email_sendemail,email_query,email_querycontact,email_subscription,email_addcontact,email_remove\n\
iot:iot_hue_lightother,iot_hue_lightcolor,iot_coffee,iot_hue_lightdim,iot_hue_lightup,audio_volume_mute,iot_hue_lightoff,audio_volume_up,iot_wemo_off,audio_volume_other,iot_cleaning,iot_wemo_on,audio_volume_down\n\
weather:weather_query\n\
datetime:datetime_query,datetime_convert\n\
stock:qa_stock\n\
qa:qa_factoid,general_quirky,qa_definition,general_joke,qa_maths\n\
greet:general_greet\n\
currency:qa_currency\n\
transport:transport_taxi,transport_ticket,transport_query,transport_traffic\n\
recommendation:recommendation_events,recommendation_movies,recommendation_locations\n\
podcast:play_podcasts\n\
audiobook:play_audiobook\n\
radio:play_radio,radio_query\n\
takeaway:takeaway_query,takeaway_order\n\
social:social_query,social_post\n\
cooking:cooking_recipe\n\
phone:phone_text,phone_notification\n\
game:play_game\
",
)
# old currency server was https://www.amdoren.com/api/currency.php?api_key={key}&from={currency}&to={currency2}&amount={amount}
# Maps the predicted intent and slots onto the matching server URL.
url_assistant = autogen.AssistantAgent(
    name="url_assistant",
    system_message="Read the params, and choose the url from the servers' url list:\
qa server is http://api.serpstack.com/search?access_key={key}&query={query}\n\
news query server is http://api.mediastack.com/v1/news?access_key={key}&keywords={keyword}&date={date}&sort=published_desc\n\
news subscription server http://214.10.10.4:3020/news,intent(news_subscription),news_topic,\
weather server first request https://geocoding-api.open-meteo.com/v1/search?name={place_name}&count=10&language=en&format=json to get latitude and longitude, then request https://api.open-meteo.com/v1/forecast?latitude={latitude}&longitude={longitude}&hourly=temperature_2m&models=gfs_seamless\n\
stock server is first to get the stock symbol http://api.serpstack.com/search?access_key={key}&query={name} stock symbol , then request to this server http://api.marketstack.com/v1/eod?access_key={key}&symbols={symbol}&limit=5\n\
currency server is https://api.freecurrencyapi.com/v1/latest?apikey={key}&base_currency={currency}&currencies={currency2}\n\
http://214.10.10.4:3000/alarm, intent(alarm_query,alarm_set),event_name,descriptor,time,from_time,to_time,\
http://214.10.10.4:3001/audiobook,intent(play_audiobook), player_setting,house_place,media_type,descriptor,audiobook_name,author_name,\
http://214.10.10.4:3002/calendar,intent(calendar_query,calendar_remove,calendar_set),event_name,descriptor,person,relation,date,time,from_time,to_time,\
http://214.10.10.4:3003/cooking,intent(cooking_recipe),food_type,descriptor,\
http://214.10.10.4:3004/datetime,intent(datetime_convert,datetime_query),place_name,descriptor,time_zone,time_zone2,date,time,time2,\
http://214.10.10.4:3005/email,intent(email_query,email_sendemail),setting,person,to_person,from_person,relation,to_relation,from_relation,email_folder,time,date,email_address,app_name,query,content,personal_info,\
http://214.10.10.4:3006/game,intent(play_game),game_name,\
http://214.10.10.4:3007/iot,intent(iot_coffee,iot_hue_lightchange,iot_hue_lightdim,iot_hue_lightup,audio_volume_mute,iot_hue_lightoff,audio_volume_up,iot_wemo_off,audio_volume_other,iot_cleaning,iot_wemo_on,audio_volume_down),device_type,house_place,time,color_type,change_to,change_amount,item_name,setting,\
http://214.10.10.4:3008/lists,intent(lists_query,lists_remove,lists_createoradd),list_name,item_name,descriptor,time,date,\
http://214.10.10.4:3009/music,intent(play_music,music_likeness,playlists_createoradd,music_settings,music_dislikeness,music_query),player_setting,descriptor,artist_name,song_name,playlist_name,music_genre,query,\
http://214.10.10.4:3010/phone,intent(phone_text,phone_notification),device_type,event_name,text,\
http://214.10.10.4:3011/podcasts,intent(play_podcasts),podcast_name,player_setting,podcast_descriptor,\
http://214.10.10.4:3013/radio,intent(play_radio,radio_query),radio_name,app_name,person_name,music_genre,device_type,house_place,player_setting,descriptor,query,time,\
http://214.10.10.4:3014/recommendation,intent(recommendation_events,recommendation_movies,recommendation_locations),business_type,food_type,movie_type,movie_name,date,place_name,event_name,descriptor,\
http://214.10.10.4:3015/social,intent(social_query,social_post),media_type,person,business_name,content,date,descriptor,\
http://214.10.10.4:3017/takeaway,intent(takeaway_query,takeaway_order),food_type,order_type,business_type,business_name,place_name,date,time,descriptor,\
http://214.10.10.4:3018/transport,intent(transport_taxi,transport_ticket,transport_query,transport_traffic),transport_agency,transport_type,business_type,business_name,place_name,to_place_name,from_place_name,query,date,time,descriptor,\n\
then all the url format should be http://214.10.10.4:3002/calendar?intent=calendar_remove&event_name=meeting",
    #generate url and query (url, query). query should be json format, like this\"{\\\"intent\\\": \\\"calendar_remove\\\", \\\"iid\\\": \\\"7890\\\", \\\"event_name\\\": \\\"haircut appointment\\\", \\\"date\\\": \\\"2024-11-20\\\"}\". \
    #",
    llm_config=llm_config,
)
#request_assistant = autogen.AssistantAgent(
#    name="Request",
'''
system_message="read the params, and send and receive the requests. iid should also be string. choose the url from the servers' url list:\
qa server is http://api.serpstack.com/search? access_key = {key}& query = {query}\n\
news query server is http://api.mediastack.com/v1/news?access_key={key}&keywords={keyword}&date={date}&sort=published_desc\n\
news subscription server http://214.10.10.4:3020/news,intent(news_subscription),iid,news_topic,\
weather server first request https://geocoding-api.open-meteo.com/v1/search?name={place_name}&count=10&language=en&format=json to get latitude and longitude, then request https://api.open-meteo.com/v1/forecast?latitude={latitude}&longitude={longitude}&hourly=temperature_2m&models=gfs_seamless\n\
stock server is first to get the stock symbol http://api.serpstack.com/search? access_key = {key}& query = {name} stock symbol , then request to this server http://api.marketstack.com/v1/eod? access_key = {key}& symbols = {symbol}&limit=5\n\
currency server is https://www.amdoren.com/api/currency.php?api_key={key}&from={currency}&to={currency2}&amount={amount}\n\
http://214.10.10.4:3000/alarm, intent(alarm_query,alarm_set),iid,event_name,descriptor,time,from_time,to_time,\
http://214.10.10.4:3001/audiobook,intent(play_audiobook), iid,player_setting,house_place,media_type,descriptor,audiobook_name,author_name,\
http://214.10.10.4:3002/calendar,intent(calendar_query,calendar_remove,calendar_set),iid,event_name,descriptor,person,relation,date,time,from_time,to_time,\
http://214.10.10.4:3003/cooking,intent(cooking_recipe),iid,food_type,descriptor,\
http://214.10.10.4:3004/datetime,intent(datetime_convert,datetime_query),iid,place_name,descriptor,time_zone,time_zone2,date,time,time2,\
http://214.10.10.4:3005/email,intent(email_query,email_sendemail),iid,setting,person,to_person,from_person,relation,to_relation,from_relation,email_folder,time,date,email_address,app_name,query,content,personal_info,\
http://214.10.10.4:3006/game,intent(play_game),iid,game_name,\
http://214.10.10.4:3007/iot,intent(iot_coffee,iot_hue_lightchange,iot_hue_lightdim,iot_hue_lightup,audio_volume_mute,iot_hue_lightoff,audio_volume_up,iot_wemo_off,audio_volume_other,iot_cleaning,iot_wemo_on,audio_volume_down),iid,device_type,house_place,time,color_type,change_amount,item_name,setting,\
http://214.10.10.4:3008/lists,intent(lists_query,lists_remove,lists_createoradd),iid,list_name,item_name,descriptor,time,date,\
http://214.10.10.4:3009/music,intent(play_music,music_likeness,playlists_createoradd,music_settings,music_dislikeness,music_query),iid,player_setting,descriptor,artist_name,song_name,playlist_name,music_genre,query,\
http://214.10.10.4:3010/phone,intent(phone_text,phone_notification),iid,device_type,event_name,text,\
http://214.10.10.4:3011/podcasts,intent(play_podcasts),iid,podcast_name,player_setting,podcast_descriptor,\
http://214.10.10.4:3013/radio,intent(play_radio,radio_query),iid,radio_name,app_name,person_name,music_genre,device_type,house_place,player_setting,descriptor,query,time,\
http://214.10.10.4:3014/recommendation,intent(recommendation_events,recommendation_movies,recommendation_locations),iid,business_type,food_type,movie_type,movie_name,date,place_name,event_name,descriptor,\
http://214.10.10.4:3015/social,intent(social_query,social_post),iid,media_type,person,business_name,content,date,descriptor,\
http://214.10.10.4:3017/takeaway,intent(takeaway_query,takeaway_order),iid,food_type,order_type,business_type,business_name,place_name,date,time,descriptor,\
http://214.10.10.4:3018/transport,intent(transport_taxi,transport_ticket,transport_query,transport_traffic),iid,transport_agency,transport_type,business_type,business_name,place_name,to_place_name,from_place_name,query,date,time,descriptor,\n\
generate url and query then call the url function. the function params are (url, query). query should be json format, like this\"{\\\"intent\\\": \\\"calendar_remove\\\", \\\"iid\\\": \\\"7890\\\", \\\"event_name\\\": \\\"haircut appointment\\\", \\\"date\\\": \\\"2024-11-20\\\"}\". for url request, use the functions you have been provided with.",
'''
request_assistant = autogen.AssistantAgent(
    name="Request",
    system_message="for url and query params, use the request functions you have been provided with.",
    llm_config=llm_config,
)
genresponse_assistant = autogen.AssistantAgent(
    name="GenResponse",
    system_message="generate response for the user use server response.",
    llm_config=llm_config,
)
# Product manager: orchestrates the other assistants for each task.
pm = autogen.AssistantAgent(
    name="Product_manager",
    system_message="you are controlling smart home system, you have intent assistant, time_assistant, location_assistant, url_assistant and request_assistant to complete the user's task. You should first use intent to complete the intent prediction. Then if the result has time or location params, please try to ask time_assistant or location_assistant to solve the time and location. Then you choose the url using url_assistant. At last you should use request_assistant to send and receive request through functions from other servers such as weather server and respond to user. You should generate response for the user, and tell manager to finalize the task.",
    llm_config=llm_config,
)
user_proxy_auto = autogen.UserProxyAgent(
    name="user_proxy",
    human_input_mode="NEVER",
    is_termination_msg=lambda x: x.get("content", "").find("TERMINATE") >= 0,
    default_auto_reply="if finish task, TERMINATE. If not, solve again.",
    #is_termination_msg=lambda x: x.get("content", "") and x.get("content", "").rstrip().endswith("TERMINATE"),
    #code_execution_config={
    #    "last_n_messages": 0,
    #    "work_dir": "tasks",
    #    "use_docker": False,
    #},  # Please set use_docker=True if docker is available to run the generated code. Using docker is safer than running the generated code directly.
    system_message="Also execute the function and report the result.",
)
# system_message="you are controlling smart home system, you have intent, time_assistant, location_assistant, url_assistant and request_assistant, genresponse_assistant to complete the user's task. You should first use intent to complete the intent prediction. Then if the result has time or location params, please try to ask time_assistant or location_assistant to solve the time and location. Then you choose the url using url_assistant. At last you should use request_assistant to send and receive request through functions from other servers such as weather server and use genresponse assistant to generate response to user according to the server result to finalize the task.",
#)

@user_proxy_auto.register_for_execution()
@time_assistant.register_for_llm(description="get today's date.")
def get_time():
    results = date.today()
    return str(results)

@user_proxy_auto.register_for_execution()
@request_assistant.register_for_llm(description="url request execution.")
def url_request(
    url: Annotated[str, "url to request"],
) -> str:
    response = requests.get(url)
    print(response.text)
    result = ""
    if response.status_code >= 400:
        result = "code " + str(response.status_code) + "."  # note: currently not included in the return value
    else:
        pass
    return f"server response {response.text}"

'''
user_proxy = autogen.UserProxyAgent(
    name="User_Proxy",
    human_input_mode="ALWAYS",  # ask human for input at each step
    is_termination_msg=lambda x: x.get("content", "") and x.get("content", "").rstrip().endswith("TERMINATE"),
    code_execution_config={
        "last_n_messages": 1,
        "work_dir": "tasks",
        "use_docker": False,
    },  # Please set use_docker=True if docker is available to run the generated code. Using docker is safer than running the generated code directly.
)
'''

def read_data(file_path):
    """Read (iid, query) pairs from the test CSV; the first row is a header."""
    queries = []
    with open(file_path, newline='') as csvfile:
        spamreader = csv.reader(csvfile, delimiter=',', quotechar='"')
        count = 0
        for row in spamreader:
            if count == 0:
                count += 1
                continue
            iid = row[0]
            query = {"iid": iid, "query": row[1]}
            queries.append(query)
    return queries

if __name__ == "__main__":
    tasks = read_data("~/data/test.csv")
    # Run a single task, selected by the index passed on the command line.
    index = int(sys.argv[1])
    tasks = tasks[index:index + 1]

    #termination = TextMentionTermination("exit")  # Type 'exit' to end the conversation.
    #team = RoundRobinGroupChat([web_surfer, assistant, user_proxy], termination_condition=termination)
    #await Console(team.run_stream(task="Find information about AutoGen and write a short summary."))
    groupchat = autogen.GroupChat(agents=[user_proxy_auto, pm, intent_assistant, time_assistant, location_assistant, url_assistant, request_assistant], messages=[], max_round=12)
    manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=llm_config, system_message="Group Chat Manager. You should finalize the task after product manager generate response to user.")
    for task in tasks:
        test_task = json.dumps(task)
        #Console(manager.run_stream(task="Find information about AutoGen and write a short summary."))
        user_proxy_auto.initiate_chat(
            manager, message=test_task
        )
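
For context, the message each run injects into the group chat is simply the JSON-serialized CSV row; a minimal sketch follows, with hypothetical iid and query values (the iid echoes the commented example in the old url prompt, not a real test.csv row):

# Hypothetical sketch of the task message test.py builds and sends to the group chat manager.
import json

task = {"iid": "7890", "query": "remove my haircut appointment"}  # illustrative values
print(json.dumps(task))
# {"iid": "7890", "query": "remove my haircut appointment"}
# run.sh launches one such chat per row:  timeout 240s python3 test.py <index> > <index>.log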