liqiang888 committed
Commit 941886f · verified · 1 Parent(s): 949547b

Delete data_analysis/compute_answer.py

Files changed (1)
  1. data_analysis/compute_answer.py +0 -120
data_analysis/compute_answer.py DELETED
@@ -1,120 +0,0 @@
import ast
import json
import os

from openai import OpenAI
from tqdm import tqdm

client = OpenAI(api_key="")
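
# Load the benchmark samples (one record per line).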
samples = []
with open("./data.json", "r") as f:
    for line in f:
        samples.append(ast.literal_eval(line.strip()))
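

# LLM-as-judge: GPT-4o grades each prediction against the reference answer and
# is instructed to reply with only "True" or "False".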
def evaluate_prediction(client, question, answer, prediction):
    prompt = (f"Please judge whether the generated answer is right or wrong. We require that the "
              f"predicted answer gives a clear final answer, not just a calculation process or a "
              f"breakdown of ideas. "
              f"The question is {question}. The true answer is \n {answer}. \n The predicted answer is \n {prediction}.\n "
              f"If the predicted answer is right, please output True. Otherwise output False. "
              f"Don't output any other text content. You can only output True or False.")
    response = client.chat.completions.create(
        model="gpt-4o-2024-05-13",
        messages=[
            {
                "role": "user",
                "content": [
                    {
                        "type": "text",
                        "text": prompt
                    }
                ]
            }
        ],
        temperature=0,
        max_tokens=256,
        top_p=1,
        frequency_penalty=0,
        presence_penalty=0
    )
    return response.choices[0].message.content


def read_txt(path):
    with open(path, "r") as f:
        return f.read()
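

# Model whose saved predictions are being graded; uncomment an alternative to
# switch. Predictions and result files live under save_path/<model>/.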
save_path = "./save_process"
model = "gpt-3.5-turbo-0125"
# model = 'gpt-4o-2024-05-13'
# model = 'llama-3-8b-instruct'
# model = 'gpt-3.5-turbo-0125-autoagent'
# model = 'gpt-4o-2024-05-13-autoagent'
# model = 'llava-v1.5-13b'
# model = 'llama3-autoagent'

results = []
save_f = open(os.path.join(save_path, model, "results.json"), "w")
save_process = open(os.path.join(save_path, model, "results_process.json"), "w")
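
# Grade every question of every challenge, streaming per-question records to
# results_process.json and per-challenge verdict lists to results.json.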
for sample in tqdm(samples):
    result = []
    if len(sample["questions"]) > 0:
        # Load this challenge's saved predictions (one record per line).
        predicts = []
        with open(os.path.join(save_path, model, sample["id"] + ".json"), "r") as f:
            for line in f:
                predicts.append(ast.literal_eval(line.strip()))

        for qid, question_name in enumerate(tqdm(sample["questions"])):
            question = read_txt(os.path.join("./data", sample["id"], question_name + ".txt"))
            pre = predicts[qid]
            try:
                # "autoagent" runs store the final answer under "summary",
                # all other runs under "response".
                if not model.endswith("autoagent"):
                    ans = evaluate_prediction(client, question, str(sample["answers"][qid]), pre["response"])
                else:
                    ans = evaluate_prediction(client, question, str(sample["answers"][qid]), pre["summary"])
            except Exception as e:
                # On any API or parsing error, count the question as wrong.
                print(e)
                ans = "False"
            if not model.endswith("autoagent"):
                process = [sample["id"], ans, str(sample["answers"][qid]), pre["response"]]
            else:
                process = [sample["id"], ans, str(sample["answers"][qid]), pre["summary"]]

            result.append(ans)
            json.dump(process, save_process)
            save_process.write("\n")
            save_process.flush()
        json.dump(result, save_f)
        save_f.write("\n")
        save_f.flush()
        results += result

save_f.close()
save_process.close()
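
# Convert the judge's textual verdicts into booleans.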
results_c = []
for result in results:
    results_c.append("true" in result.lower())
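
# Per-challenge accuracy: each sample's questions occupy a contiguous slice of results_c.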
idx = 0
score4cha = []
for sample in tqdm(samples):
    if len(sample["questions"]) > 0:
        score_ = sum(results_c[idx:idx + len(sample["questions"])]) / len(sample["questions"])
        idx += len(sample["questions"])
        score4cha.append(score_)
print(f"Accuracy for each challenge is {score4cha}")
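
# Overall accuracy across all graded questions.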
acc = sum(results_c) / len(results_c)
print(f"Accuracy for all the {len(results_c)} questions is {acc}")