liqiang888 committed
Commit 9fd03fb · verified · 1 Parent(s): 941886f

Delete data_analysis/show_result.py

Files changed (1):
  1. data_analysis/show_result.py +0 -73
data_analysis/show_result.py DELETED
@@ -1,73 +0,0 @@
-
-from tqdm import tqdm
-import os
-
-
-samples = []
-with open("./data.json", "r") as f:
-    for line in f:
-        samples.append(eval(line.strip()))
-
-def read_txt(path):
-    with open(path, "r") as f:
-        return f.read()
-
-save_path = "./save_process"
-# model = 'llava-v1.5-13b'
-# model = 'llama-3-8b-instruct'
-model = "gpt-3.5-turbo-0125"
-# model = 'gpt-4o-2024-05-13'
-
-
-
-results = []
-with open(os.path.join(save_path, model, "results.json"), "r") as f:
-    for line in f:
-        results += eval(line.strip())
-
-costs = []
-time_cost = []
-
-id = 0
-for sample in tqdm(samples):
-    result = []
-    if len(sample["questions"]) > 0:
-        predicts = []
-        with open(os.path.join(save_path, model, sample['id']+".json"), "r") as f:
-            for line in f:
-                pre = eval(line.strip())
-                predicts.append(pre)
-                costs.append(pre['cost'])
-                time_cost.append(pre['time'])
-    id += 1
-
-
-
-
-results_c = []
-for i, result in enumerate(results):
-    if "true" in result.lower():
-        results_c.append(True)
-    else:
-        results_c.append(False)
-    # if i>=11:
-    #     break
-
-idx = 0
-score4cha = []
-
-for i, sample in enumerate(samples):
-    if len(sample["questions"]) > 0:
-        score_ = sum(results_c[idx:idx+len(sample["questions"])]) / len(sample["questions"])
-        idx += len(sample["questions"])
-        score4cha.append(score_)
-
-acc = sum(results_c) / len(results_c)
-print(f"Accuracy for all the {len(results_c)} questions is {acc}")
-print(f"Cost for all the {len(results_c)} questions is {sum(costs)}")
-print(f"Consume time for all the {len(results_c)} questions is {sum(time_cost)}")
-print()
-
-
-print(f"Accuracy for each challenge is {score4cha}")
-print(f"Average accuracy for {len(score4cha)} challenge is {sum(score4cha)/len(score4cha)}")
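For reference, the deleted script parsed JSON-lines data and per-question judge verdicts with `eval`, then reported overall accuracy plus a per-challenge accuracy average. Below is a minimal sketch of the same aggregation, not the repository's code; it assumes `./data.json` and `results.json` are valid JSON Lines so that `json.loads` can stand in for `eval`, and it keeps the original paths and model name only for illustration.

```python
import json
import os

save_path = "./save_process"
model = "gpt-3.5-turbo-0125"

def load_jsonl(path):
    # One JSON object per line, parsed safely instead of eval().
    with open(path, "r") as f:
        return [json.loads(line) for line in f if line.strip()]

samples = load_jsonl("./data.json")

# The original did `results += eval(line.strip())`, i.e. each line of
# results.json holds a list of verdict strings; mirror that here.
results = []
with open(os.path.join(save_path, model, "results.json"), "r") as f:
    for line in f:
        results += json.loads(line)

# A verdict counts as correct if the judge's text contains "true".
correct = ["true" in str(r).lower() for r in results]

# Per-challenge accuracy: verdicts are consumed in the same order the
# challenges' questions appear in data.json.
idx = 0
score4cha = []
for sample in samples:
    n = len(sample["questions"])
    if n > 0:
        score4cha.append(sum(correct[idx:idx + n]) / n)
        idx += n

print(f"Overall accuracy over {len(correct)} questions: {sum(correct) / len(correct):.4f}")
print(f"Average accuracy over {len(score4cha)} challenges: {sum(score4cha) / len(score4cha):.4f}")
```

Using `json.loads` avoids executing arbitrary expressions read from the data and result files, which is the main hazard of the original `eval`-based loops.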