Repo init
- README.md +17 -25
- data/2024-1/assessment_2024-1.csv +3 -0
- data/2024-1/execution_2024-1.csv +3 -0
- data/2024-1/grade_2024-1.csv +3 -0
- src/__pycache__/assessment.cpython-312.pyc +0 -0
- src/__pycache__/execution.cpython-312.pyc +0 -0
- src/__pycache__/grade.cpython-312.pyc +0 -0
- src/__pycache__/utils.cpython-312.pyc +0 -0
- src/assessment.py +69 -0
- src/execution.py +227 -0
- src/grade.py +107 -0
- src/process.py +69 -0
- src/utils.py +4 -0
README.md
CHANGED
@@ -1,32 +1,24 @@
 ---
+task_categories:
+- text-generation
+language:
+- en
+tags:
+- code
+size_categories:
+- 100K<n<1M
 configs:
-- config_name: 2024
+- config_name: 2024-1
   data_files:
-  - split:
-    path:
-  - split:
-    path:
-  - split:
-    path:
-
-  data_files:
-  - split: train
-    path: "data/v2_sigcse/train-*.csv"
-  - split: evaluation
-    path: "data/v2_sigcse/evaluation_dataset-*.csv"
-  - split: final_dataset
-    path: "data/v2_sigcse/final_dataset-*.csv"
-  - split: manual_evaluation
-    path: "data/v2_sigcse/manual_evaluation_dialogues-*.csv"
-  - split: testset
-    path: "data/v2_sigcse/testset-*.csv"
-- config_name: v3
-  data_files:
-  - split: final_dataset
-    path: "data/v3/final_dataset-*.csv"
-  - split: testset
-    path: "data/v3/testset-*.csv"
+  - split: assessment
+    path: data/2024-1/assessment*.csv
+  - split: execution
+    path: data/2024-1/execution*.csv
+  - split: grade
+    path: data/2024-1/grade*.csv
+  default: true
 ---
 
+
 # CodeBench dataset
 
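For context, the new front matter declares a single default config, 2024-1, with one split per CSV added below. A minimal sketch of loading it with the `datasets` library; the repo id shown is a placeholder, not the actual dataset path:

# Illustrative example (not part of the commit).
from datasets import load_dataset

# "some-org/CodeBench" is a placeholder repo id; substitute the real dataset path.
ds = load_dataset("some-org/CodeBench", name="2024-1")  # the default config per the README
print(ds)              # expected splits: assessment, execution, grade
grades = ds["grade"]   # each split is backed by one of the CSVs below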
data/2024-1/assessment_2024-1.csv
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:533cb2f8227895beeb0313cea518db9d5c362e0986c73b78dc35d46b237303e1
size 58424
data/2024-1/execution_2024-1.csv
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:fd97ab5c1455baf0b77e2c800522481a63b9c0e464095f45586d4068bdeb159b
size 1974098
data/2024-1/grade_2024-1.csv
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:66b8b3486c0f8b9429239d73146c1f0b1b43c15594223604934baad58148e3b6
size 180450
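Note that the three CSVs above are committed as Git LFS pointer files (version/oid/size), so a plain `git clone` only fetches these stubs; the actual CSV content lives in LFS storage and is materialized with `git lfs pull`, or served directly by the Hub when loading through the `datasets` library.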
src/__pycache__/assessment.cpython-312.pyc
ADDED
Binary file (3.26 kB)
src/__pycache__/execution.cpython-312.pyc
ADDED
Binary file (8.74 kB)
src/__pycache__/grade.cpython-312.pyc
ADDED
Binary file (4.37 kB)
src/__pycache__/utils.cpython-312.pyc
ADDED
Binary file (455 Bytes)
src/assessment.py
ADDED
@@ -0,0 +1,69 @@
import pandas as pd
from src.utils import read_data_file

def parse_assessment_simple(full_path: str) -> pd.DataFrame:
    """
    Minimal parser:
      - Split per line
      - For lines starting with '----', strip it, then split on the first ':'
      - Build a flat dict, then move keys like 'exercise 01' into an 'exercises' dict
      - Return a one-row DataFrame
    """

    print(f"Parsing assessment data from {full_path}...")

    assessment_number = int(full_path.split("/")[-1].replace(".data", ""))
    text = read_data_file(full_path)

    lines = [ln.strip() for ln in text.strip().splitlines()]
    flat = {}

    for ln in lines:
        if not ln or ln.startswith("-- ASSESSMENT DATA") or ln.startswith("-- EXERCISES"):
            continue
        if ln.startswith("----"):
            # drop leading dashes and spaces
            core = ln.lstrip("-").strip()
            if ":" not in core:
                continue
            k, v = core.split(":", 1)
            k = k.strip().lower()  # normalize a bit
            v = v.strip()
            flat[k] = v

    # Extract exercises into a nested dict
    exercises = {}
    for k in list(flat.keys()):
        if k.startswith("exercise "):
            # 'exercise 01' -> 1
            try:
                idx = int(k.split()[1])
            except Exception:
                continue
            try:
                exercises[idx] = int(flat[k])
            except Exception:
                # keep as string if not an int
                exercises[idx] = flat[k]
            del flat[k]

    # Optional light cleanup: turn common numerics into numbers
    def to_int(x):
        try: return int(x)
        except: return x
    def to_float(x):
        try: return float(x)
        except: return x

    for key in ("class number", "total_exercises"):
        if key in flat:
            flat[key] = to_int(flat[key])
    if "weight" in flat:
        flat["weight"] = to_float(flat["weight"])

    # Rename keys to snake_case (purely cosmetic)
    snake = {k.replace(" ", "_"): v for k, v in flat.items()}
    snake["exercises"] = exercises or None
    snake["assessment_id"] = assessment_number

    return pd.DataFrame([snake])
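As a reference for the expected input, here is a hypothetical `.data` assessment file and a call to the parser above. The marker lines (`-- ASSESSMENT DATA`, `-- EXERCISES`, `----` key/value pairs) and the field names `class number`, `weight`, `total_exercises` are taken from the parser itself; the concrete values are illustrative assumptions.

# Illustrative example (not part of the commit): synthesize a small assessment
# file in the format parse_assessment_simple expects, then parse it.
from src.assessment import parse_assessment_simple

sample = "\n".join([
    "-- ASSESSMENT DATA",
    "---- class number: 1",
    "---- weight: 0.25",
    "---- total_exercises: 2",
    "-- EXERCISES",
    "---- exercise 01: 101",   # assumed: exercise slot -> exercise id
    "---- exercise 02: 102",
])
with open("11.data", "w") as fp:   # the filename provides assessment_id = 11
    fp.write(sample)

df = parse_assessment_simple("11.data")
print(df[["assessment_id", "class_number", "weight", "total_exercises", "exercises"]])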
src/execution.py
ADDED
@@ -0,0 +1,227 @@
import pandas as pd
from src.utils import read_data_file

SEP = "*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*"

def split_execution_traces(log_text: str):
    parts = log_text.strip().split(SEP)
    return [p.strip() for p in parts if p.strip()]

def _extract_code(block: str):
    lines = block.splitlines()
    start_idx = None
    for i, ln in enumerate(lines):
        if ln.strip().lower().startswith("-- code:"):
            start_idx = i + 1
            break
    if start_idx is None:
        return None, None  # (code_text, code_end_idx)

    end_idx = len(lines)
    for j in range(start_idx, len(lines)):
        s = lines[j].strip()
        if s.startswith("-- ") and s.endswith(":"):
            end_idx = j
            break
    code_block = "\n".join(lines[start_idx:end_idx]).strip("\n")
    return (code_block if code_block.strip() else None), end_idx

def _extract_test_output(block: str):
    lines = block.splitlines()
    _, after_code_idx = _extract_code(block)
    if after_code_idx is None:
        return None, None

    section_idx = None
    section_tag = None
    for k in range(after_code_idx, len(lines)):
        tag = lines[k].strip().lower()
        if tag.startswith("-- output:") or tag.startswith("-- error:"):
            section_idx = k
            section_tag = tag
            break
    if section_idx is None:
        return None, None

    end_idx = len(lines)
    for t in range(section_idx + 1, len(lines)):
        s = lines[t].strip()
        if s.startswith("-- ") and s.endswith(":"):
            end_idx = t
            break

    content = "\n".join(lines[section_idx + 1 : end_idx]).strip("\n")
    if not content.strip():
        content = None

    out_type = "stdout" if section_tag.startswith("-- output:") else "stderr"
    return out_type, content

def _read_scalar_after_section(lines, idx):
    """Return the first non-empty line after lines[idx], or None."""
    for j in range(idx + 1, len(lines)):
        txt = lines[j].strip()
        if not txt:
            # allow empty line(s) before the scalar
            continue
        return txt
    return None

def _extract_submission(block: str):
    """
    Extract from a SUBMISSION/SUBMITION block:
      - execution_time (float, seconds)
      - grade (float, percent 0..100)
      - testcases (list of dicts with index, input, correct_output, user_output)
    """
    lines = block.splitlines()
    # 1) Find where code ends to start scanning the sections after it
    _, after_code_idx = _extract_code(block)
    scan_start = after_code_idx if after_code_idx is not None else 0

    execution_time = None
    grade = None
    testcases = []

    i = scan_start
    while i < len(lines):
        raw = lines[i].strip()
        low = raw.lower()

        # -- EXECUTION TIME:
        if low.startswith("-- execution time:"):
            val = _read_scalar_after_section(lines, i)
            if val is not None:
                # parse float; ignore parse errors silently
                try:
                    execution_time = float(val)
                except:
                    pass
            i += 1
            continue

        # -- TEST CASE N:
        if low.startswith("-- test case"):
            # parse index between "test case " and ":"
            idx = None
            try:
                # e.g., "-- TEST CASE 3:"
                head = raw.split(":", 1)[0]
                idx = int(head.lower().split("test case", 1)[1].strip())
            except:
                pass

            # collect subfields until next '-- TEST CASE' or '-- GRADE:' or end
            j = i + 1
            current = {"index": idx, "input": "", "correct_output": "", "user_output": ""}
            active = None  # "input" | "correct_output" | "user_output"

            while j < len(lines):
                sraw = lines[j].rstrip("\n")
                slow = sraw.strip().lower()

                if slow.startswith("-- test case") or slow.startswith("-- grade:"):
                    break  # end of this test case section

                if slow.startswith("---- input:"):
                    active = "input"
                    # reset to collect fresh content for this field
                    current["input"] = ""
                elif slow.startswith("---- correct output:"):
                    active = "correct_output"
                    current["correct_output"] = ""
                elif slow.startswith("---- user output:"):
                    active = "user_output"
                    current["user_output"] = ""
                else:
                    if active:
                        # preserve newlines as in the log
                        current[active] += (sraw + "\n")
                j += 1

            # strip trailing newlines
            for k in ("input", "correct_output", "user_output"):
                current[k] = current[k].rstrip("\n")

            testcases.append(current)
            i = j
            continue

        # -- GRADE:
        if low.startswith("-- grade:"):
            val = _read_scalar_after_section(lines, i)
            if val is not None:
                v = val.strip().rstrip("%").strip()
                try:
                    grade = float(v)
                except:
                    pass
            i += 1
            continue

        i += 1

    return execution_time, grade, testcases

def parse_execution_data(full_path):
    """
    Returns one row per trace with:
      user, class_number, type, timestamp, code,
      output_type, output_content (for TEST),
      execution_time, grade, testcases (for SUBMISSION/SUBMITION),
      trace
    """

    print(f"Parsing execution data from {full_path}...")

    user = full_path.split("/")[-3]
    class_number = full_path.split("/")[-5]
    assessment_id, exercise_id = full_path.split("/")[-1].replace(".log", "").split("_")
    assessment_id = int(assessment_id)
    exercise_id = int(exercise_id)

    log_text = read_data_file(full_path)  # defined in src/utils.py

    rows = []
    for block in split_execution_traces(log_text):
        # header '== TYPE (timestamp)'
        first = next((ln.strip() for ln in block.splitlines() if ln.strip()), "")
        header = first.lstrip("=").strip()

        typ, ts = None, None
        if "(" in header and ")" in header:
            left, rest = header.split("(", 1)
            typ = left.strip()
            ts = rest.split(")", 1)[0].strip()
        else:
            typ = header

        code, _ = _extract_code(block)

        # Defaults
        output_type, output_content = None, None
        execution_time, grade, testcases = None, None, None

        tnorm = (typ or "").strip().lower()
        if tnorm == "test":
            output_type, output_content = _extract_test_output(block)
        elif tnorm in ("submission", "submition", "submit"):
            execution_time, grade, testcases = _extract_submission(block)

        rows.append({
            "user": user,
            "class_number": class_number,
            "assessment_id": assessment_id,
            "exercise_id": exercise_id,
            "type": typ,
            "timestamp": ts,
            "code": code,
            "output_type": output_type,
            "output_content": output_content,
            "execution_time": execution_time,
            "grade": grade,          # percent 0..100
            "testcases": testcases,  # list[dict] or None
            "trace": block
        })

    return pd.DataFrame(rows)
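For reference, a minimal sketch of a log in the layout parse_execution_data expects. Only the section markers and the path depth come from the parser above; the "users" directory name, the timestamp, and all values are assumptions for illustration.

# Illustrative example (not part of the commit): synthesize one execution log
# and parse it.
import os
from src.execution import SEP, parse_execution_data

trace = "\n".join([
    "== SUBMISSION (2024-03-12 10:15:00)",
    "-- CODE:",
    "print(int(input()) * 2)",
    "-- EXECUTION TIME:",
    "0.12",
    "-- TEST CASE 1:",
    "---- INPUT:",
    "3",
    "---- CORRECT OUTPUT:",
    "6",
    "---- USER OUTPUT:",
    "6",
    "-- GRADE:",
    "100%",
])

log_path = "tmp_demo/2024-1/users/student_001/executions/11_1.log"
os.makedirs(os.path.dirname(log_path), exist_ok=True)
with open(log_path, "w") as fp:
    fp.write(trace + "\n" + SEP + "\n")

df = parse_execution_data(log_path)
print(df[["user", "class_number", "assessment_id", "exercise_id", "type", "grade"]])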
src/grade.py
ADDED
@@ -0,0 +1,107 @@
import pandas as pd
from src.utils import read_data_file

def parse_grade_data(full_path: str) -> pd.DataFrame:
    """
    Parse a simple grade block like:
      ---- grade (0-10): 0
      ---- number of exercises: 4
      ---- correct: 0
      ---- incorrect: 1
      ---- blank: 3
    Returns a one-row DataFrame with:
      grade, grade_min, grade_max, number_of_exercises, correct, incorrect, blank
    """

    assessment_number = int(full_path.split("/")[-1].replace(".log", ""))
    user = full_path.split("/")[-3]
    class_number = full_path.split("/")[-5]
    text = read_data_file(full_path)

    lines = [ln.strip() for ln in text.strip().splitlines() if ln.strip()]
    flat = {}

    for ln in lines:
        if not ln.startswith("----"):
            continue
        core = ln.lstrip("-").strip()
        if ":" not in core:
            continue
        k, v = core.split(":", 1)
        k = k.strip().lower()
        v = v.strip()
        flat[k] = v

    # Extract grade and grade range if present in key "grade (x-y)"
    grade_min, grade_max = None, None
    grade_val = None

    # Find the grade key (it may look like 'grade (0-10)' or 'grade (0 - 10)')
    grade_key = next((k for k in flat.keys() if k.startswith("grade")), None)
    if grade_key is not None:
        # value is after ':', convert to int/float if possible
        try:
            grade_val = float(flat[grade_key])
            if grade_val.is_integer():
                grade_val = int(grade_val)
        except:
            grade_val = flat[grade_key]

        # parse range in parentheses in the key
        if "(" in grade_key and ")" in grade_key:
            inside = grade_key.split("(", 1)[1].split(")", 1)[0]  # e.g., "0-10" or "0 - 10"
            # normalize "0 - 10" -> "0-10"
            inside = inside.replace(" ", "")
            if "-" in inside:
                lo, hi = inside.split("-", 1)
                try:
                    grade_min = float(lo)
                    grade_max = float(hi)
                    if grade_min.is_integer(): grade_min = int(grade_min)
                    if grade_max.is_integer(): grade_max = int(grade_max)
                except:
                    pass

        # remove original grade key from flat
        del flat[grade_key]

    # Coerce remaining known numeric fields
    def to_num(x):
        try:
            y = float(x)
            return int(y) if y.is_integer() else y
        except:
            return x

    rename_map = {
        "number of exercises": "number_of_exercises",
        "correct": "correct",
        "incorrect": "incorrect",
        "blank": "blank",
    }
    out = {}
    for src, dst in rename_map.items():
        if src in flat:
            out[dst] = to_num(flat[src])

    out["grade"] = grade_val
    out["grade_min"] = grade_min
    out["grade_max"] = grade_max
    out["class_number"] = class_number
    out["assessment_number"] = assessment_number
    out["user"] = user

    return pd.DataFrame([out])

def parse_final_grade_data(full_path):
    user = full_path.split("/")[-3]
    class_number = full_path.split("/")[-5]
    text = read_data_file(full_path)

    out = {}
    out["class_number"] = class_number
    out["assessment_number"] = -1
    out["user"] = user
    out["grade"] = text
    return pd.DataFrame([out])
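A short sketch of parsing a hypothetical per-assessment grade log; the `----` field names come from the docstring above, while the "users" directory name and the values are assumptions.

# Illustrative example (not part of the commit).
import os
from src.grade import parse_grade_data

path = "tmp_demo/2024-1/users/student_001/grades/11.log"
os.makedirs(os.path.dirname(path), exist_ok=True)
with open(path, "w") as fp:
    fp.write("---- grade (0-10): 7.5\n"
             "---- number of exercises: 4\n"
             "---- correct: 3\n"
             "---- incorrect: 1\n"
             "---- blank: 0\n")

row = parse_grade_data(path).iloc[0]
print(row["grade"], row["grade_min"], row["grade_max"], row["number_of_exercises"])
# expected: 7.5 0 10 4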
src/process.py
ADDED
@@ -0,0 +1,69 @@
import os
import pandas as pd
from argparse import ArgumentParser

from src.grade import parse_grade_data, parse_final_grade_data
from src.execution import parse_execution_data
from src.assessment import parse_assessment_simple

def parse_args():
    """
    Parse command line arguments.
    """
    parser = ArgumentParser(description="Process CodeBench data.")
    parser.add_argument("--input", type=str, required=True, help="Input directory containing downloaded tar.")
    return parser.parse_args()


def process_data(BASE_PATH):

    assessment_data = []
    grade_data = []
    execution_data = []
    for root, dirs, files in os.walk(BASE_PATH):
        for d in dirs:
            full_path = os.path.join(root, d)
        if not len(dirs):
            # We are within a last level directory
            if root.split("/")[-1] == "assessments":
                for f in files:
                    full_path = os.path.join(root, f)
                    assessment_data.append(parse_assessment_simple(full_path))
            elif root.split("/")[-1] == "grades":
                for f in files:
                    if f.endswith(".data"):  # Final grade
                        full_path = os.path.join(root, f)
                        grade_data.append(parse_final_grade_data(full_path))
                    else:
                        full_path = os.path.join(root, f)
                        grade_data.append(parse_grade_data(full_path))
            elif root.split("/")[-1] == "executions":
                for f in files:
                    full_path = os.path.join(root, f)
                    execution_data.append(parse_execution_data(full_path))
                    break

    assessment_df = pd.concat(assessment_data)
    grade_df = pd.concat(grade_data)
    execution_df = pd.concat(execution_data)

    return assessment_df, grade_df, execution_df

def main():
    args = parse_args()
    # Load all provided dataframes
    subset = args.input.split("/")[-1]
    print(f"Processing data from {subset}...")
    assessment_df, grade_df, execution_df = process_data(args.input)

    # Save the dataframes to CSV files
    base_save_path = f"./data/{subset}/"
    os.makedirs(base_save_path, exist_ok=True)
    assessment_df.to_csv(os.path.join(base_save_path, f"assessment_{subset}.csv"), index=False)
    grade_df.to_csv(os.path.join(base_save_path, f"grade_{subset}.csv"), index=False)
    execution_df.to_csv(os.path.join(base_save_path, f"execution_{subset}.csv"), index=False)


if __name__ == "__main__":
    main()
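To tie the pieces together, a hedged sketch of running the pipeline end to end. It assumes the raw class tree (with assessments/, grades/ and executions/ leaf directories) has been extracted to ./raw/2024-1; that path is an assumption. It is equivalent to running `python -m src.process --input ./raw/2024-1` from the repo root.

# Illustrative example (not part of the commit): invoke the entry point
# programmatically instead of via the command line.
import sys
from src.process import main

sys.argv = ["process", "--input", "./raw/2024-1"]
main()  # writes ./data/2024-1/assessment_2024-1.csv, grade_2024-1.csv and execution_2024-1.csv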
src/utils.py
ADDED
@@ -0,0 +1,4 @@
def read_data_file(path):
    with open(path, "r") as fp:
        content = fp.read()
    return content