import json
import re


def count_tokens(code):
    # Rough token estimate: the character length of the code string. A real
    # tokenizer could be substituted here if more accurate counts are needed.
    return len(code)
|
|
def split_code(code, max_tokens=2048):
    # Boundaries we split on: class-like definitions, function definitions,
    # and import statements. Line comments are stripped before splitting.
    class_pattern = re.compile(r'(?:class|struct|interface)\s+\w+\s*[\(\{]')
    function_pattern = re.compile(r'(?:def|function|func|fn)\s+\w+\s*\([^)]*\)\s*\{')
    import_pattern = re.compile(r'(?:import|include|require)\s+')
    comment_pattern = re.compile(r'(?://|#).*')

    # Strip line comments so they do not count toward chunk size.
    code = comment_pattern.sub('', code)

    # Collect the start offsets of every logical unit, plus the end of the
    # code so the final segment is included.
    class_starts = [match.start() for match in class_pattern.finditer(code)]
    function_starts = [match.start() for match in function_pattern.finditer(code)]
    import_starts = [match.start() for match in import_pattern.finditer(code)]
    logical_units = sorted(class_starts + function_starts + import_starts + [len(code)])

    chunks = []
    start_index = 0
    for end_index in logical_units:
        chunk = code[start_index:end_index].strip()
        if chunk:
            token_count = count_tokens(chunk)
            if token_count <= max_tokens:
                chunks.append(chunk)
            else:
                # The logical unit itself is too large; split it further.
                sub_chunks = split_chunk(chunk, max_tokens)
                chunks.extend(sub_chunks)
        start_index = end_index

    return chunks
|
|
def split_chunk(chunk, max_tokens):
    # Fallback for a single logical unit that exceeds the budget: cut it into
    # fixed-size slices. Slicing is by character, matching count_tokens above.
    sub_chunks = []
    start_index = 0
    while start_index < len(chunk):
        end_index = start_index + max_tokens
        sub_chunk = chunk[start_index:end_index].strip()
        if sub_chunk:
            sub_chunks.append(sub_chunk)
        start_index = end_index
    return sub_chunks
|
|
def find_split_index(code, max_tokens):
    # Currently unused helper: returns the index at which the number of
    # whitespace-separated tokens first exceeds max_tokens.
    token_count = 0
    for i, char in enumerate(code):
        if char.isspace():
            token_count += 1
        if token_count > max_tokens:
            return i
    return len(code)
|
|
def process_json_file(input_file, output_file):
    with open(input_file, 'r') as file:
        data = json.load(file)

    output_data = []
    for entry in data:
        code = entry['content']
        token_count = count_tokens(code)
        if code != "":
            if token_count > 2048:
                # Entry is too large for a single example; split it into chunks.
                chunks = split_code(code)
                for chunk in chunks:
                    output_data.append(json.dumps({"text": chunk}))
                    # Sanity check: split_code should never emit oversized chunks.
                    if len(chunk) > 2048:
                        print("Chunk length:", len(chunk))
                        print(chunk)
                        break
            else:
                # Entry already fits within the limit; keep it whole.
                output_data.append(json.dumps({"text": code}))

    # Write one JSON object per line (JSON Lines format).
    with open(output_file, 'w') as file:
        file.write('\n'.join(output_data))

    print(f"Processing completed. Results saved to '{output_file}'.")
|
|
if __name__ == '__main__':
    input_file = '../zkml-dataset/dataset.json'
    output_file = 'tokenized_code_data.json'
    process_json_file(input_file, output_file)