# split_tokens.py
import json
import re
# Split each file's code into smaller chunks so that no chunk grows too long.
def count_tokens(code):
    # Approximate the token count by raw character length; the word-level
    # regex tokenizer is left disabled.
    # tokens = re.findall(r'\b\w+\b', code)
    return len(code)
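# A model-accurate count could swap in a real tokenizer instead of character
# length. A minimal sketch, assuming the `tiktoken` package and its
# `cl100k_base` encoding (neither is used elsewhere in this script):
def count_tokens_bpe(code, encoding_name="cl100k_base"):
    import tiktoken  # assumed dependency: pip install tiktoken
    return len(tiktoken.get_encoding(encoding_name).encode(code))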
def split_code(code, max_tokens=2048):
    # Split the code into smaller chunks at class, function, and import
    # boundaries.
    class_pattern = re.compile(r'(?:class|struct|interface)\s+\w+\s*[\(\{]')
    # Match only the declaration head; requiring a trailing '{' would miss
    # Python 'def's and Rust 'fn's with return types.
    function_pattern = re.compile(r'(?:def|function|func|fn)\s+\w+\s*\(')
    import_pattern = re.compile(r'(?:import|include|require)\s+')
    comment_pattern = re.compile(r'(?://|#).*')
    # Strip line comments. Note this also removes anything after a '#' or
    # '//' that appears inside a string literal.
    code = comment_pattern.sub('', code)
    # Collect the starting offsets of classes, functions, and imports, and
    # use them (plus end-of-file) as chunk boundaries.
    class_starts = [match.start() for match in class_pattern.finditer(code)]
    function_starts = [match.start() for match in function_pattern.finditer(code)]
    import_starts = [match.start() for match in import_pattern.finditer(code)]
    logical_units = sorted(class_starts + function_starts + import_starts + [len(code)])
    chunks = []
    start_index = 0
    for end_index in logical_units:
        chunk = code[start_index:end_index].strip()
        if chunk:
            token_count = count_tokens(chunk)
            if token_count <= max_tokens:
                chunks.append(chunk)
            else:
                # A single logical unit is still too long; fall back to
                # fixed-size splitting.
                sub_chunks = split_chunk(chunk, max_tokens)
                chunks.extend(sub_chunks)
        start_index = end_index
    return chunks
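# Illustrative call (the input string is invented for demonstration): the
# regexes find two logical units, so two chunks come back.
#
#   split_code("fn add(a: u32, b: u32) -> u32 { a + b }\nstruct P { x: u32 }")
#   # -> ['fn add(a: u32, b: u32) -> u32 { a + b }', 'struct P { x: u32 }']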
def split_chunk(chunk, max_tokens):
    # Split an oversized chunk into fixed-size pieces. Slicing by character
    # count means a piece can end mid-word.
    sub_chunks = []
    start_index = 0
    while start_index < len(chunk):
        end_index = start_index + max_tokens
        sub_chunk = chunk[start_index:end_index].strip()
        sub_chunks.append(sub_chunk)
        start_index = end_index
    return sub_chunks
def find_split_index(code, max_tokens):
    # Find a whitespace-aligned index at which to split a chunk. Currently
    # unused by the functions above.
    token_count = 0
    for i, char in enumerate(code):
        if char.isspace():
            token_count += 1
            if token_count > max_tokens:
                return i
    return len(code)
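# A whitespace-aware variant of split_chunk could use find_split_index so
# that pieces break between words rather than mid-word. A minimal sketch,
# not wired into process_json_file below:
def split_chunk_on_whitespace(chunk, max_tokens):
    sub_chunks = []
    while chunk:
        # Never cut at 0, or the loop would not make progress.
        cut = max(find_split_index(chunk, max_tokens), 1)
        piece = chunk[:cut].strip()
        if piece:
            sub_chunks.append(piece)
        chunk = chunk[cut:]
    return sub_chunks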
def process_json_file(input_file, output_file):
    # Read the input JSON file
    with open(input_file, 'r') as file:
        data = json.load(file)
    # Process each entry in the JSON data
    output_data = []
    for entry in data:
        code = entry['content']
        token_count = count_tokens(code)
        if code != "":
            if token_count > 2048:
                # Split the code into smaller chunks
                chunks = split_code(code)
                # Create separate entries for each chunk
                for chunk in chunks:
                    output_data.append(json.dumps({"text": chunk}))
                    if len(chunk) > 2048:
                        # Debug check: report the first chunk that still
                        # exceeds the limit and stop splitting this entry.
                        print("Chunk length:", len(chunk))
                        print(chunk)
                        break
            else:
                # Create a single entry for the code
                output_data.append(json.dumps({"text": code}))
    # Write one JSON object per line (JSON Lines), not a JSON array.
    with open(output_file, 'w') as file:
        file.write('\n'.join(output_data))
    print(f"Processing completed. Results saved to '{output_file}'.")
if __name__ == '__main__':
    # Specify the input and output file paths
    input_file = '../zkml-dataset/dataset.json'
    output_file = 'tokenized_code_data.json'
    # Process the JSON file
    process_json_file(input_file, output_file)