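"""Split a plain-text file into ~1024-token chunks for fine-tuning.

Reads input.txt line by line, accumulates lines into chunks of at most
1024 tokens (measured with the Yarn-Llama-2-7B tokenizer), breaks each
chunk at the last sentence boundary so no sentence is split mid-way, and
writes chunks from the first ~90% of the input lines to train.csv and
the rest to test.csv, one chunk per row under a 'Text' column.
"""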
import csv
from transformers import AutoTokenizer
# Initialize the tokenizer
tokenizer = AutoTokenizer.from_pretrained("TheBloke/Yarn-Llama-2-7B-128K-GPTQ", use_fast=True)
# Read the input data
with open('input.txt', 'r') as f:
    data = f.readlines()
# Initialize variables
train_data = []
test_data = []
current_row = ""
current_token_count = 0
carry_over = ""
# Iterate over each line, packing lines into chunks of at most 1024 tokens
for i, line in enumerate(data):
    line_to_add = line.strip()
    # Count the line's tokens (without BOS/EOS, so per-line counts add up)
    num_tokens = len(tokenizer(line_to_add, add_special_tokens=False)['input_ids'])
    # If adding the line would exceed the token limit, close out this chunk
    if current_token_count + num_tokens > 1024:
        # Break at the last period followed by a space in the current chunk
        last_period_idx = current_row.rfind('. ')
        carry_over = ""
        if last_period_idx != -1:
            # Carry the trailing partial sentence into the next chunk
            carry_over = current_row[last_period_idx + 2:].strip() + "\n"
            current_row = current_row[:last_period_idx + 1]
        # The first ~90% of the input lines feed train, the rest test
        if i < len(data) * 0.9:
            train_data.append(current_row.strip())
        else:
            test_data.append(current_row.strip())
        # Seed the next chunk with the carried-over sentence fragment
        current_row = carry_over
        current_token_count = len(tokenizer(current_row.strip(), add_special_tokens=False)['input_ids'])
    # Add the line to the current chunk
    current_row += line_to_add + "\n"
    current_token_count += num_tokens

# Flush the final partial chunk; it comes from the end of the file,
# so it belongs to the test split
if current_row.strip():
    test_data.append(current_row.strip())
# Save as train.csv and test.csv
with open('train.csv', 'w', newline='') as f:
    writer = csv.writer(f)
    writer.writerow(['Text'])
    for row in train_data:
        writer.writerow([row])

with open('test.csv', 'w', newline='') as f:
    writer = csv.writer(f)
    writer.writerow(['Text'])
    for row in test_data:
        writer.writerow([row])
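
# Note: the 90/10 split is decided by line position in input.txt, so the
# test rows always come from the end of the file; shuffle input.txt first
# if a random split is wanted.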