# Build T5-style English<->Norwegian translation TSVs from JSONL files
# containing 'en' and 'no' fields.
import pandas as pd
import xmltodict
from sklearn.model_selection import train_test_split
import glob
import sys
import os
def convert_jsonl_to_tsv(jsonfile):
    """Convert one JSONL file with 'en'/'no' fields into a shuffled TSV.

    Each input record yields two training rows with T5-style target-language
    prefixes: ('nob: ' + english, norwegian) and ('eng: ' + norwegian, english).
    Tabs inside either field are replaced with spaces so the TSV stays valid,
    rows are shuffled, and the result is written next to the input with a
    .tsv extension (no header, no index).

    Returns the number of rows that could not be converted.
    """
    print(f"Processing {jsonfile}")
    frame = pd.read_json(jsonfile, lines=True, encoding='utf8')
    pairs = []
    errors = 0
    for _, row in frame.iterrows():
        # Narrow except: only a missing/unreadable field should be skipped,
        # not arbitrary errors (the original bare `except:` hid real bugs).
        try:
            en = str(row['en'])
            no = str(row['no'])
        except KeyError:
            errors += 1
            print("Unable to convert this line")
            print(row)
            continue
        pairs.append(['nob: ' + en, no])
        pairs.append(['eng: ' + no, en])

    # Build the frame once with named columns. The original created default
    # integer columns (0, 1) with ['source','target'] as a *data* row, so
    # data['source'] raised KeyError and the tab cleanup silently never ran.
    data = pd.DataFrame(pairs, columns=['source', 'target'])
    # Literal (non-regex) replace: tabs would corrupt the TSV format.
    data['source'] = data['source'].str.replace('\t', ' ', regex=False)
    data['target'] = data['target'].str.replace('\t', ' ', regex=False)

    # Shuffle rows so train-time batches are not ordered by source document.
    data = data.sample(frac=1).reset_index(drop=True)
    filename = jsonfile.replace(".jsonl", ".tsv")
    # Write the dataset to disk
    data.to_csv(filename, index=False, header=False, sep='\t')
    return errors


def main():
    """Convert every *.jsonl file in the current directory to .tsv."""
    for jsonfile in glob.glob('*.jsonl'):
        convert_jsonl_to_tsv(jsonfile)
    print("Finished")


if __name__ == '__main__':
    main()