import json
import random
# Path to the input file
file_path = 'robot_comment.json'
# Load the JSON data
with open(file_path, 'r', encoding='utf-8') as file:
    data = json.load(file)
# Collect safe and unsafe records in separate lists
safe_data = []
unsafe_data = []
safe_count = 0
for item in data:
    content = item['content']
    audit_status = item['audit_status']
    status = item['status']
    # Bucket each record according to its audit_status
    if audit_status == 1:
        # Cap the safe set at 500 records
        if safe_count < 500:
            safe_data.append({
                "prompt": content,
                "response": "N/A",
                "violated_category_codes": [],
                "label": "safe",
                "explanation": ""
            })
            safe_count += 1
    elif audit_status == -1 or status == 0:
        unsafe_data.append({
            "prompt": content,
            "response": "N/A",
            "violated_category_codes": ["S12"],
            "label": "unsafe",
            "explanation": "This text is not suitable for public display"
        })
# Randomly sample 50 safe and 50 unsafe records as the test set
test_safe = random.sample(safe_data, 50)
test_unsafe = random.sample(unsafe_data, 50)
test_data = test_safe + test_unsafe
# Use the remaining records as the training set
train_safe = [item for item in safe_data if item not in test_safe]
train_unsafe = [item for item in unsafe_data if item not in test_unsafe]
train_data = train_safe + train_unsafe
# Write the test and training sets to files
output_test_path = 'test_data.json'
output_train_path = 'transformed_data.json'
with open(output_test_path, 'w', encoding='utf-8') as test_output_file:
    json.dump(test_data, test_output_file, indent=2)
with open(output_train_path, 'w', encoding='utf-8') as train_output_file:
    json.dump(train_data, train_output_file, indent=2)
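
# Optional sanity check (a minimal sketch, not part of the original script):
# reload the two output files and report how many safe/unsafe records each one
# holds, so an unbalanced or failed split is noticed before the data is used.
for path in (output_test_path, output_train_path):
    with open(path, 'r', encoding='utf-8') as check_file:
        records = json.load(check_file)
    safe_total = sum(1 for record in records if record['label'] == 'safe')
    print(f"{path}: {len(records)} records, {safe_total} safe, {len(records) - safe_total} unsafe")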