taozi555 committed on
Commit
f2676f8
1 Parent(s): 2cfd9e8

Upload toxicchat_dataset.py

Files changed (1)
  1. toxicchat_dataset.py +131 -0
toxicchat_dataset.py ADDED
@@ -0,0 +1,131 @@
+ # Copyright (c) Meta Platforms, Inc. and affiliates.
+ # This software may be used and distributed according to the terms of the Llama 3.1 Community License Agreement.
+
+ # For dataset details visit: https://huggingface.co/datasets/lmsys/toxic-chat
+
+ import copy
+ import datasets
+ import itertools
+ from llama_recipes.inference.prompt_format_utils import LLAMA_GUARD_3_CATEGORY
+ import ast
+ import fire
+
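+ # Tokenize a fully formatted Llama Guard prompt; the same token ids are used as both
+ # input_ids and labels, so the loss is computed over the entire sequence.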
+ def tokenize_prompt_and_labels(full_prompt, tokenizer):
+     prompt_tokens = tokenizer.encode(full_prompt)
+     combined_tokens = {
+         "input_ids": list(prompt_tokens),
+         "labels": list(prompt_tokens)
+     }
+     return dict(combined_tokens, attention_mask=[1]*len(combined_tokens["input_ids"]))
+
+
+ from llama_recipes.data.llama_guard.finetuning_data_formatter import (
+     TrainingExample, Guidelines, Category, LlamaGuardPromptConfigs,
+     LlamaGuardGenerationConfigs, ExplanationPosition, AugmentationConfigs,
+     FormatterConfigs, create_formatted_finetuning_examples
+ )
+ from datasets import Dataset, DatasetInfo
+
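+ # Map a string-encoded list of (category, score) pairs to a single Llama Guard category
+ # code, keeping only the highest-scoring category. Note: the loader below does not call
+ # this helper; unsafe rows are hardcoded to category code "012".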
+ def mapTcCategoriesToLGCategories(TcCategoriesString):
+     TcCategories = ast.literal_eval(TcCategoriesString)
+     if len(TcCategories) == 0:
+         return None
+     ranked = sorted(TcCategories, key=lambda x: x[1], reverse=True)
+     primary = ranked[0][0] if len(ranked) else None
+     TcMapping = {
+         "S12": "012",
+         "violence": "01",
+         "sexual/minors": "04",
+         "self-harm/intent": "011",
+         "hate": "010",
+         "harassment": "010",
+         "self-harm": "011",
+         "self-harm/instructions": "011",
+         "violence/graphic": "01",
+         "harassment/threatening": "010",
+         "hate/threatening": "010"
+     }
+     return TcMapping[primary]
+
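+ # Build the Llama Guard fine-tuning dataset from a local JSON file whose rows carry
+ # 'prompt', 'response', 'label' and 'explanation' fields; rows not labelled "safe" are
+ # treated as unsafe. Returns raw JSONL-style records when return_jsonl=True, otherwise
+ # a tokenized datasets.Dataset.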
+ def get_llamaguard_toxicchat_dataset(dataset_config, tokenizer, split, return_jsonl=False):
+     dataset = datasets.load_dataset('json', data_files="/root/autodl-tmp/transformed_data.json", split='train')
+
+     if return_jsonl:
+         jsonl_data = []
+         for x in dataset.to_iterable_dataset():
+             jsonl_data.append({
+                 "prompt": x['prompt'],
+                 "generation": x['response'],
+                 "label": ("good" if x['label'] == "safe" else "bad"),
+                 "unsafe_content": [] if x["label"] == "safe" else ["012"]
+             })
+         return jsonl_data
+
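+     # The Llama Guard 3 category list is used verbatim, with codes prefixed by "O" (O1, O2, ...).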
+     guidelines = Guidelines(
+         categories=LLAMA_GUARD_3_CATEGORY,
+         category_code_prefix="O")
+
+     # Setting up configs
+     llama_guard_prompt_configs = LlamaGuardPromptConfigs(
+         instructions_format_string="""<|begin_of_text|><|start_header_id|>user<|end_header_id|> Task: Check if there is unsafe content in 'User' messages in conversations according our safety policy with the below categories.
+
+ <BEGIN UNSAFE CONTENT CATEGORIES>
+ {guidelines}
+ <END UNSAFE CONTENT CATEGORIES>
+
+ <BEGIN CONVERSATION>
+
+ {conversation}
+
+ <END CONVERSATION>
+
+ Provide your safety assessment for $agent_type in the above conversation:
+ - First line must read 'safe' or 'unsafe'.
+ - If unsafe, a second line must include a comma-separated list of violated categories. <|eot_id|><|start_header_id|>assistant<|end_header_id|>""",
+         should_include_category_descriptions=False,
+         should_shuffle_category_codes=False
+     )
+
+     llama_guard_generation_configs = LlamaGuardGenerationConfigs(
+         should_list_violated_codes=True,
+         explanation_position=None
+     )
+
+     augmentation_configs = AugmentationConfigs(
+         should_add_examples_with_dropped_nonviolated_prompt_categories=False,
+         should_add_examples_with_dropped_violated_and_nonviolated_prompt_categories=False,
+         explanation_for_augmentation_with_dropped_violated_and_nonviolated_prompt_categories="Response is safe because any riskiness in it is related to categories we're not trying to detect."
+     )
+
+     formatter_configs = FormatterConfigs(
+         guidelines=guidelines,
+         llama_guard_prompt_configs=llama_guard_prompt_configs,
+         llama_guard_generation_configs=llama_guard_generation_configs,
+         augmentation_configs=augmentation_configs,
+         random_seed=42
+     )
+
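+     # First format every row into a full Llama Guard training prompt, then tokenize it.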
+     dataset = dataset.map(lambda x: {"full_prompt": create_formatted_finetuning_examples(
+         [TrainingExample(
+             prompt=x["prompt"],
+             response=None,
+             violated_category_codes=[] if x["label"] == "safe" else ["012"],
+             label=x["label"],
+             explanation=x["explanation"]
+         )],
+         formatter_configs)[0]},
+         remove_columns=list(dataset.features))
+
+     dataset = dataset.map(lambda x: tokenize_prompt_and_labels(x["full_prompt"], tokenizer), remove_columns=list(dataset.features))
+     return dataset
+
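+ # CLI entry point. Example invocation (assuming the local tokenizer path and JSON file above exist):
+ #   python toxicchat_dataset.py --return_jsonl=True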
+ def main(return_jsonl=False):
+     from transformers import AutoTokenizer
+     model_id: str = "/home/ubuntu/LG3-interim-hf-weights"
+     tokenizer = AutoTokenizer.from_pretrained(model_id)
+     if return_jsonl:
+         dataset = get_llamaguard_toxicchat_dataset(None, tokenizer, "train", return_jsonl=True)
+         print(dataset[0:50])
+     else:
+         dataset = get_llamaguard_toxicchat_dataset(None, tokenizer, "train")
+         print(dataset[0])
+
+ if __name__ == '__main__':
+     fire.Fire(main)