leannetanyt committed on
Commit cc0fd7b · verified
1 Parent(s): 9bed984

feat: upload model weights

Files changed (3)
  1. config.json +49 -0
  2. lionguard2.py +195 -0
  3. model.safetensors +3 -0
config.json ADDED
@@ -0,0 +1,49 @@
+{
+  "architectures": [
+    "LionGuard2Model"
+  ],
+  "categories": {
+    "all_other_misconduct": [
+      "all_other_misconduct_l1",
+      "all_other_misconduct_l2"
+    ],
+    "binary": [
+      "binary"
+    ],
+    "hateful": [
+      "hateful_l1",
+      "hateful_l2"
+    ],
+    "insults": [
+      "insults"
+    ],
+    "physical_violence": [
+      "physical_violence"
+    ],
+    "self_harm": [
+      "self_harm_l1",
+      "self_harm_l2"
+    ],
+    "sexual": [
+      "sexual_l1",
+      "sexual_l2"
+    ]
+  },
+  "category_order": [
+    "binary",
+    "hateful",
+    "insults",
+    "sexual",
+    "physical_violence",
+    "self_harm",
+    "all_other_misconduct"
+  ],
+  "input_dim": 3072,
+  "model_type": "lionguard2",
+  "torch_dtype": "float32",
+  "transformers_version": "4.50.3",
+  "auto_map": {
+    "AutoConfig": "lionguard2.LionGuard2Config",
+    "AutoModel": "lionguard2.LionGuard2Model"
+  }
+}
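
The `auto_map` block above wires the checkpoint to the custom classes defined in lionguard2.py, so it can be loaded through the Transformers Auto classes with `trust_remote_code=True`. A minimal loading sketch follows, assuming a placeholder repository id (substitute the actual repo path):

from transformers import AutoConfig, AutoModel

repo_id = "leannetanyt/lionguard-2"  # placeholder repo id, assumed for illustration only
config = AutoConfig.from_pretrained(repo_id, trust_remote_code=True)  # resolves to lionguard2.LionGuard2Config
model = AutoModel.from_pretrained(repo_id, trust_remote_code=True)    # resolves to lionguard2.LionGuard2Model

print(config.input_dim)       # 3072
print(config.category_order)  # ['binary', 'hateful', 'insults', ...]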
lionguard2.py ADDED
@@ -0,0 +1,195 @@
+"""
+lionguard2.py
+"""
+
+import torch
+import torch.nn as nn
+from transformers import PretrainedConfig, PreTrainedModel
+
+INPUT_DIMENSION = 3072  # dimension of OpenAI text-embedding-3-large embeddings
+
+CATEGORIES = {
+    "binary": ["binary"],
+    "hateful": ["hateful_l1", "hateful_l2"],
+    "insults": ["insults"],
+    "sexual": [
+        "sexual_l1",
+        "sexual_l2",
+    ],
+    "physical_violence": ["physical_violence"],
+    "self_harm": ["self_harm_l1", "self_harm_l2"],
+    "all_other_misconduct": [
+        "all_other_misconduct_l1",
+        "all_other_misconduct_l2",
+    ],
+}
+
+CATEGORY_ORDER = [
+    "binary",
+    "hateful",
+    "insults",
+    "sexual",
+    "physical_violence",
+    "self_harm",
+    "all_other_misconduct",
+]
+
+
+class LionGuard2Config(PretrainedConfig):
+    model_type = "lionguard2"
+
+    def __init__(
+        self,
+        input_dim=INPUT_DIMENSION,
+        categories=CATEGORIES,
+        category_order=CATEGORY_ORDER,
+        **kwargs,
+    ):
+        super().__init__(**kwargs)
+        self.input_dim = input_dim
+        self.categories = categories
+        self.category_order = category_order
+
+
+class LionGuard2Model(PreTrainedModel):
+    config_class = LionGuard2Config
+
+    def __init__(self, config: LionGuard2Config):
+        """
+        LionGuard2 is a localised content moderation model that flags whether text violates the following categories:
+
+        1. `hateful`: Text that discriminates, criticizes, insults, denounces, or dehumanizes a person or group on the basis of a protected identity.
+
+           There are two sub-categories for the `hateful` category:
+           a. `level_1_discriminatory`: Text that contains derogatory or generalized negative statements targeting a protected group.
+           b. `level_2_hate_speech`: Text that explicitly calls for harm or violence against a protected group, or language praising or justifying violence against them.
+
+        2. `insults`: Text that insults, demeans, humiliates, mocks, or belittles a person or group **without** referencing a legally protected trait.
+           For example, this includes personal attacks on attributes such as someone’s appearance, intellect, behavior, or other non-protected characteristics.
+
+        3. `sexual`: Text that depicts or indicates sexual interest, activity, or arousal, using direct or indirect references to body parts, sexual acts, or physical traits.
+           This includes sexual content that may be inappropriate for certain audiences.
+
+           There are two sub-categories for the `sexual` category:
+           a. `level_1_not_appropriate_for_minors`: Text that contains mild-to-moderate sexual content that is generally adult-oriented or potentially unsuitable for those under 16.
+              May include matter-of-fact discussions about sex, sexuality, or sexual preferences.
+           b. `level_2_not_appropriate_for_all_ages`: Text that contains content aimed at adults and considered explicit, graphic, or otherwise inappropriate for a broad audience.
+              May include explicit descriptions of sexual acts, detailed sexual fantasies, or highly sexualized content.
+
+        4. `physical_violence`: Text that includes glorification of violence or threats to inflict physical harm or injury on a person, group, or entity.
+
+        5. `self_harm`: Text that promotes, suggests, or expresses intent to self-harm or commit suicide.
+
+           There are two sub-categories for the `self_harm` category:
+           a. `level_1_self_harm_intent`: Text that expresses suicidal thoughts or self-harm intention, or content encouraging someone to self-harm.
+           b. `level_2_self_harm_action`: Text that describes or indicates ongoing or imminent self-harm behavior.
+
+        6. `all_other_misconduct`: This is a catch-all category for any other unsafe text that does not fit into the other categories.
+           It includes text that seeks or provides information about engaging in misconduct, wrongdoing, or criminal activity, or that threatens to harm,
+           defraud, or exploit others. This includes facilitating illegal acts (under Singapore law) or other forms of socially harmful activity.
+
+           There are two sub-categories for the `all_other_misconduct` category:
+           a. `level_1_not_socially_accepted`: Text that advocates or instructs on unethical/immoral activities that may not necessarily be illegal but are socially condemned.
+           b. `level_2_illegal_activities`: Text that seeks or provides instructions to carry out clearly illegal activities or serious wrongdoing; includes credible threats of severe harm.
+
+        Lastly, there is an additional `binary` category (#7) which flags whether the text is unsafe in general.
+
+        The model takes as input text that has already been encoded with OpenAI's `text-embedding-3-large` model (3072-dimensional embeddings).
+
+        The model outputs the probability of each category being true.
+
+        ================================
+
+        Args (read from `config`):
+            input_dim: The dimension of the input embeddings. Defaults to 3072, the dimension of OpenAI's `text-embedding-3-large` embeddings. This should not be changed.
+            categories: The mapping from each main category to its sub-category labels. Defaults to the CATEGORIES dictionary. This should not be changed.
+            category_order: The order in which the main categories are scored. Defaults to the CATEGORY_ORDER list. This should not be changed.
+
+        Returns:
+            A LionGuard2 model.
+        """
+        super().__init__(config)
+        self.input_dim = config.input_dim
+        self.categories = config.categories
+        self.category_order = config.category_order
+        self.n_outputs = len(self.category_order)
+
+        # Shared layers
+        self.shared_layers = nn.Sequential(
+            nn.Linear(self.input_dim, 256),
+            nn.ReLU(),
+            nn.Dropout(0.2),
+            nn.Linear(256, 128),
+            nn.ReLU(),
+            nn.Dropout(0.2),
+        )
+
+        # Output heads for each label
+        self.output_heads = nn.ModuleList(
+            [
+                nn.Sequential(
+                    nn.Linear(128, 32),
+                    nn.ReLU(),
+                    nn.Linear(32, 2),  # 2 thresholds for ordinal classification
+                    nn.Sigmoid(),
+                )
+                for _ in range(self.n_outputs)
+            ]
+        )
+
+    def forward(self, x):
+        # Pass through shared layers
+        h = self.shared_layers(x)
+        # Pass through each output head
+        return [head(h) for head in self.output_heads]
+
+    def predict(self, embeddings):
+        """
+        Predict the probabilities of each label being true.
+
+        Args:
+            embeddings: A numpy array of embeddings (N x INPUT_DIMENSION)
+
+        Returns:
+            A dictionary mapping each sub-category label to a list of N probabilities.
+        """
+        # Convert input to a PyTorch tensor if it is not one already
+        if not isinstance(embeddings, torch.Tensor):
+            x = torch.tensor(embeddings, dtype=torch.float32)
+        else:
+            x = embeddings
+
+        # Pass through the model
+        with torch.no_grad():
+            outputs = self.forward(x)
+
+        # Stack outputs into a single tensor
+        raw_predictions = torch.stack(outputs)  # shape: (n_outputs, N, 2)
+
+        # Extract and format probabilities from raw predictions
+        output = {}
+        for i, main_cat in enumerate(self.category_order):
+            sub_categories = self.categories[main_cat]
+            for j, sub_cat in enumerate(sub_categories):
+                # j=0 uses P(y>0)
+                # j=1 uses P(y>1) if an L2 category exists
+                output[sub_cat] = raw_predictions[i, :, j]
+
+            # Post-processing step:
+            # if an L2 category exists and P(L2) > P(L1),
+            # set both P(L1) and P(L2) to their average to maintain ordinal consistency
+            if len(sub_categories) > 1:
+                l1 = output[sub_categories[0]]
+                l2 = output[sub_categories[1]]
+
+                # Update probabilities on samples where P(L2) > P(L1)
+                mask = l2 > l1
+                mean_prob = (l1 + l2) / 2
+                l1[mask] = mean_prob[mask]
+                l2[mask] = mean_prob[mask]
+                output[sub_categories[0]] = l1
+                output[sub_categories[1]] = l2
+
+        for key, value in output.items():
+            output[key] = value.numpy().tolist()
+        return output
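
As a rough end-to-end sketch (not part of this commit), `predict` takes a batch of 3072-dimensional embeddings and returns one probability list per sub-category label; the post-processing above already enforces P(L2) <= P(L1) for the two-level categories. The snippet assumes OpenAI's `text-embedding-3-large` is used to produce the embeddings and reuses the placeholder repository id from the earlier sketch:

import numpy as np
from openai import OpenAI
from transformers import AutoModel

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment
texts = ["an example sentence to moderate"]
resp = client.embeddings.create(model="text-embedding-3-large", input=texts)
embeddings = np.array([d.embedding for d in resp.data], dtype=np.float32)  # shape (N, 3072)

model = AutoModel.from_pretrained("leannetanyt/lionguard-2", trust_remote_code=True)  # placeholder repo id
scores = model.predict(embeddings)  # dict: sub-category label -> list of N probabilities
print(scores["binary"], scores["hateful_l1"], scores["hateful_l2"])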
model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:20665c1cde68b57c34444accc4f0fca5a3f58b3483d6bad2d6c6911e431afac9
+size 3398496
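
As a quick sanity check (my arithmetic, not from the commit), the 3,398,496-byte file is consistent with float32 weights for the architecture in lionguard2.py: roughly 849k parameters, i.e. about 3.4 MB plus a small safetensors header.

# Parameter-count sketch, assuming the 7 output heads defined in lionguard2.py
shared = (3072 * 256 + 256) + (256 * 128 + 128)  # shared layers: 819,584 params
heads = 7 * ((128 * 32 + 32) + (32 * 2 + 2))     # output heads: 29,358 params
total = shared + heads                           # 848,942 params
print(total * 4)                                 # 3,395,768 bytes of float32 weights (+ header ≈ 3,398,496)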