update readme
README.md CHANGED
@@ -33,15 +33,14 @@ For more detailed code regarding generating the annotations in Toxic Commons, tr

# How to Use

-```
-from transformers import AutoTokenizer
-from celadon.model import MultiHeadDebertaForSequenceClassification
+```py
+from transformers import AutoTokenizer, AutoModelForSequenceClassification

-
-
+model = AutoModelForSequenceClassification.from_pretrained("PleIAs/celadon", trust_remote_code=True)
+tokenizer = AutoTokenizer.from_pretrained("PleIAs/celadon", trust_remote_code=True)
model.eval()

-sample_text = "
+sample_text = "A very gender inappropriate comment"

inputs = tokenizer(sample_text, return_tensors="pt", padding=True, truncation=True)
outputs = model(input_ids=inputs['input_ids'], attention_mask=inputs['attention_mask'])
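The hunks above and below elide the middle of the updated snippet, where `categories` and `predictions` are defined. For reference, a minimal end-to-end sketch of the updated example: the `categories` list here is an assumption inferred from the category names printed in the example output, and the forward-pass and `argmax` lines are taken from the snippet and the hunk context below.

```py
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Load the model and tokenizer; trust_remote_code pulls in the custom
# multi-head classification class shipped with the checkpoint.
model = AutoModelForSequenceClassification.from_pretrained("PleIAs/celadon", trust_remote_code=True)
tokenizer = AutoTokenizer.from_pretrained("PleIAs/celadon", trust_remote_code=True)
model.eval()

# Assumed category order, matching the printed output in the README example.
categories = ["Race/Origin", "Gender/Sex", "Religion", "Ability", "Violence"]
sample_text = "A very gender inappropriate comment"

inputs = tokenizer(sample_text, return_tensors="pt", padding=True, truncation=True)
outputs = model(input_ids=inputs['input_ids'], attention_mask=inputs['attention_mask'])

# One head per category; argmax over the last dimension gives the predicted
# level for each head.
predictions = outputs.argmax(dim=-1).squeeze().tolist()

print(f"Text: {sample_text}")
for i, category in enumerate(categories):
    print(f"Prediction for Category {category}: {predictions[i]}")
```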
@@ -53,6 +52,21 @@ predictions = outputs.argmax(dim=-1).squeeze().tolist()
print(f"Text: {sample_text}")
for i, category in enumerate(categories):
    print(f"Prediction for Category {category}: {predictions[i]}")
+# Text: A very gender inappropriate comment
+# Prediction for Category Race/Origin: 0
+# Prediction for Category Gender/Sex: 3
+# Prediction for Category Religion: 0
+# Prediction for Category Ability: 0
+# Prediction for Category Violence: 0
+```
+
+You can also use transformers pipelines to get a more streamlined experience:
+
+```py
+pipe = pipeline("text-classification", model="PleIAs/celadon", trust_remote_code=True)
+result = pipe("This is an example of a normal sentence")
+print(result)
+# [{'Race/Origin': 0, 'Gender/Sex': 3, 'Religion': 0, 'Ability': 0, 'Violence': 0}]
```

# How to Cite
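The pipeline example added above calls `pipeline()` without importing it. A self-contained sketch of the same call, assuming only the standard `transformers` import:

```py
from transformers import pipeline

# trust_remote_code loads the custom model/pipeline code shipped with the checkpoint.
pipe = pipeline("text-classification", model="PleIAs/celadon", trust_remote_code=True)

result = pipe("This is an example of a normal sentence")
print(result)  # a list with one dict of per-category scores, as in the example output above
```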