Update README.md
README.md CHANGED
@@ -39,19 +39,19 @@ For more detailed code regarding generating the annotations in Toxic Commons, tr
 
 ```
 from transformers import AutoTokenizer
-from model import MultiHeadDebertaForSequenceClassification
+from celadon.model import MultiHeadDebertaForSequenceClassification
 
-tokenizer = AutoTokenizer.from_pretrained("
-model = MultiHeadDebertaForSequenceClassification.from_pretrained("
+tokenizer = AutoTokenizer.from_pretrained("celadon")
+model = MultiHeadDebertaForSequenceClassification.from_pretrained("celadon")
 model.eval()
 
 sample_text = "This is an example of a normal sentence"
 
 inputs = tokenizer(sample_text, return_tensors="pt", padding=True, truncation=True)
-outputs = model(
+outputs = model(input_ids=inputs['input_ids'], attention_mask=inputs['attention_mask'])
 
 categories = ['Race/Origin', 'Gender/Sex', 'Religion', 'Ability', 'Violence']
-predictions = outputs.argmax(dim=-1).squeeze().tolist()
+predictions = outputs.argmax(dim=-1).squeeze().tolist()
 
 # Print the classification results for each category
 print(f"Text: {sample_text}")
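The hunk ends at the `print(f"Text: {sample_text}")` line, so the diff does not show how the example finishes printing one result per category. A minimal sketch of such a loop, assuming `outputs` is a logits tensor of shape `(batch, num_categories, num_levels)` so that `predictions` holds one predicted level per entry in `categories` (illustrative only, not part of this commit):

```
# Illustrative continuation: pair each category with its predicted level.
# Assumes `predictions` is a list with one integer per category head.
for category, prediction in zip(categories, predictions):
    print(f"{category}: {prediction}")
```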