eliotj committed
Commit ffef542
1 Parent(s): b32bfa0

Update README.md

Files changed (1)
  1. README.md +5 -5
README.md CHANGED
@@ -39,19 +39,19 @@ For more detailed code regarding generating the annotations in Toxic Commons, tr
 
 ```
 from transformers import AutoTokenizer
- from model import MultiHeadDebertaForSequenceClassification
+ from celadon.model import MultiHeadDebertaForSequenceClassification
 
- tokenizer = AutoTokenizer.from_pretrained("PleIAs/celadon")
- model = MultiHeadDebertaForSequenceClassification.from_pretrained("PleIAs/celadon")
+ tokenizer = AutoTokenizer.from_pretrained("celadon")
+ model = MultiHeadDebertaForSequenceClassification.from_pretrained("celadon")
 model.eval()
 
 sample_text = "This is an example of a normal sentence"
 
 inputs = tokenizer(sample_text, return_tensors="pt", padding=True, truncation=True)
- outputs = model(**inputs)
+ outputs = model(input_ids=inputs['input_ids'], attention_mask=inputs['attention_mask'])
 
 categories = ['Race/Origin', 'Gender/Sex', 'Religion', 'Ability', 'Violence']
- predictions = outputs.argmax(dim=-1).squeeze().tolist()[0]
+ predictions = outputs.argmax(dim=-1).squeeze().tolist()
 
 # Print the classification results for each category
 print(f"Text: {sample_text}")