amandakonet
committed on
Commit
•
e8d285b
1
Parent(s):
6686bbd
Update README.md
Browse files
README.md
CHANGED
@@ -6,23 +6,26 @@ datasets: climatebert/distilroberta-base-climate-f
|
|
6 |
tags:
|
7 |
- fact-checking
|
8 |
- climate
|
|
|
9 |
---
|
10 |
|
11 |
This model is ClimateBert fine-tuned on the textual entailment task. Given (claim, evidence) pairs, the model predicts support (entailment), refutation (contradiction), or not enough info (neutral).
|
12 |
|
|
|
13 |
from transformers import AutoTokenizer, AutoModelForSequenceClassification
|
14 |
import torch
|
15 |
|
16 |
-
model = AutoModelForSequenceClassification.from_pretrained(
|
17 |
-
tokenizer = AutoTokenizer.from_pretrained(
|
18 |
|
19 |
-
features = tokenizer(['
|
20 |
-
['
|
21 |
-
padding=
|
22 |
|
23 |
model.eval()
|
24 |
with torch.no_grad():
|
25 |
scores = model(**features).logits
|
26 |
label_mapping = ['contradiction', 'entailment', 'neutral']
|
27 |
labels = [label_mapping[score_max] for score_max in scores.argmax(dim=1)]
|
28 |
-
print(labels)
|
|
|
|
6 |
tags:
|
7 |
- fact-checking
|
8 |
- climate
|
9 |
+
- text entailment
|
10 |
---
|
11 |
|
12 |
This model is ClimateBert fine-tuned on the textual entailment task. Given (claim, evidence) pairs, the model predicts support (entailment), refutation (contradiction), or not enough info (neutral).
|
13 |
|
14 |
+
```python
|
15 |
from transformers import AutoTokenizer, AutoModelForSequenceClassification
|
16 |
import torch
|
17 |
|
18 |
+
model = AutoModelForSequenceClassification.from_pretrained("amandakonet/climatebert-fact-checking", use_auth_token=True)
|
19 |
+
tokenizer = AutoTokenizer.from_pretrained("amandakonet/climatebert-fact-checking", use_auth_token=True)
|
20 |
|
21 |
+
features = tokenizer(['Beginning in 2005, however, polar ice modestly receded for several years'],
|
22 |
+
['Polar Discovery "Continued Sea Ice Decline in 2005'],
|
23 |
+
padding='max_length', truncation=True, return_tensors="pt", max_length=512)
|
24 |
|
25 |
model.eval()
|
26 |
with torch.no_grad():
|
27 |
scores = model(**features).logits
|
28 |
label_mapping = ['contradiction', 'entailment', 'neutral']
|
29 |
labels = [label_mapping[score_max] for score_max in scores.argmax(dim=1)]
|
30 |
+
print(labels)
|
31 |
+
```
|