Update README.md
README.md
```
pipe = pipeline(model=model_name, device=device, truncation=True, max_length=max_seq_len)

is_it_a_joke = """A nervous passenger is about to book a flight ticket, and he asks the airlines' ticket seller, "I hope your planes are safe. Do they have a good track record for safety?" The airline agent replies, "Sir, I can guarantee you, we've never had a plane that has crashed more than once." """
result = pipe(is_it_a_joke)  # [{'label': 'LABEL_1', 'score': 0.7313136458396912}]
print('This is a joke') if result[0]['label'] == 'LABEL_1' else print('This is not a joke')

# Or if you don't want to use pipelines
from transformers import AutoTokenizer, AutoModelForSequenceClassification
import torch

device = 'cuda' if torch.cuda.is_available() else 'cpu'  # torch device string; the 0/-1 convention above is only for pipeline()
model_name = 'Reggie/muppet-roberta-base-joke_detector'
max_seq_len = 510

tokenizer = AutoTokenizer.from_pretrained(model_name, model_max_length=max_seq_len)
model = AutoModelForSequenceClassification.from_pretrained(model_name).to(device)

# Tokenize, run the classifier, and turn the logits into probabilities
inputs = tokenizer(is_it_a_joke, truncation=True, return_tensors="pt")
output = model(inputs["input_ids"].to(device))
prediction = torch.softmax(output["logits"][0], -1).tolist()

# Index 1 corresponds to LABEL_1 (joke)
pred_out = 1 if prediction[0] < prediction[1] else 0
print('This is a joke') if pred_out == 1 else print('This is not a joke')
```
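
If you want to classify many texts, the manual (non-pipeline) path above can be wrapped in a small helper. The sketch below is only illustrative and not part of the model card: the `is_joke` function name is made up here, and it assumes, as the example output above shows, that index 1 (`LABEL_1`) is the "joke" class.

```
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

model_name = 'Reggie/muppet-roberta-base-joke_detector'
device = 'cuda' if torch.cuda.is_available() else 'cpu'

tokenizer = AutoTokenizer.from_pretrained(model_name, model_max_length=510)
model = AutoModelForSequenceClassification.from_pretrained(model_name).to(device)
model.eval()

def is_joke(text: str) -> bool:
    """Return True when the classifier scores LABEL_1 (index 1, 'joke') highest."""
    inputs = tokenizer(text, truncation=True, return_tensors="pt").to(device)
    with torch.no_grad():
        logits = model(**inputs).logits
    probs = torch.softmax(logits[0], dim=-1)
    return bool(probs[1] > probs[0])

# Example call with an arbitrary sample text
print(is_joke("Why don't scientists trust atoms? Because they make up everything."))
```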
|