"""Train a simple sarcasm classifier on news headlines with TensorFlow/Keras."""

import json

import numpy as np
import tensorflow as tf
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Sequential


# Fraction of the dataset used for training; the rest is held out for testing.
train_split = 0.75

# Tokenizer and padding settings.
vocab_size = 10000
embedding_dim = 16
max_length = 100
trunc_type = 'post'
padding_type = 'post'
oov_tok = "<OOV>"

# Example sentences (not used by the pipeline below).
sentences = [
    'Wow this AI is astonishing',
    'This is the worst AI',
    'This is the best AI',
    'I am the best AI',
    'It is very astonishing that we can train a model on any data we have',
]


headlines = []
is_sarcastic = []
article_link = []

# Load the sarcasm dataset. This assumes the file is a single JSON array of
# records with 'headline', 'is_sarcastic', and 'article_link' keys; if the
# file is newline-delimited JSON instead, parse it line by line with json.loads.
with open('Sarcasm_Headlines_Dataset.json', 'r') as f:
    data = json.load(f)

for record in data:
    headlines.append(record['headline'])
    is_sarcastic.append(record['is_sarcastic'])
    article_link.append(record['article_link'])


# Split headlines and labels into training and test portions.
split_index = int(len(headlines) * train_split)
train_data = headlines[:split_index]
train_result = is_sarcastic[:split_index]
test_data = headlines[split_index:]
test_result = is_sarcastic[split_index:]


# Fit the tokenizer on the training headlines only, then convert both splits
# into padded integer sequences of equal length.
tokenizer = Tokenizer(num_words=vocab_size, oov_token=oov_tok)
tokenizer.fit_on_texts(train_data)

word_index = tokenizer.word_index

train_sequences = tokenizer.texts_to_sequences(train_data)
test_sequences = tokenizer.texts_to_sequences(test_data)
train_padded = pad_sequences(
    train_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)
test_padded = pad_sequences(
    test_sequences, maxlen=max_length, padding=padding_type, truncating=trunc_type)

# model.fit expects NumPy arrays rather than plain Python lists.
training_padded = np.array(train_padded)
training_labels = np.array(train_result)
testing_padded = np.array(test_padded)
testing_labels = np.array(test_result)


# Embedding -> global average pooling -> small dense head with a sigmoid
# output for binary (sarcastic vs. not sarcastic) classification.
model = Sequential([
    tf.keras.layers.Embedding(
        vocab_size, embedding_dim, input_length=max_length),
    tf.keras.layers.GlobalAveragePooling1D(),
    tf.keras.layers.Dense(24, activation='relu'),
    tf.keras.layers.Dense(1, activation='sigmoid')
])
model.compile(loss='binary_crossentropy',
              optimizer='adam', metrics=['accuracy'])

model.summary()

# Train, validating on the held-out split after every epoch.
num_epochs = 30
history = model.fit(training_padded, training_labels, epochs=num_epochs,
                    validation_data=(testing_padded, testing_labels), verbose=2)
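
# Illustrative addition (not part of the original script): plot training vs.
# validation accuracy from the History object returned by model.fit; assumes
# matplotlib is installed and TF 2.x history keys ('accuracy' / 'val_accuracy').
import matplotlib.pyplot as plt

plt.plot(history.history['accuracy'], label='train accuracy')
plt.plot(history.history['val_accuracy'], label='val accuracy')
plt.xlabel('epoch')
plt.legend()
plt.show()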

# Score a few unseen headlines as a quick sanity check, using the same
# tokenizer and padding settings as during training.
sentence = ["granny starting to fear spiders in the garden might be real",
            "game of thrones season finale showing this sunday night",
            "Central Valley Coalition Suing the EPA Over Clean Air Failures"]
sequences = tokenizer.texts_to_sequences(sentence)
padded = pad_sequences(sequences, maxlen=max_length,
                       padding=padding_type, truncating=trunc_type)
predictions = model.predict(padded)
print(predictions)
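
# Illustrative addition: the sigmoid outputs are probabilities in [0, 1]; a
# common convention (an assumption here, not part of the original script) is
# to label anything above 0.5 as sarcastic.
for text, prob in zip(sentence, predictions[:, 0]):
    print(f"{prob:.3f}  {'sarcastic' if prob > 0.5 else 'not sarcastic'}  {text}")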