pedrocas15 committed · Commit 0be59b5 · verified · 1 Parent(s): ec7df1f

Create rpc.py

Files changed (1):
  rpc.py (+206, -0)
rpc.py ADDED
@@ -0,0 +1,206 @@
import tensorflow as tf
from tensorflow import keras
from keras.layers import *
import keras_nlp

import math
import json
from transformers import AutoTokenizer
from tokenizers import AddedToken


# Config
input_size = 512
embed_dim = 128

# Tokenizer
tokenizer = AutoTokenizer.from_pretrained('google/t5-v1_1-base')
tokenizer.add_tokens(AddedToken("\n", normalized=False))
tokenizer.add_tokens(AddedToken("<s>", normalized=False))
vocab_size = len(tokenizer.get_vocab().keys())
print("vocab_size:", vocab_size)
print("pad token id:", tokenizer.pad_token_id)

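# Round-trip sanity check (illustrative, commented out): both tokens above are
# added with normalized=False, so "\n" survives encoding and decoding intact.
# ids = tokenizer.encode("hello\nworld", add_special_tokens=False)
# print(tokenizer.decode(ids))
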
# Masked Accuracy Metric
def masked_accuracy(y_true, y_pred, padding_token=tokenizer.pad_token_id):
    # Token-level accuracy that ignores padding positions.
    y_true = tf.cast(y_true, tf.int32)
    y_pred = tf.cast(tf.argmax(y_pred, axis=-1), tf.int32)
    mask = tf.cast(tf.not_equal(y_true, padding_token), tf.float32)
    matches = tf.cast(tf.equal(y_true, y_pred), tf.float32)
    accuracy = tf.reduce_sum(matches * mask) / tf.reduce_sum(mask)
    return accuracy

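# Toy check of masked_accuracy (illustrative, commented out; assumes pad id 0):
# y_true = tf.constant([[1, 2, 0]])          # last position is padding
# y_pred = tf.one_hot([[1, 3, 0]], depth=4)  # one of the two real tokens is correct
# print(masked_accuracy(y_true, y_pred, padding_token=0))  # -> 0.5
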
# Embedding Layer
class SharedEmbedding(tf.keras.layers.Layer):
    def __init__(self, vocab_size, embed_dim, **kwargs):
        super(SharedEmbedding, self).__init__(**kwargs)
        self.vocab_size = vocab_size
        self.embed_dim = embed_dim

    def build(self, input_shape):
        self.shared_weights = self.add_weight(
            shape=(self.vocab_size, self.embed_dim),
            initializer='random_normal',
            trainable=True,
            name='shared_weights'
        )
        super(SharedEmbedding, self).build(input_shape)

    def call(self, inputs, mode='embedding', temp=0.1):
        # 'embedding': token ids -> embedding vectors.
        # 'classify': hidden states -> softmax over the vocabulary, reusing the
        # same (l2-normalized) embedding matrix as the output projection.
        if mode == 'embedding':
            return tf.nn.embedding_lookup(self.shared_weights, inputs)
        elif mode == 'classify':
            sw = tf.nn.l2_normalize(self.shared_weights, axis=-1)
            return tf.nn.softmax(tf.matmul(inputs, sw, transpose_b=True) / temp, axis=-1)

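# Illustrative shapes for the tied embedding (commented out):
# emb = SharedEmbedding(vocab_size, embed_dim)
# vecs = emb(tf.constant([[1, 2, 3]]), mode='embedding')  # (1, 3, embed_dim)
# probs = emb(vecs, mode='classify', temp=0.1)            # (1, 3, vocab_size)
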
# Attention Layer
class Attention(keras.layers.Layer):
    def __init__(self, **kwargs):
        super(Attention, self).__init__(**kwargs)

    def build(self, input_shape):
        self.embed_dim = input_shape[-1]
        # Causal mask: 0 on and below the diagonal, -inf above it.
        self.mask = tf.where(tf.linalg.band_part(tf.ones((input_shape[-2], input_shape[-2])), -1, 0) == 1.0, 0.0, float("-inf"))
        self.range_do = -tf.range(input_shape[-2]) - 1
        self.range_undo = tf.range(input_shape[-2]) + 1
        self.Q = self.add_weight(name='kernelQ',
                                 shape=(input_shape[-1], input_shape[-1]),
                                 initializer='uniform',
                                 trainable=True)
        self.K = self.add_weight(name='kernelK',
                                 shape=(input_shape[-1], input_shape[-1]),
                                 initializer='uniform',
                                 trainable=True)
        self.V = self.add_weight(name='kernelV',
                                 shape=(input_shape[-1], input_shape[-1]),
                                 initializer='uniform',
                                 trainable=True)
        super(Attention, self).build(input_shape)

    def roll_embeddings(self, tensor, shift_values):
        # Rotate each time step's last axis by a per-step shift; used to move
        # between absolute-position and relative-offset score layouts.
        batch_size, time_size, embed_dim = tensor.shape
        if batch_size is None: return tensor  # symbolic pass during model construction
        shift_matrix = tf.reshape(shift_values, (1, -1, 1))
        shift_matrix = tf.tile(shift_matrix, [batch_size, 1, embed_dim])
        indices = tf.range(embed_dim)
        indices_matrix = tf.tile(indices, [batch_size * time_size])
        indices_matrix = tf.reshape(indices_matrix, (batch_size, time_size, embed_dim))
        new_indices = (indices_matrix + shift_matrix) % embed_dim
        rolled_tensor = tf.gather(tensor, new_indices, batch_dims=2)
        return rolled_tensor

    def call(self, x, pos):
        q = x @ self.Q
        k = x @ self.K
        v = x @ self.V
        atti = tf.matmul(q, k, transpose_b=True)    # content-content scores
        attp = tf.matmul(q, pos, transpose_b=True)  # content-position scores
        attp = self.roll_embeddings(attp, self.range_do)
        att = atti + attp
        att = tf.nn.softmax((att / math.sqrt(self.embed_dim)) + self.mask, axis=-1)
        outi = att @ v
        attp = self.roll_embeddings(att, self.range_undo)
        outp = attp @ pos
        out = outi + outp
        return out

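# Note on the rolling trick (interpretive summary): attp[t, j] starts as the
# score of query t against absolute position j; rotating row t by t+1 via
# roll_embeddings re-indexes those scores by relative distance so they can be
# added to the content scores under the causal mask. After the softmax, the
# inverse rotation (range_undo) realigns the weights before they are projected
# back through pos.
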
# Encoder
inputs = Input(shape=(input_size,), dtype=tf.int32)
emb_layer = SharedEmbedding(vocab_size, embed_dim)
pos_layer = keras_nlp.layers.PositionEmbedding(input_size)

x = LayerNormalization()(emb_layer(inputs, mode="embedding"))
pos = pos_layer(x)

# b blocks of attention + feed-forward, with residual branches scaled by (2b)**-0.5
b = 6
for _ in range(b):
    x += (2 * b) ** -0.5 * LayerNormalization()(Attention()(x, pos))
    x += (2 * b) ** -0.5 * LayerNormalization()(Dense(embed_dim, activation="gelu")(x))
x = tf.nn.l2_normalize(x, axis=-1)

# b purely feed-forward residual blocks, scaled by b**-0.5
for _ in range(b):
    x1 = Dense(embed_dim, activation="gelu")(x)
    x1 = Dense(embed_dim, activation="gelu")(x1)
    x += b ** -0.5 * LayerNormalization()(x1)
x = tf.nn.l2_normalize(x, axis=-1)

# Tied classification head over the shared embedding matrix
x = emb_layer(x, mode="classify", temp=0.1)

model = keras.Model(inputs=inputs, outputs=x)
model.compile(
    loss=keras.losses.SparseCategoricalCrossentropy(ignore_class=tokenizer.pad_token_id),
    optimizer=keras.optimizers.AdamW(learning_rate=0.001),
    metrics=[masked_accuracy, keras_nlp.metrics.Perplexity(mask_token_id=tokenizer.pad_token_id)],
)

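# Interface of the compiled model (for reference): int32 token ids of shape
# (batch, input_size) map to a per-position softmax over the vocabulary,
# i.e. an output of shape (batch, input_size, vocab_size).
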
# Import Model
model.load_weights("rpc.keras")
# layers[52] is assumed to be the final l2-normalized hidden state, i.e. the
# output just before the tied classification head.
encoder = keras.Model(inputs=model.layers[0].input, outputs=model.layers[52].output)
encoder.summary()

# Vectorize Function
def vectorize_texts(all_texts):
    # all_texts is a list of already-tokenized texts (lists of token ids);
    # returns one encoder vector per token, concatenated across all texts.
    batch_size = 128
    vects = []
    for i in range(0, len(all_texts), batch_size):
        texts = all_texts[i:i+batch_size]
        toks = [text + ([tokenizer.pad_token_id] * (input_size - len(text))) for text in texts]
        if len(toks) > 0:
            toks = tf.constant(toks, shape=(len(toks), input_size))
            vect = encoder.predict(toks, verbose=0)
            for v, t in zip(vect, texts):
                vects.append(v[:len(t), :])  # keep only the non-padding positions
    return tf.concat(vects, axis=0).numpy()

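# Illustrative usage (commented out) -- note the argument is a list of
# token-id lists, not raw strings:
# toks = tokenizer.encode("hello world", add_special_tokens=False)
# vecs = vectorize_texts([toks])  # shape: (len(toks), embed_dim)
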
# Import Database and All Toks
index = None
all_toks = None
def load_index(index_path="/dev/shm/rpc-vecdb/index"):
    global index
    global all_toks
    #import ngtpy
    #index = ngtpy.Index(index_path, read_only=True)
    import faiss
    index = faiss.read_index(index_path + "/index.faiss")
    with open(index_path + "/all_toks.json", "r") as f:
        all_toks = json.loads(f.read())

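# Illustrative usage (commented out; assumes index.faiss and all_toks.json
# exist under the given directory):
# load_index("/dev/shm/rpc-vecdb/index")
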
# Generate Function
carry_toks = set()  # assumption: a collection of token ids that may be copied
                    # verbatim from the context; it is not defined in this file,
                    # and an empty set simply disables the carry branch below

def generate(text, use_rpc=True, max_tokens=128):
    enc_text = tokenizer.encode(text, add_special_tokens=False)

    tok = None  # sentinel so the first loop-condition check is defined
    i = 0
    while i < max_tokens and tok != vocab_size - 2:  # vocab_size - 2 is the added "\n" stop token

        enc_text = enc_text[-input_size:]
        if use_rpc:
            # Retrieval path: embed the current context and reuse the
            # continuation stored for its nearest database neighbour.
            xq = vectorize_texts([enc_text])[-1]
            #_id, _ = index.search(xq, size=1, epsilon=2)[0]
            D, I = index.search(xq.reshape((1, -1)), 1)
            _id = I[0][0]
            if all_toks[_id] in carry_toks:
                tmp = tf.argmax(tf.matmul(xq.reshape((1, -1)), encoder.layers[1].shared_weights, transpose_b=True), axis=-1).numpy()[0]
                if all_toks[tmp] in enc_text: tok = tmp
                else: tok = all_toks[_id]
            else: tok = all_toks[_id]
        else:
            # Plain language-model path: greedy argmax over the model's output
            # at the last real (non-padding) position.
            ins = enc_text + [tokenizer.pad_token_id] * (input_size - len(enc_text))
            ins = tf.constant(ins, shape=(1, input_size))
            res = model.predict(ins, verbose=0)[0][len(enc_text) - 1]
            tok = tf.argmax(res, axis=-1).numpy().tolist()

        enc_text += [tok]
        new_text = tokenizer.decode(enc_text)
        tok_str = new_text[len(text):]
        text = new_text
        i += 1  # count generated tokens toward max_tokens

        yield tok_str
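

# Example usage (illustrative, commented out; path and prompt are placeholders):
# load_index("/dev/shm/rpc-vecdb/index")
# for piece in generate("Once upon a time", use_rpc=True, max_tokens=32):
#     print(piece, end="", flush=True)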