import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
import tf_keras as keras
import pandas as pd
from tensorflow.keras.models import load_model
from official.nlp.data import classifier_data_lib
from official.nlp.tools import tokenization
import joblib
# Load the fine-tuned model; hub.KerasLayer must be registered as a custom
# object so the TF Hub BERT layer deserializes correctly.
model = load_model('best_model.h5', custom_objects={'KerasLayer': hub.KerasLayer})

# The vocab file and casing flag live on the hub.KerasLayer inside the model,
# not on the Keras model itself, so locate that layer first (assuming the BERT
# encoder is a top-level layer of the saved model).
bert_layer = next(layer for layer in model.layers if isinstance(layer, hub.KerasLayer))
vocab_file = bert_layer.resolved_object.vocab_file.asset_path.numpy()
do_lower_case = bert_layer.resolved_object.do_lower_case.numpy()
tokenizer = tokenization.FullTokenizer(vocab_file, do_lower_case)
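
# Sanity-check sketch (not in the original file): FullTokenizer splits raw
# text into WordPiece tokens and maps them to vocab ids, e.g.
#   tokens = tokenizer.tokenize("hello world")
#   ids = tokenizer.convert_tokens_to_ids(tokens)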
# Parameters
max_seq_length = 128
# convert_single_example expects a list of labels (it enumerates them to build
# a label map), not a count; assuming 424 classes indexed 0..423.
label_list = list(range(424))
dummy_label = 100  # placeholder label for inference; must appear in label_list
# Preprocess a single raw text tensor into BERT input features.
def get_feature_new(text, max_seq_length, tokenizer, dummy_label):
    example = classifier_data_lib.InputExample(guid=None,
                                               text_a=text.numpy().decode('utf-8'),
                                               text_b=None,
                                               label=dummy_label)  # valid dummy label for inference
    feature = classifier_data_lib.convert_single_example(0, example, label_list,
                                                         max_seq_length, tokenizer)
    return feature.input_ids, feature.input_mask, feature.segment_ids
# Wrap the Python preprocessing in tf.py_function so it can run inside a
# tf.data pipeline, then restore the static shapes lost by py_function.
def get_feature_map_new(text):
    input_ids, input_mask, segment_ids = tf.py_function(
        lambda text: get_feature_new(text, max_seq_length, tokenizer, dummy_label),
        inp=[text],
        Tout=[tf.int32, tf.int32, tf.int32]
    )
    input_ids.set_shape([max_seq_length])
    input_mask.set_shape([max_seq_length])
    segment_ids.set_shape([max_seq_length])

    # Key names must match the input names of the saved BERT model.
    x = {'input_word_ids': input_ids,
         'input_mask': input_mask,
         'input_type_ids': segment_ids}
    return x
# Build a batched, prefetched tf.data pipeline over raw text strings.
def preprocess_new_data(texts):
    dataset = tf.data.Dataset.from_tensor_slices((texts,))
    dataset = dataset.map(get_feature_map_new,
                          num_parallel_calls=tf.data.experimental.AUTOTUNE)
    dataset = dataset.batch(32, drop_remainder=False)
    dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
    return dataset
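
# --- Usage sketch (an addition, not part of the original file) ---
# Shows how the pipeline above would feed model.predict; the sample texts and
# the argmax decoding of the 424-way output are hypothetical assumptions.
if __name__ == '__main__':
    sample_texts = ["example input text one", "example input text two"]
    dataset = preprocess_new_data(sample_texts)
    # model.predict consumes the dict of input tensors produced by
    # get_feature_map_new; output shape is assumed to be (batch, num_classes).
    probs = model.predict(dataset)
    predicted_ids = np.argmax(probs, axis=-1)
    print(predicted_ids)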