# ServiceNowMTTR / app.py
import gradio as gr
import numpy as np
import pandas as pd
import nltk
from huggingface_hub import hf_hub_download
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from sklearn.preprocessing import LabelEncoder, StandardScaler
from nltk.sentiment.vader import SentimentIntensityAnalyzer
# Download VADER lexicon for sentiment analysis
nltk.download('vader_lexicon')
# Load the model from Hugging Face
model_path = hf_hub_download(repo_id="xeroISB/ServiceNowMTTR", filename="my_model.h5")
model = load_model(model_path)
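# Assuming the architecture matches the preprocessing below, the model expects a
# (batch, 55) input: 50 padded token ids followed by 5 scaled numeric features.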
# Initialize Tokenizer and LabelEncoders
tokenizer = Tokenizer(num_words=10000, oov_token='<OOV>')
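# NOTE: this Tokenizer is created empty and never fitted on the training corpus, so
# its word index is blank at inference time; ideally the tokenizer fitted during
# training would be saved and loaded with the model (the training setup is not
# shown here, so this is an assumption about how it was built).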
label_encoders = {
    'impact': LabelEncoder(),
    'priority': LabelEncoder(),
    'category': LabelEncoder(),
    'urgency': LabelEncoder()
}
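# NOTE: these LabelEncoders are fitted per request inside preprocess_input, so the
# integer codes are derived only from the submitted values and may not match the
# encoding used at training time; loading the training-time encoders would be safer.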
# Function to preprocess input data
def preprocess_input(short_description, impact, priority, category, urgency):
    # Encode categorical features
    input_data = pd.DataFrame({
        'short_description': [short_description],
        'impact': [impact],
        'priority': [priority],
        'category': [category],
        'urgency': [urgency]
    })
    for column in ['impact', 'priority', 'category', 'urgency']:
        input_data[column] = label_encoders[column].fit_transform(input_data[column])
    short_description = input_data['short_description'].iloc[0].lower()
    # Tokenize text data (texts_to_sequences expects a list of strings;
    # passing a bare string would tokenize it character by character)
    sequences = tokenizer.texts_to_sequences([short_description])
    print("Short description:", input_data['short_description'].iloc[0])
    print("Sequences:", sequences)
    if not sequences or not sequences[0]:
        return None, None  # Handle empty sequences
    padded_sequences = pad_sequences(sequences, maxlen=50, padding='post', truncating='post')
    # Feature engineering: add a VADER compound sentiment score
    sid = SentimentIntensityAnalyzer()
    input_data['sentiment_score'] = input_data['short_description'].apply(
        lambda x: sid.polarity_scores(x)['compound']
    )
    # Normalize numerical features
    numerical_features = input_data[['impact', 'priority', 'category', 'urgency', 'sentiment_score']]
    scaler = StandardScaler()
    scaled_numerical_features = scaler.fit_transform(numerical_features)
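    # Caveat: fitting StandardScaler on a single row zero-centers every column, so all
    # scaled values come out as 0.0; reusing the scaler fitted on the training data
    # would preserve the intended feature scale.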
    # Prepare the final input features
    X_input = np.concatenate([padded_sequences, scaled_numerical_features], axis=1)
    return X_input, input_data['sentiment_score'].iloc[0]
# Function to make predictions
def predict(short_description, impact, priority, category, urgency):
    X_input, sentiment_score = preprocess_input(short_description, impact, priority, category, urgency)
    if X_input is None:
        return "Could not tokenize the short description", None
    predictions = model.predict(X_input)
    predicted_label = np.argmax(predictions, axis=1)[0]
    return int(predicted_label), sentiment_score
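# Example of calling predict() directly, with hypothetical values matching the UI placeholders:
#   label, score = predict("Email server unreachable for all users",
#                          "2 - Medium", "2 - Medium", "Network", "1 - High")
#   print(label, score)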
# Define Gradio interface
inputs = [
    gr.components.Textbox(lines=2, placeholder="Enter short description...", label="Short Description"),
    gr.components.Textbox(lines=1, placeholder="Enter impact...", label="Impact (e.g., '2 - Medium')"),
    gr.components.Textbox(lines=1, placeholder="Enter priority...", label="Priority (e.g., '2 - Medium')"),
    gr.components.Textbox(lines=1, placeholder="Enter category...", label="Category (e.g., 'Network')"),
    gr.components.Textbox(lines=1, placeholder="Enter urgency...", label="Urgency (e.g., '1 - High')")
]
outputs = [
    gr.components.Textbox(label="Predicted Duration Bin"),
    gr.components.Textbox(label="Sentiment Score")
]
interface = gr.Interface(
    fn=predict,
    inputs=inputs,
    outputs=outputs,
    title="Issue Resolution Predictor",
    description="Predict the duration bin and sentiment score based on issue description and related features."
)
# Launch the interface
interface.launch()