import gradio as gr
import pandas as pd
from transformers import AutoTokenizer, AutoModelForSequenceClassification
from sklearn.preprocessing import LabelEncoder, StandardScaler
from nltk.sentiment.vader import SentimentIntensityAnalyzer
import nltk
import torch

# Download VADER lexicon for sentiment analysis
nltk.download('vader_lexicon')

# Load model and tokenizer from Hugging Face
tokenizer = AutoTokenizer.from_pretrained("xeroISB/ServiceNowMTTR")
model = AutoModelForSequenceClassification.from_pretrained("xeroISB/ServiceNowMTTR")
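# The classification head predicts one of four duration bins
# (0: 1-2 hours, 1: 3-4 hours, 2: 4-8 hours, 3: more than 8 hours; see the output label below)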

# Move model to GPU if available
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
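model.eval()  # explicit inference mode; from_pretrained already returns an eval-mode model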

# Initialize LabelEncoders
label_encoders = {
    'impact': LabelEncoder(),
    'priority': LabelEncoder(),
    'category': LabelEncoder(),
    'urgency': LabelEncoder()
}

# Function to preprocess input data
def preprocess_input(short_description, impact, priority, category, urgency):
    # Encode categorical features
    input_data = pd.DataFrame({
        'short_description': [short_description],
        'impact': [impact],
        'priority': [priority],
        'category': [category],
        'urgency': [urgency]
    })
    
    for column in ['impact', 'priority', 'category', 'urgency']:
        input_data[column] = label_encoders[column].fit_transform(input_data[column])
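    # NOTE: each LabelEncoder is fit on this single input row, so every category
    # maps to 0. These encoded values (and the scaled features below) are not
    # passed to the model, which only consumes the tokenized short description.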
    
    short_description = input_data['short_description'].iloc[0].lower()
    
    # Tokenize text data using the new tokenizer
    inputs = tokenizer(short_description, return_tensors='pt', padding='max_length', truncation=True, max_length=50)
    inputs = {k: v.to(device) for k, v in inputs.items()}
    
    # Feature engineering: Add sentiment score
    sid = SentimentIntensityAnalyzer()
    input_data['sentiment_score'] = input_data['short_description'].apply(lambda x: sid.polarity_scores(x)['compound'])
    
    # Normalize numerical features (StandardScaler fit on one row centres every
    # value to 0; the raw VADER compound score is returned separately for display)
    numerical_features = input_data[['impact', 'priority', 'category', 'urgency', 'sentiment_score']]
    scaler = StandardScaler()
    scaled_numerical_features = scaler.fit_transform(numerical_features)

    return inputs, scaled_numerical_features, input_data['sentiment_score'].iloc[0]

# Function to make predictions
def predict(short_description, impact, priority, category, urgency):
    inputs, scaled_numerical_features, sentiment_score = preprocess_input(short_description, impact, priority, category, urgency)
    
    # Make prediction
    with torch.no_grad():
        outputs = model(**inputs)
        logits = outputs.logits
        predicted_label = torch.argmax(logits, dim=1).item()
    
    # Report the raw VADER compound score (range -1 to 1); the standardized value
    # from the single-row scaler is always 0 and not meaningful for display
    return predicted_label, sentiment_score

# Define Gradio interface
inputs = [
    gr.components.Textbox(lines=2, placeholder="Enter short description...", label="Short Description"),
    gr.components.Textbox(lines=1, placeholder="Enter impact...", label="Impact (e.g., '2 - Medium')"),
    gr.components.Textbox(lines=1, placeholder="Enter priority...", label="Priority (e.g., '2 - Medium')"),
    gr.components.Textbox(lines=1, placeholder="Enter category...", label="Category (e.g., 'Network')"),
    gr.components.Textbox(lines=1, placeholder="Enter urgency...", label="Urgency (e.g., '1 - High')")
]

outputs = [
    gr.components.Textbox(label="Predicted Duration Bin 0 : 1-2 hours, 1: 3-4 hours, 2: 4-8 hours, 3: More than 8 hours"),
    gr.components.Textbox(label="Sentiment Score")
]

interface = gr.Interface(fn=predict, inputs=inputs, outputs=outputs, title="Incident Duration Predictor", description="Predict the duration bin and sentiment score based on issue description and related features.")

# Launch the interface
interface.launch()
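
# Optional: interface.launch(share=True) creates a temporary public link, and
# interface.launch(server_name="0.0.0.0", server_port=7860) binds an explicit host/port.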