Upload helpdesk_multilanguage.py

helpdesk_multilanguage.py  ADDED  (+87 -0)
@@ -0,0 +1,87 @@
# -*- coding: utf-8 -*-
"""Helpdesk MultiLanguage

Automatically generated by Colab.

Original file is located at
    https://colab.research.google.com/drive/1hukfVm3vIbdmE1jpGZUoZDhHeuTs-AlR
"""
# Notebook-style dependency install (this script was exported from Colab).
!pip install datasets

import pandas as pd
import plotly.express as px
import os
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix
from transformers import pipeline, AutoTokenizer, AutoModelForSequenceClassification, TrainingArguments, Trainer
from datasets import Dataset
import torch
import numpy as np
!pip install wandb

import wandb
# Read the API key from the environment rather than hard-coding a secret in source.
wandb.login(key=os.environ["WANDB_API_KEY"])
# Load the labeled multilingual (English/Spanish/French/German) helpdesk tickets.
data_path = "/content/ticket_helpdesk_labeled_multi_languages_english_spain_french_german.csv"
df = pd.read_csv(data_path)

print("First few rows of the dataset:")
print(df.head())
print("\nEDA and Visualization")
print("\nSummary statistics:")
print(df.describe(include='all'))
# One histogram per categorical column; coloring by the same column gives each
# category its own color. Behavior is identical to plotting each one by hand.
for col, title in [
    ('queue', 'Distribution of Queue Categories'),
    ('priority', 'Distribution of Priority Levels'),
    ('language', 'Distribution of Languages'),
    ('software_used', 'Distribution of Software Used'),
    ('hardware_used', 'Distribution of Hardware Used'),
    ('accounting_category', 'Distribution of Accounting Categories'),
]:
    px.histogram(df, x=col, title=title, color=col).show()
# Combined view: stacked subplots for the three main categorical distributions.
fig = make_subplots(rows=3, cols=1, subplot_titles=('Priority Distribution', 'Language Distribution', 'Queue Distribution'))

fig.add_trace(go.Histogram(x=df['priority'], name='Priority'), row=1, col=1)
fig.add_trace(go.Histogram(x=df['language'], name='Language'), row=2, col=1)
fig.add_trace(go.Histogram(x=df['queue'], name='Queue'), row=3, col=1)

fig.update_layout(title_text='Distributions of Priority, Language, and Queue', showlegend=False)
fig.show()
fig_scatter = px.scatter(df, x='priority', y='queue', color='priority', title='Scatter Plot of Priority vs. Queue')
fig_scatter.show()
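# Added view (not in the uploaded file): both axes above are categorical, so the
# scatter collapses to a grid of overplotted points; a density heatmap makes the
# priority/queue co-occurrence counts visible.
fig_heatmap = px.density_heatmap(df, x='priority', y='queue', title='Priority vs. Queue Co-occurrence')
fig_heatmap.show()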
# Drop tickets with missing text and make sure the text column is string-typed.
df = df.dropna(subset=['text'])
df['text'] = df['text'].astype(str)

# Encode the target ('queue') as integer codes, keeping a code -> name mapping
# so predictions can be decoded back to queue names later.
df['queue_encoded'] = df['queue'].astype('category').cat.codes
queue_mapping = dict(enumerate(df['queue'].astype('category').cat.categories))
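# Added for inspection (not in the uploaded file): show the code -> queue-name
# mapping that will decode model predictions.
print("Queue label mapping:", queue_mapping)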
X_train, X_test, y_train, y_test = train_test_split(df['text'], df['queue_encoded'], test_size=0.2, random_state=42)
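# Sanity check (an addition, not in the uploaded file): compare class balance
# across the two splits; if the queues are imbalanced, consider re-splitting
# with stratify=df['queue_encoded'] so both splits share the same proportions.
print("Train label shares:\n", y_train.value_counts(normalize=True).round(3))
print("Test label shares:\n", y_test.value_counts(normalize=True).round(3))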
# Wrap the splits as Hugging Face Datasets for use with Trainer.
train_data = Dataset.from_dict({'text': X_train.tolist(), 'label': y_train.tolist()})
test_data = Dataset.from_dict({'text': X_test.tolist(), 'label': y_test.tolist()})
# Multilingual encoder: XLM-RoBERTa covers all four ticket languages with one model.
model_name = "xlm-roberta-base"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name, num_labels=df['queue_encoded'].nunique())
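# Optional refinement (not in the uploaded file): attach the human-readable
# queue names to the model config so saved checkpoints and pipelines report
# category names instead of LABEL_0-style ids. `queue_mapping` comes from the
# encoding step above.
model.config.id2label = {code: name for code, name in queue_mapping.items()}
model.config.label2id = {name: code for code, name in queue_mapping.items()}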
def preprocess_function(examples):
    # Tokenize the ticket text; truncation caps overly long tickets.
    return tokenizer(examples['text'], truncation=True, padding=True)

train_data = train_data.map(preprocess_function, batched=True)
test_data = test_data.map(preprocess_function, batched=True)
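# --- Sketch of the training step (not in the uploaded file) ---
# The file ends after tokenization, but TrainingArguments, Trainer and
# classification_report are imported above, so training presumably continues
# along these lines. Paths and hyperparameter values below are illustrative
# assumptions, not the author's settings.

def compute_metrics(eval_pred):
    # Convert logits to predicted class ids and report plain accuracy.
    logits, labels = eval_pred
    preds = np.argmax(logits, axis=-1)
    return {"accuracy": float((preds == labels).mean())}

training_args = TrainingArguments(
    output_dir="./results",          # assumed path
    num_train_epochs=3,              # assumed value
    per_device_train_batch_size=16,  # assumed value
    eval_strategy="epoch",           # `evaluation_strategy` on older transformers versions
    report_to="wandb",               # matches the wandb login above
)

trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=train_data,
    eval_dataset=test_data,
    tokenizer=tokenizer,             # enables dynamic padding per batch
    compute_metrics=compute_metrics,
)
trainer.train()

# Per-queue report on the held-out split, using the imported
# classification_report and the decoded queue names.
preds = np.argmax(trainer.predict(test_data).predictions, axis=-1)
print(classification_report(y_test, preds, target_names=[queue_mapping[i] for i in sorted(queue_mapping)]))

# The imported (but so far unused) `pipeline` could then serve inference, e.g.:
# clf = pipeline('text-classification', model=model, tokenizer=tokenizer)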