HelpDeskMultiLanguage / helpdesk_multilanguage.py
# -*- coding: utf-8 -*-
"""Helpdesk MultiLanguage
Automatically generated by Colab.
Original file is located at
https://colab.research.google.com/drive/1hukfVm3vIbdmE1jpGZUoZDhHeuTs-AlR
"""
# Install dependencies that are not preinstalled in Colab
!pip install datasets wandb
import pandas as pd
import plotly.express as px
import os
import plotly.graph_objects as go
from plotly.subplots import make_subplots
from sklearn.model_selection import train_test_split
from sklearn.metrics import classification_report, confusion_matrix
from transformers import pipeline, AutoTokenizer, AutoModelForSequenceClassification, TrainingArguments, Trainer
from datasets import Dataset
import torch
import numpy as np
import wandb

# Authenticate with Weights & Biases; supply the API key via the
# WANDB_API_KEY environment variable instead of hard-coding it here.
wandb.login()
# Load the labeled multilingual (English, Spanish, French, German) helpdesk ticket dataset
data_path = "/content/ticket_helpdesk_labeled_multi_languages_english_spain_french_german.csv"
df = pd.read_csv(data_path)
print("First few rows of the dataset:")
print(df.head())
print("\nEDA and Visualization")
print("\nSummary statistics:")
print(df.describe(include='all'))
fig_queue = px.histogram(df, x='queue', title='Distribution of Queue Categories', color='queue')
fig_queue.show()
fig_priority = px.histogram(df, x='priority', title='Distribution of Priority Levels', color='priority')
fig_priority.show()
fig_language = px.histogram(df, x='language', title='Distribution of Languages', color='language')
fig_language.show()
fig_software = px.histogram(df, x='software_used', title='Distribution of Software Used', color='software_used')
fig_software.show()
fig_hardware = px.histogram(df, x='hardware_used', title='Distribution of Hardware Used', color='hardware_used')
fig_hardware.show()
fig_accounting = px.histogram(df, x='accounting_category', title='Distribution of Accounting Categories', color='accounting_category')
fig_accounting.show()
fig = make_subplots(rows=3, cols=1, subplot_titles=('Priority Distribution', 'Language Distribution', 'Queue Distribution'))
fig.add_trace(go.Histogram(x=df['priority'], name='Priority'), row=1, col=1)
fig.add_trace(go.Histogram(x=df['language'], name='Language'), row=2, col=1)
fig.add_trace(go.Histogram(x=df['queue'], name='Queue'), row=3, col=1)
fig.update_layout(title_text='Distributions of Priority, Language, and Queue', showlegend=False)
fig.show()
fig_scatter = px.scatter(df, x='priority', y='queue', color='priority', title='Scatter Plot of Priority vs. Queue')
fig_scatter.show()
# Drop tickets with missing text and encode the queue labels as integers
df = df.dropna(subset=['text'])
df['text'] = df['text'].astype(str)
df['queue_encoded'] = df['queue'].astype('category').cat.codes
queue_mapping = dict(enumerate(df['queue'].astype('category').cat.categories))

# Hold out 20% of the tickets for evaluation
X_train, X_test, y_train, y_test = train_test_split(df['text'], df['queue_encoded'], test_size=0.2, random_state=42)

# Wrap the splits as Hugging Face Datasets for use with the Trainer API
train_data = Dataset.from_dict({'text': X_train.tolist(), 'label': y_train.tolist()})
test_data = Dataset.from_dict({'text': X_test.tolist(), 'label': y_test.tolist()})
# Load a multilingual XLM-RoBERTa encoder for queue classification
model_name = "xlm-roberta-base"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSequenceClassification.from_pretrained(model_name, num_labels=df['queue_encoded'].nunique())

def preprocess_function(examples):
    # Tokenize the ticket text, truncating to the model's maximum input length
    return tokenizer(examples['text'], truncation=True, padding=True)

train_data = train_data.map(preprocess_function, batched=True)
test_data = test_data.map(preprocess_function, batched=True)
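
# The uploaded file ends here even though TrainingArguments, Trainer and the
# sklearn metrics are imported above, so what follows is only a minimal sketch
# of how fine-tuning could continue. The output directory, epoch count, batch
# size and learning rate are illustrative assumptions, not values from the
# original notebook.
def compute_metrics(eval_pred):
    # Simple accuracy over the evaluation split
    logits, labels = eval_pred
    preds = np.argmax(logits, axis=-1)
    return {"accuracy": float((preds == labels).mean())}

training_args = TrainingArguments(
    output_dir="./helpdesk_queue_model",  # assumed output path
    num_train_epochs=3,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=16,
    learning_rate=2e-5,
    report_to="wandb",  # log to the W&B run authenticated above
)

trainer = Trainer(
    model=model,
    args=training_args,
    train_dataset=train_data,
    eval_dataset=test_data,
    tokenizer=tokenizer,  # enables dynamic padding of each batch
    compute_metrics=compute_metrics,
)

trainer.train()

# Evaluate on the held-out split with the sklearn metrics imported above
predictions = trainer.predict(test_data)
y_pred = np.argmax(predictions.predictions, axis=-1)
print(classification_report(y_test, y_pred))
print(confusion_matrix(y_test, y_pred))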