# "Spaces: Runtime error" -- Hugging Face Spaces status-banner text captured
# by the page scrape; not part of the program source.
import pandas as pd
import pickle as pkl
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.dummy import DummyClassifier
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.linear_model import Perceptron
from numpy import reshape
import numpy as np
from sklearn.metrics import accuracy_score
from sklearn.metrics import classification_report
from sklearn.naive_bayes import GaussianNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import Perceptron
from sklearn.dummy import DummyClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.neural_network import MLPClassifier
from sklearn import svm
import gradio as gr
class NLP:
    """Pre-trained review-sentiment models (binary polarity and 1-5 rating).

    Loads pickled vectorizers, test splits and classifiers from ``models/``
    and exposes one evaluation method per (model, task) pair.  ``Manage``
    dispatches on model name and dataset and formats the result for the UI.
    Each evaluator returns ``(probability_rows, accuracy)`` where
    ``probability_rows`` is a 1xN nested list (N=2 for polarity, N=4 for
    rating) and ``accuracy`` is the model's cached test-set score as a string.
    """

    def __init__(self) -> None:
        self.__path = "models/"
        # Dispatch table: model name -> [polarity evaluator, rating evaluator].
        self.__exec = {
            "Perceptron": [self.perceptron_pol_eval, self.perceptron_rat_eval],
            "K-Neighbors": [self.kneighbors_pol_eval, self.kneighbors_rat_eval],
            "Naive Bayes": [self.NB_pol_eval, self.NB_rat_eval],
            "SVM": [self.SVM_pol_eval, self.SVM_rat_eval],
            "Random Forest": [self.RF_pol_eval, self.RF_rat_eval],
            "NN (MLP)": [self.MLP_pol_eval, self.MLP_rat_eval],
            "Dummy (Baseline)": [self.Dummy_pol_eval, self.Dummy_rat_eval],
        }
        self.__get_vocabulary()
        self.__vectorizer_pol = self.__load("vectorizer_pol.pkl")
        self.__vectorizer_rat = self.__load("vectorizer_rat.pkl")
        self.__X_pol_test = self.__load("X_pol_test.pkl")
        self.__y_pol_test = self.__load("y_pol_test.pkl")
        # Rating models are scored on the same feature matrix as polarity ones.
        self.__X_rat_test = self.__X_pol_test
        self.__y_rat_test = self.__load("y_rat_test.pkl")
        self.__get_models()

    def __load(self, filename):
        """Unpickle ``models/<filename>``, closing the file handle afterwards.

        NOTE(review): ``pickle.load`` executes arbitrary code when fed
        untrusted data -- these must only ever be the project's own artifacts.
        """
        # Fix: the original did pkl.load(open(...)) and leaked every handle.
        with open(self.__path + filename, "rb") as fh:
            return pkl.load(fh)

    def __get_models(self):
        """Load every deployed classifier and cache its test-set accuracy."""
        self.__perceptron_pol = self.__load("perceptron_pol.pkl")
        self.__perceptron_pol_score = self.__perceptron_pol.score(self.__X_pol_test, self.__y_pol_test)
        self.__perceptron_rat = self.__load("perceptron_rat.pkl")
        self.__perceptron_rat_score = self.__perceptron_rat.score(self.__X_rat_test, self.__y_rat_test)
        self.__rf_pol = self.__load("rf_pol.pkl")
        self.__rf_pol_score = self.__rf_pol.score(self.__X_pol_test, self.__y_pol_test)
        self.__rf_rat = self.__load("rf_rat.pkl")
        self.__rf_rat_score = self.__rf_rat.score(self.__X_rat_test, self.__y_rat_test)
        self.__nb_pol = self.__load("nb_pol.pkl")
        self.__nb_pol_score = self.__nb_pol.score(self.__X_pol_test, self.__y_pol_test)
        self.__nb_rat = self.__load("nb_rat.pkl")
        self.__nb_rat_score = self.__nb_rat.score(self.__X_rat_test, self.__y_rat_test)
        # The SVM and K-Neighbors artifacts are too large to deploy here;
        # their eval methods return hard-coded placeholder results instead.
        self.__dummy_pol = self.__load("dummy_pol.pkl")
        self.__dummy_pol_score = self.__dummy_pol.score(self.__X_pol_test, self.__y_pol_test)
        self.__dummy_rat = self.__load("dummy_rat.pkl")
        self.__dummy_rat_score = self.__dummy_rat.score(self.__X_rat_test, self.__y_rat_test)
        self.__clf_pol = self.__load("clf_pol.pkl")
        self.__clf_pol_score = self.__clf_pol.score(self.__X_pol_test, self.__y_pol_test)
        self.__clf_rat = self.__load("clf_rat.pkl")
        self.__clf_rat_score = self.__clf_rat.score(self.__X_rat_test, self.__y_rat_test)

    def perceptron_pol_eval(self, evalu):
        """Binary polarity: returns ([[positive, negative]], accuracy-string)."""
        # Extract the scalar class label (0 or 1) instead of comparing arrays.
        pred = int(self.__perceptron_pol.predict(evalu)[0])
        return ([[pred, 1 - pred]], str(self.__perceptron_pol_score))

    def perceptron_rat_eval(self, evalu):
        """Rating: one-hot row over the four trained classes (1, 2, 4, 5 stars)."""
        pred = self.__perceptron_rat.predict(evalu)[0]
        one_hot = {5: [[0, 0, 0, 1]], 4: [[0, 0, 1, 0]], 2: [[0, 1, 0, 0]]}
        # Any other label (class 1 or something unexpected) maps to 1/5,
        # matching the original if/elif/else chain.
        return (one_hot.get(pred, [[1, 0, 0, 0]]), str(self.__perceptron_rat_score))

    def kneighbors_pol_eval(self, evalu):
        """Placeholder: model too large to deploy; accuracy measured offline."""
        return ([[0, 0]], "0.45")

    def kneighbors_rat_eval(self, evalu):
        # Fix: was [[0, 0]] (two columns), which made Manage() raise IndexError
        # when building the four rating columns.
        return ([[0, 0, 0, 0]], "0.27")

    def NB_pol_eval(self, evalu):
        """Gaussian NB polarity probabilities."""
        return (self.__nb_pol.predict_proba(evalu).tolist(), str(self.__nb_pol_score))

    def NB_rat_eval(self, evalu):
        """Gaussian NB rating probabilities."""
        return (self.__nb_rat.predict_proba(evalu).tolist(), str(self.__nb_rat_score))

    def SVM_pol_eval(self, evalu):
        """Placeholder: model too large to deploy; accuracy measured offline."""
        return ([[0, 0]], "0.57")

    def SVM_rat_eval(self, evalu):
        # Fix: was [[0, 0]] (two columns) -- same IndexError as K-Neighbors.
        return ([[0, 0, 0, 0]], "0.22")

    def RF_pol_eval(self, evalu):
        """Random-forest polarity probabilities."""
        return (self.__rf_pol.predict_proba(evalu).tolist(), str(self.__rf_pol_score))

    def RF_rat_eval(self, evalu):
        """Random-forest rating probabilities."""
        return (self.__rf_rat.predict_proba(evalu).tolist(), str(self.__rf_rat_score))

    def MLP_pol_eval(self, evalu):
        """MLP polarity probabilities."""
        return (self.__clf_pol.predict_proba(evalu).tolist(), str(self.__clf_pol_score))

    def MLP_rat_eval(self, evalu):
        """MLP rating probabilities."""
        return (self.__clf_rat.predict_proba(evalu).tolist(), str(self.__clf_rat_score))

    def Dummy_pol_eval(self, evalu):
        """Baseline polarity probabilities."""
        return (self.__dummy_pol.predict_proba(evalu).tolist(), str(self.__dummy_pol_score))

    def Dummy_rat_eval(self, evalu):
        """Baseline rating probabilities."""
        # Fix: the original returned the bound ``score`` *method* of the model
        # instead of the cached score value, crashing float(score) in Manage().
        return (self.__dummy_rat.predict_proba(evalu).tolist(), str(self.__dummy_rat_score))

    def __get_vocabulary(self):
        """Read the polarity vocabulary file into a deduplicated word list."""
        with open("models/vocabulary_polarity.txt", "r") as o:
            res = o.read()
        self.__vocabulary = list(set(res.split("\n")))

    def Tokenizer(self, text):
        """Vectorize raw review text into the dense array the models expect."""
        return self.__vectorizer_pol.transform([text]).toarray()

    def Manage(self, model, Dataset, review):
        """Run *model* on the vectorized *review* for the chosen *Dataset*.

        Returns a (pandas.DataFrame, summary-string) pair for the Gradio UI.
        """
        if Dataset == "Binary":
            percent, score = self.__exec[model][0](review)
            res = pd.DataFrame({'Positive': percent[0][0], 'Negative': percent[0][1]},
                               index=["Prediction"])
        else:
            percent, score = self.__exec[model][1](review)
            res = pd.DataFrame({'Rated 1/5': percent[0][0], 'Rated 2/5': percent[0][1],
                                'Rated 4/5': percent[0][2], 'Rated 5/5': percent[0][3]},
                               index=["Prediction"])
        summary = f"Model: {model}\nDataset: {Dataset}\nAccuracy: {str(float(score)*100)}"
        # An all-zero probability row marks a placeholder (undeployed) model.
        if percent[0][0] == 0 and percent[0][1] == 0:
            summary += "\nDue to the size of the model, it has not been implemented on huggingface."
        return (res, summary)
if __name__ == "__main__":
    # Build the model suite once at startup; every UI request reuses it.
    engine = NLP()

    def greet(Model, Dataset, Review):
        """Gradio callback: tokenize the review, then run the chosen model."""
        return engine.Manage(Model, Dataset, engine.Tokenizer(Review))

    model_choices = ["Perceptron", "K-Neighbors", "Naive Bayes", "SVM",
                     "Random Forest", "NN (MLP)", "Dummy (Baseline)"]
    gr.Interface(
        greet,
        [gr.inputs.Dropdown(model_choices),
         gr.inputs.Dropdown(["Binary", "Rating"]),
         "text"],
        [gr.outputs.Dataframe(), "text"],
    ).launch()