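"""Gradio demo: run a trained spaCy text classifier on input text and report
evaluation metrics against a fixed set of example ground-truth labels."""
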
import gradio as gr
import spacy
from sklearn.metrics import (
    accuracy_score,
    classification_report,
    f1_score,
    precision_score,
    recall_score,
)

# Load the trained spaCy text-categorization pipeline from disk.
model_path = "./my_trained_model"
nlp = spacy.load(model_path)

# Category scores above this threshold count as positive predictions.
threshold = 0.21


def classify_text(text):
    """Return the raw category scores for ``text`` (helper, not used by the Gradio interface below)."""
    doc = nlp(text)
    return doc.cats
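# Example usage (hypothetical scores; actual values depend on the trained model):
#   classify_text("Banks must file quarterly capital reports.")
#   -> {"CapitalRequirements": 0.18, "ReportingAndCompliance": 0.74, ...}

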
def evaluate_text(input_text):
    # Run the pipeline and binarize each category score against the threshold.
    doc = nlp(input_text)
    predicted_labels = {label: score > threshold for label, score in doc.cats.items()}

    # Fixed example ground-truth labels for this demo; assumes the model's
    # categories match these five label names.
    ground_truth_labels = {
        "CapitalRequirements": 0,
        "ConsumerProtection": 1,
        "RiskManagement": 0,
        "ReportingAndCompliance": 1,
        "CorporateGovernance": 0
    }

    # Align predictions and ground truth in the same label order.
    predicted_labels_list = [1 if predicted_labels[label] else 0 for label in predicted_labels]
    ground_truth_labels_list = [ground_truth_labels[label] for label in predicted_labels]

    # Overall metrics; zero_division=0 suppresses undefined-metric warnings when a
    # class is absent from the predictions.
    accuracy = accuracy_score(ground_truth_labels_list, predicted_labels_list)
    precision = precision_score(ground_truth_labels_list, predicted_labels_list, average='weighted', zero_division=0)
    recall = recall_score(ground_truth_labels_list, predicted_labels_list, average='weighted', zero_division=0)
    f1 = f1_score(ground_truth_labels_list, predicted_labels_list, average='weighted', zero_division=0)

    # Per-class breakdown as a plain-text report.
    report = classification_report(ground_truth_labels_list, predicted_labels_list, zero_division=0)

    output_dict = {
        "PredictedLabels": predicted_labels,
        "EvaluationMetrics": {
            "Accuracy": accuracy,
            "Precision": precision,
            "Recall": recall,
            "F1-Score": f1,
            "ClassificationReport": report
        }
    }

    return output_dict


# Gradio app: free-text input, JSON output with predictions and metrics.
# share=True also creates a temporary public link in addition to the local URL.
iface = gr.Interface(
    fn=evaluate_text,
    inputs="text",
    outputs="json",
    title="Text Evaluation-Manjinder",
    description="Enter your text",
)
iface.launch(share=True)