import gradio as gr
import spacy
from sklearn.metrics import classification_report, accuracy_score, f1_score, precision_score, recall_score

# Load the trained spaCy model
model_path = "./my_trained_model"
nlp = spacy.load(model_path)
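# The loaded pipeline is assumed to include a text classification component
# (e.g. "textcat" or "textcat_multilabel") that populates doc.cats with per-label scores.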

# Threshold for classification
threshold = 0.21
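# Per-label scores above this value are treated as positive (1) predictions
# when the probabilities are binarized for the evaluation metrics below.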

# Standalone helper: classify text and return the raw per-label scores (doc.cats)
def classify_text(text):
    doc = nlp(text)
    predicted_labels = doc.cats
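    # Illustrative shape of doc.cats (actual scores depend on the trained model):
    # {"CapitalRequirements": 0.12, "ConsumerProtection": 0.85, ...}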
    return predicted_labels

# Function to evaluate the predicted labels for the input text
def evaluate_text(input_text):
    # Get the predicted labels and probabilities for the input text
    doc = nlp(input_text)
    predicted_labels = doc.cats
    
    # In a real evaluation you would compare the predicted labels against ground truth labels for the input text.
    # For demonstration purposes, a fixed set of ground truth labels is hard-coded below.
    ground_truth_labels = {
        "CapitalRequirements": 0,
        "ConsumerProtection": 1,
        "RiskManagement": 0,
        "ReportingAndCompliance": 1,
        "CorporateGovernance": 0
    }

    # Convert predicted scores and ground truth labels to lists in a consistent label order
    predicted_labels_list = [predicted_labels[label] for label in ground_truth_labels]
    ground_truth_labels_list = [ground_truth_labels[label] for label in ground_truth_labels]

    # Binarize the predicted scores once, using the classification threshold
    predicted_binary = [1 if prob > threshold else 0 for prob in predicted_labels_list]

    # Calculate evaluation metrics
    accuracy = accuracy_score(ground_truth_labels_list, predicted_binary)
    precision = precision_score(ground_truth_labels_list, predicted_binary, average='weighted')
    recall = recall_score(ground_truth_labels_list, predicted_binary, average='weighted')
    f1 = f1_score(ground_truth_labels_list, predicted_binary, average='weighted')

    # Additional classification report
    report = classification_report(ground_truth_labels_list, predicted_binary)

    # Construct the output dictionary returned to the Gradio interface
    output_dict = {
        "PredictedLabels": predicted_labels,
        "EvaluationMetrics": {
            "Accuracy": accuracy,
            "Precision": precision,
            "Recall": recall,
            "F1-Score": f1,
            "ClassificationReport": report
        }
    }
    return output_dict

# Gradio Interface
iface = gr.Interface(fn=evaluate_text, inputs="text", outputs="json", title="Text Evaluation-Manjinder", description="Enter your text")
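# share=True additionally requests a temporary public URL alongside the local server.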
iface.launch(share=True)