ManjinderUNCC committed on
Commit c2eb30b
1 Parent(s): fd66da1

Upload gradio_interface.py

Files changed (1)
  1. gradio_interface.py +63 -0
gradio_interface.py ADDED
@@ -0,0 +1,63 @@
+ import gradio as gr
+ import spacy
+ from sklearn.metrics import classification_report, accuracy_score, f1_score, precision_score, recall_score
+
+ # Load the trained spaCy model
+ model_path = "./my_trained_model"
+ nlp = spacy.load(model_path)
+
+ # Threshold for classification
+ threshold = 0.21
+
+ # Function to classify text (returns the model's raw category scores)
+ def classify_text(text):
+     doc = nlp(text)
+     predicted_labels = doc.cats
+     return predicted_labels
+
+ # Function to evaluate the predicted labels for the input text
+ def evaluate_text(input_text):
+     # Get the predicted labels for the input text
+     doc = nlp(input_text)
+     predicted_labels = {label: score > threshold for label, score in doc.cats.items()}
+
+     # In practice, the predicted labels would be compared against real ground-truth labels for the input text.
+     # For demonstration purposes, hardcoded ground-truth labels are used here.
+     ground_truth_labels = {
+         "CapitalRequirements": 0,
+         "ConsumerProtection": 1,
+         "RiskManagement": 0,
+         "ReportingAndCompliance": 1,
+         "CorporateGovernance": 0
+     }
+
+     # Convert predicted and ground-truth labels to lists
+     predicted_labels_list = [1 if predicted_labels[label] else 0 for label in predicted_labels]
+     ground_truth_labels_list = [ground_truth_labels[label] for label in predicted_labels]
+
+     # Calculate evaluation metrics
+     accuracy = accuracy_score(ground_truth_labels_list, predicted_labels_list)
+     precision = precision_score(ground_truth_labels_list, predicted_labels_list, average='weighted')
+     recall = recall_score(ground_truth_labels_list, predicted_labels_list, average='weighted')
+     f1 = f1_score(ground_truth_labels_list, predicted_labels_list, average='weighted')
+
+     # Additional classification report
+     report = classification_report(ground_truth_labels_list, predicted_labels_list)
+
+     # Construct output dictionary
+     output_dict = {
+         "PredictedLabels": predicted_labels,
+         "EvaluationMetrics": {
+             "Accuracy": accuracy,
+             "Precision": precision,
+             "Recall": recall,
+             "F1-Score": f1,
+             "ClassificationReport": report
+         }
+     }
+
+     return output_dict
+
+ # Gradio Interface
+ iface = gr.Interface(fn=evaluate_text, inputs="text", outputs="json", title="Text Evaluation-Manjinder", description="Enter your text")
+ iface.launch(share=True)
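
A quick way to exercise the uploaded script without the UI is to call evaluate_text directly. The snippet below is a minimal sketch, not part of this commit: it assumes a trained spaCy textcat model exists at ./my_trained_model, the sample sentence is made up, and it should run in the same session after evaluate_text is defined, before iface.launch() is reached.

# Hypothetical sanity check (not part of gradio_interface.py)
sample = "Banks must file quarterly compliance reports with the regulator."
result = evaluate_text(sample)
print(result["PredictedLabels"])
print(result["EvaluationMetrics"]["F1-Score"])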