ManjinderUNCC committed on
Commit b88663b
1 Parent(s): 6dafb68

Update gradio_interface.py

Files changed (1)
  1. gradio_interface.py +29 -2
gradio_interface.py CHANGED
@@ -17,9 +17,9 @@ def classify_text(text):
 
 # Function to evaluate the predicted labels for the input text
 def evaluate_text(input_text):
-    # Get the predicted labels for the input text
+    # Get the predicted labels and probabilities for the input text
     doc = nlp(input_text)
-    predicted_labels = {label: score > threshold for label, score in doc.cats.items()}
+    predicted_labels = {label: doc.cats[label] for label in doc.cats}
 
     # Assuming you have ground truth labels for the input text, you would compare the predicted labels with the ground truth labels here.
     # For demonstration purposes, let's assume the ground truth labels are provided here.
@@ -31,6 +31,33 @@ def evaluate_text(input_text):
         "CorporateGovernance": 0
     }
 
+    # Convert predicted and ground truth labels to lists
+    predicted_labels_list = [doc.cats[label] for label in doc.cats]
+    ground_truth_labels_list = [ground_truth_labels[label] for label in doc.cats]
+
+    # Calculate evaluation metrics
+    accuracy = accuracy_score(ground_truth_labels_list, predicted_labels_list)
+    precision = precision_score(ground_truth_labels_list, predicted_labels_list, average='weighted')
+    recall = recall_score(ground_truth_labels_list, predicted_labels_list, average='weighted')
+    f1 = f1_score(ground_truth_labels_list, predicted_labels_list, average='weighted')
+
+    # Additional classification report
+    report = classification_report(ground_truth_labels_list, predicted_labels_list)
+
+    # Construct output dictionary
+    output_dict = {
+        "PredictedLabels": predicted_labels,
+        "EvaluationMetrics": {
+            "Accuracy": accuracy,
+            "Precision": precision,
+            "Recall": recall,
+            "F1-Score": f1,
+            "ClassificationReport": report
+        }
+    }
+
+    return output_dict
+
     # Convert predicted and ground truth labels to lists
     predicted_labels_list = [1 if predicted_labels[label] else 0 for label in predicted_labels]
     ground_truth_labels_list = [ground_truth_labels[label] for label in predicted_labels]
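
Note on the added metrics block: scikit-learn's accuracy_score, precision_score, recall_score, f1_score, and classification_report expect discrete class labels, while the new predicted_labels_list holds the raw doc.cats probabilities, so those calls will reject the continuous values. Below is a minimal sketch, not the committed code, of how the scores could be binarized first. The 0.5 threshold and the helper name scores_to_metrics are assumptions (the removed line compared against a threshold variable), and the sklearn.metrics imports are assumed to be available in gradio_interface.py.

# Minimal sketch (assumed fix, not part of the commit): binarize the spaCy
# textcat scores before handing them to scikit-learn's classification metrics,
# which expect discrete labels rather than probabilities.
from sklearn.metrics import (
    accuracy_score,
    precision_score,
    recall_score,
    f1_score,
    classification_report,
)

THRESHOLD = 0.5  # assumed decision threshold; adjust to whatever the project uses


def scores_to_metrics(doc_cats, ground_truth_labels, threshold=THRESHOLD):
    # Fix the label order once so predictions and ground truth stay aligned.
    labels = list(doc_cats.keys())
    predicted = [1 if doc_cats[label] > threshold else 0 for label in labels]
    truth = [ground_truth_labels[label] for label in labels]
    return {
        "Accuracy": accuracy_score(truth, predicted),
        "Precision": precision_score(truth, predicted, average="weighted", zero_division=0),
        "Recall": recall_score(truth, predicted, average="weighted", zero_division=0),
        "F1-Score": f1_score(truth, predicted, average="weighted", zero_division=0),
        "ClassificationReport": classification_report(truth, predicted, zero_division=0),
    }

Inside evaluate_text, a call such as metrics = scores_to_metrics(doc.cats, ground_truth_labels) could then replace the list-building and metric lines; the two list comprehensions left after return output_dict in the diff are unreachable as committed.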