ManjinderUNCC committed
Commit a987204 · verified · 1 Parent(s): 58d3595

Update gradio_interface.py

Files changed (1)
  1. gradio_interface.py +66 -33
gradio_interface.py CHANGED
@@ -21,42 +21,75 @@ def evaluate_text(input_text):
     doc = nlp(input_text)
     predicted_labels = doc.cats

-    # Assuming you have ground truth labels for the input text, you would compare the predicted labels with the ground truth labels here.
-    # For demonstration purposes, let's assume the ground truth labels are provided here.
-    ground_truth_labels = {
-        "CapitalRequirements": 0,
-        "ConsumerProtection": 1,
-        "RiskManagement": 0,
-        "ReportingAndCompliance": 1,
-        "CorporateGovernance": 0
+    # Construct output dictionary with predicted labels and probabilities
+    output_dict = {
+        "PredictedLabels": {label: score for label, score in predicted_labels.items() if score > threshold}
     }
-
-    # Convert predicted and ground truth labels to lists
-    predicted_labels_list = [predicted_labels[label] for label in ground_truth_labels]
-    ground_truth_labels_list = [ground_truth_labels[label] for label in ground_truth_labels]
-
-    # Calculate evaluation metrics
-    accuracy = accuracy_score(ground_truth_labels_list, [1 if prob > threshold else 0 for prob in predicted_labels_list])
-    precision = precision_score(ground_truth_labels_list, [1 if prob > threshold else 0 for prob in predicted_labels_list], average='weighted')
-    recall = recall_score(ground_truth_labels_list, [1 if prob > threshold else 0 for prob in predicted_labels_list], average='weighted')
-    f1 = f1_score(ground_truth_labels_list, [1 if prob > threshold else 0 for prob in predicted_labels_list], average='weighted')
-
-    # Additional classification report
-    report = classification_report(ground_truth_labels_list, [1 if prob > threshold else 0 for prob in predicted_labels_list])
-
-    # # Construct output dictionary
-    # output_dict = {
-    #     "PredictedLabels": predicted_labels,
-    #     "EvaluationMetrics": {
-    #         "Accuracy": accuracy,
-    #         "Precision": precision,
-    #         "Recall": recall,
-    #         "F1-Score": f1,
-    #         "ClassificationReport": report
-    #     }
-    # }
     return output_dict

 # Gradio Interface
 iface = gr.Interface(fn=evaluate_text, inputs="text", outputs="json", title="Text Evaluation-Manjinder", description="Enter your text")
 iface.launch(share=True)
+
+# import gradio as gr
+# import spacy
+# from sklearn.metrics import classification_report, accuracy_score, f1_score, precision_score, recall_score
+
+# # Load the trained spaCy model
+# model_path = "./my_trained_model"
+# nlp = spacy.load(model_path)
+
+# # Threshold for classification
+# threshold = 0.21
+
+# # Function to classify text
+# def classify_text(text):
+#     doc = nlp(text)
+#     predicted_labels = doc.cats
+#     return predicted_labels
+
+# # Function to evaluate the predicted labels for the input text
+# def evaluate_text(input_text):
+#     # Get the predicted labels and probabilities for the input text
+#     doc = nlp(input_text)
+#     predicted_labels = doc.cats
+
+#     # Assuming you have ground truth labels for the input text, you would compare the predicted labels with the ground truth labels here.
+#     # For demonstration purposes, let's assume the ground truth labels are provided here.
+#     ground_truth_labels = {
+#         "CapitalRequirements": 0,
+#         "ConsumerProtection": 1,
+#         "RiskManagement": 0,
+#         "ReportingAndCompliance": 1,
+#         "CorporateGovernance": 0
+#     }
+
+#     # Convert predicted and ground truth labels to lists
+#     predicted_labels_list = [predicted_labels[label] for label in ground_truth_labels]
+#     ground_truth_labels_list = [ground_truth_labels[label] for label in ground_truth_labels]
+
+#     # Calculate evaluation metrics
+#     accuracy = accuracy_score(ground_truth_labels_list, [1 if prob > threshold else 0 for prob in predicted_labels_list])
+#     precision = precision_score(ground_truth_labels_list, [1 if prob > threshold else 0 for prob in predicted_labels_list], average='weighted')
+#     recall = recall_score(ground_truth_labels_list, [1 if prob > threshold else 0 for prob in predicted_labels_list], average='weighted')
+#     f1 = f1_score(ground_truth_labels_list, [1 if prob > threshold else 0 for prob in predicted_labels_list], average='weighted')
+
+#     # Additional classification report
+#     report = classification_report(ground_truth_labels_list, [1 if prob > threshold else 0 for prob in predicted_labels_list])
+
+#     # # Construct output dictionary
+#     # output_dict = {
+#     #     "PredictedLabels": predicted_labels,
+#     #     "EvaluationMetrics": {
+#     #         "Accuracy": accuracy,
+#     #         "Precision": precision,
+#     #         "Recall": recall,
+#     #         "F1-Score": f1,
+#     #         "ClassificationReport": report
+#     #     }
+#     # }
+#     return output_dict
+
+# # Gradio Interface
+# iface = gr.Interface(fn=evaluate_text, inputs="text", outputs="json", title="Text Evaluation-Manjinder", description="Enter your text")
+# iface.launch(share=True)
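
For context, the net effect of this commit is that evaluate_text no longer scores predictions against hard-coded ground-truth labels with scikit-learn; it simply returns the doc.cats entries whose score clears the threshold. Below is a minimal sketch of that behaviour outside Gradio, assuming the ./my_trained_model path and threshold = 0.21 values shown in the commented-out block (both are assumptions here; adjust them to match the module's actual top-level definitions).

import spacy

# Assumed values, copied from the commented-out block in this commit.
model_path = "./my_trained_model"
threshold = 0.21

nlp = spacy.load(model_path)

def evaluate_text(input_text):
    # Run the text-classification pipeline and keep only labels above the threshold.
    doc = nlp(input_text)
    predicted_labels = doc.cats
    output_dict = {
        "PredictedLabels": {
            label: score
            for label, score in predicted_labels.items()
            if score > threshold
        }
    }
    return output_dict

if __name__ == "__main__":
    # Any text works; the label set comes from the trained textcat model.
    print(evaluate_text("Banks must file quarterly compliance reports with the regulator."))

The removed accuracy/precision/recall/F1 computation only makes sense when each input arrives with real ground-truth labels, which a single free-text Gradio field cannot supply; that is presumably why the evaluation path was commented out at the bottom of the file rather than kept live.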