ManjinderUNCC committed on
Commit b7e150c · verified · 1 Parent(s): 7b8f10b

Update gradio_interface.py

Files changed (1)
  1. gradio_interface.py +36 -35
gradio_interface.py CHANGED
@@ -1,6 +1,5 @@
 import gradio as gr
 import spacy
-import jsonlines
 from sklearn.metrics import classification_report, accuracy_score, f1_score, precision_score, recall_score
 
 # Load the trained spaCy model
@@ -13,46 +12,48 @@ def classify_text(text):
     predicted_labels = doc.cats
     return predicted_labels
 
-# Function to evaluate the model based on user input
-def evaluate_model(input_text):
-    # Load the golden evaluation data
-    golden_eval_data = []
-    with jsonlines.open("data/goldenEval.jsonl") as reader:
-        for record in reader:
-            golden_eval_data.append(record)
-
-    # Predict labels for the input text using the model
+# Function to evaluate the predicted labels for the input text
+def evaluate_text(input_text):
+    # Get the predicted labels for the input text
     predicted_labels = classify_text(input_text)
+
+    # Assuming you have ground truth labels for the input text, you would compare the predicted labels with the ground truth labels here.
+    # For demonstration purposes, let's assume the ground truth labels are provided here.
+    ground_truth_labels = {
+        "CapitalRequirements": 0,
+        "ConsumerProtection": 1,
+        "RiskManagement": 0,
+        "ReportingAndCompliance": 1,
+        "CorporateGovernance": 0
+    }
 
-    # Extract ground truth labels from the golden evaluation data
-    true_labels = [record["accept"] for record in golden_eval_data]
-
-    # Convert label format to match sklearn's classification report format
-    true_labels_flat = [label[0] if label else "reject" for label in true_labels]
-    predicted_labels_flat = [max(predicted_labels, key=predicted_labels.get) for _ in range(len(true_labels))]
+    # Convert predicted and ground truth labels to lists
+    predicted_labels_list = [1 if predicted_labels[label] > 0.5 else 0 for label in predicted_labels]
+    ground_truth_labels_list = [ground_truth_labels[label] for label in predicted_labels]
 
     # Calculate evaluation metrics
-    accuracy = accuracy_score(true_labels_flat, predicted_labels_flat)
-    precision = precision_score(true_labels_flat, predicted_labels_flat, average='weighted')
-    recall = recall_score(true_labels_flat, predicted_labels_flat, average='weighted')
-    f1 = f1_score(true_labels_flat, predicted_labels_flat, average='weighted')
+    accuracy = accuracy_score(ground_truth_labels_list, predicted_labels_list)
+    precision = precision_score(ground_truth_labels_list, predicted_labels_list, average='weighted')
+    recall = recall_score(ground_truth_labels_list, predicted_labels_list, average='weighted')
+    f1 = f1_score(ground_truth_labels_list, predicted_labels_list, average='weighted')
 
     # Additional classification report
-    report = classification_report(true_labels_flat, predicted_labels_flat)
-
-    # Return evaluation metrics and report
-    return {
-        "accuracy": accuracy,
-        "precision": precision,
-        "recall": recall,
-        "f1": f1,
-        "classification_report": report
+    report = classification_report(ground_truth_labels_list, predicted_labels_list)
+
+    # Construct output
+    evaluation_results = {
+        "Accuracy": accuracy,
+        "Precision": precision,
+        "Recall": recall,
+        "F1-Score": f1,
+        "Classification Report": report
     }
+
+    return evaluation_results
 
-# Gradio Interface for text classification
-classification_interface = gr.Interface(fn=classify_text, inputs="textbox", outputs="json", title="Text Classifier", description="Enter your text")
-classification_interface.launch(share=True)
+# Gradio Interface
+inputs = gr.inputs.Textbox(lines=7, label="Enter your text")
+outputs = gr.outputs.JSON(label="Evaluation Results")
 
-# Gradio Interface for model evaluation
-evaluation_interface = gr.Interface(fn=evaluate_model, inputs="textbox", outputs="json", title="Model Evaluation", description="Enter text to evaluate the model")
-evaluation_interface.launch(share=True)
+iface = gr.Interface(fn=evaluate_text, inputs=inputs, outputs=outputs, title="Text Evaluation")
+iface.launch(share=True)
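
Note: the interface wiring added in this commit uses gr.inputs.Textbox and gr.outputs.JSON, which belong to older Gradio releases (the gr.inputs / gr.outputs namespaces were deprecated in Gradio 3.x and removed in later versions). A minimal sketch of the equivalent wiring on a newer Gradio, with a stub standing in for the evaluate_text function defined in this file, might look like this:

import gradio as gr

# Stub standing in for evaluate_text from gradio_interface.py; the real
# function scores the input text with the trained spaCy model and returns
# accuracy/precision/recall/F1 against the assumed ground-truth labels.
def evaluate_text(input_text):
    return {"Accuracy": 1.0, "Precision": 1.0, "Recall": 1.0, "F1-Score": 1.0}

# On Gradio 3.x/4.x, components are top-level classes rather than
# gr.inputs.* / gr.outputs.* wrappers.
inputs = gr.Textbox(lines=7, label="Enter your text")
outputs = gr.JSON(label="Evaluation Results")

iface = gr.Interface(fn=evaluate_text, inputs=inputs, outputs=outputs, title="Text Evaluation")
iface.launch(share=True)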