ManjinderUNCC committed
Commit 24b9875 · verified · 1 Parent(s): e04d6cb

Update gradio_interface.py

Files changed (1):
  1. gradio_interface.py +32 -26
gradio_interface.py CHANGED
@@ -1,29 +1,26 @@
-import gradio as gr
-
-# Function to execute evaluate_model.py
-def evaluate_model_script():
-    import spacy
-    import jsonlines
-    from sklearn.metrics import classification_report, accuracy_score, f1_score, precision_score, recall_score
+import spacy
+import jsonlines
+from sklearn.metrics import classification_report, accuracy_score, f1_score, precision_score, recall_score
 
+def evaluate_model(input_file_path, model_path):
     # Load the trained spaCy model
-    nlp = spacy.load("./my_trained_model")
+    nlp = spacy.load(model_path)
 
-    # Load the golden evaluation data
-    golden_eval_data = []
-    with jsonlines.open("data/goldenEval.jsonl") as reader:
+    # Load the evaluation data
+    eval_data = []
+    with jsonlines.open(input_file_path) as reader:
         for record in reader:
-            golden_eval_data.append(record)
+            eval_data.append(record)
 
-    # Predict labels for each record using your model
+    # Predict labels for each record using the model
     predicted_labels = []
-    for record in golden_eval_data:
+    for record in eval_data:
         text = record["text"]
         doc = nlp(text)
         predicted_labels.append(doc.cats)
 
-    # Extract ground truth labels from the golden evaluation data
-    true_labels = [record["accept"] for record in golden_eval_data]
+    # Extract ground truth labels from the evaluation data
+    true_labels = [record["accept"] for record in eval_data]
 
     # Convert label format to match sklearn's classification report format
     true_labels_flat = [label[0] if label else "reject" for label in true_labels]
@@ -38,18 +35,27 @@ def evaluate_model_script():
     # Additional classification report
     report = classification_report(true_labels_flat, predicted_labels_flat)
 
-    # Build the result dictionary
-    result = {
+    # Return evaluation metrics and report
+    return {
         "accuracy": accuracy,
         "precision": precision,
         "recall": recall,
-        "f1_score": f1,
-        "detailed_classification_report": report
+        "f1": f1,
+        "classification_report": report
     }
 
-    return result
-
-# Gradio Interface
-output = gr.outputs.Label(type="json", label="Evaluation Metrics")
-iface = gr.Interface(fn=evaluate_model_script, outputs=output, title="Evaluate Model Script")
-iface.launch()
+# Example usage
+input_file_path = "data/goldenEval.jsonl"
+model_path = "./my_trained_model"
+evaluation_results = evaluate_model(input_file_path, model_path)
+
+# Print or save the evaluation results
+print("Evaluation Metrics:")
+print(f"Accuracy: {evaluation_results['accuracy']}")
+print(f"Precision: {evaluation_results['precision']}")
+print(f"Recall: {evaluation_results['recall']}")
+print(f"F1-Score: {evaluation_results['f1']}")
+
+# Print or save the detailed classification report
+print("Detailed Classification Report:")
+print(evaluation_results['classification_report'])
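
The lines between the two hunks, where predicted_labels_flat and the accuracy, precision, recall, and f1 values used above are computed, are not shown in this diff. For reference only, here is a minimal sketch of how those values are typically derived with scikit-learn, assuming each doc.cats dict maps label names to confidence scores and the highest-scoring label (falling back to "reject") is taken as the prediction; variable names follow the diff, but the actual elided code may differ.

# Hypothetical reconstruction of the elided step, not part of this commit.
# Pick the highest-scoring category per document, defaulting to "reject".
predicted_labels_flat = [
    max(cats, key=cats.get) if cats else "reject"
    for cats in predicted_labels
]

# Aggregate metrics; "weighted" averaging is an assumption, not confirmed by the diff.
accuracy = accuracy_score(true_labels_flat, predicted_labels_flat)
precision = precision_score(true_labels_flat, predicted_labels_flat, average="weighted", zero_division=0)
recall = recall_score(true_labels_flat, predicted_labels_flat, average="weighted", zero_division=0)
f1 = f1_score(true_labels_flat, predicted_labels_flat, average="weighted", zero_division=0)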
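
The removed block relied on the legacy gr.outputs.Label component, which newer Gradio releases no longer provide, and the rewritten file no longer launches any interface even though it is still named gradio_interface.py. If a web UI is still wanted, a minimal sketch against the current Gradio component API could look like the following; it assumes the evaluate_model function introduced by this commit is defined in the same module, and the default paths and labels are illustrative, not part of the commit.

import gradio as gr

# Hypothetical UI wrapper around the refactored evaluate_model(input_file_path, model_path).
iface = gr.Interface(
    fn=evaluate_model,
    inputs=[
        gr.Textbox(value="data/goldenEval.jsonl", label="Evaluation data (.jsonl)"),
        gr.Textbox(value="./my_trained_model", label="spaCy model path"),
    ],
    outputs=gr.JSON(label="Evaluation Metrics"),
    title="Evaluate Model Script",
)

if __name__ == "__main__":
    iface.launch()

Returning the metrics dictionary directly works here because gr.JSON accepts plain dicts and renders them as JSON.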