ManjinderUNCC committed
Commit 7b8f10b · verified · 1 Parent(s): 24b9875

Update gradio_interface.py

Files changed (1)
  1. gradio_interface.py +29 -32
gradio_interface.py CHANGED
@@ -1,30 +1,35 @@
+import gradio as gr
 import spacy
 import jsonlines
 from sklearn.metrics import classification_report, accuracy_score, f1_score, precision_score, recall_score
 
-def evaluate_model(input_file_path, model_path):
-    # Load the trained spaCy model
-    nlp = spacy.load(model_path)
-
-    # Load the evaluation data
-    eval_data = []
-    with jsonlines.open(input_file_path) as reader:
+# Load the trained spaCy model
+model_path = "./my_trained_model"
+nlp = spacy.load(model_path)
+
+# Function to classify text
+def classify_text(text):
+    doc = nlp(text)
+    predicted_labels = doc.cats
+    return predicted_labels
+
+# Function to evaluate the model based on user input
+def evaluate_model(input_text):
+    # Load the golden evaluation data
+    golden_eval_data = []
+    with jsonlines.open("data/goldenEval.jsonl") as reader:
         for record in reader:
-            eval_data.append(record)
+            golden_eval_data.append(record)
 
-    # Predict labels for each record using the model
-    predicted_labels = []
-    for record in eval_data:
-        text = record["text"]
-        doc = nlp(text)
-        predicted_labels.append(doc.cats)
+    # Predict labels for the input text using the model
+    predicted_labels = classify_text(input_text)
 
-    # Extract ground truth labels from the evaluation data
-    true_labels = [record["accept"] for record in eval_data]
+    # Extract ground truth labels from the golden evaluation data
+    true_labels = [record["accept"] for record in golden_eval_data]
 
     # Convert label format to match sklearn's classification report format
     true_labels_flat = [label[0] if label else "reject" for label in true_labels]
-    predicted_labels_flat = [max(pred, key=pred.get) for pred in predicted_labels]
+    predicted_labels_flat = [max(predicted_labels, key=predicted_labels.get) for _ in range(len(true_labels))]
 
     # Calculate evaluation metrics
     accuracy = accuracy_score(true_labels_flat, predicted_labels_flat)
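
One thing to watch in the new evaluate_model: predicted_labels_flat repeats the single prediction made for the user's input once per gold record, so the metrics compare one prediction against every label in data/goldenEval.jsonl rather than scoring the model over the dataset. A minimal corpus-level alternative, sketched under the assumption that each record carries a "text" field alongside "accept" (as the removed code assumed):

import jsonlines
import spacy
from sklearn.metrics import accuracy_score, classification_report

nlp = spacy.load("./my_trained_model")

true_labels_flat = []
predicted_labels_flat = []
with jsonlines.open("data/goldenEval.jsonl") as reader:
    for record in reader:
        doc = nlp(record["text"])  # assumes a "text" field per record
        # doc.cats maps each label to a score; take the argmax as the prediction
        predicted_labels_flat.append(max(doc.cats, key=doc.cats.get))
        # "accept" holds the accepted labels; an empty list falls back to "reject"
        true_labels_flat.append(record["accept"][0] if record["accept"] else "reject")

print(accuracy_score(true_labels_flat, predicted_labels_flat))
print(classification_report(true_labels_flat, predicted_labels_flat))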
@@ -44,18 +49,10 @@ def evaluate_model(input_file_path, model_path):
         "classification_report": report
     }
 
-# Example usage
-input_file_path = "data/goldenEval.jsonl"
-model_path = "./my_trained_model"
-evaluation_results = evaluate_model(input_file_path, model_path)
-
-# Print or save the evaluation results
-print("Evaluation Metrics:")
-print(f"Accuracy: {evaluation_results['accuracy']}")
-print(f"Precision: {evaluation_results['precision']}")
-print(f"Recall: {evaluation_results['recall']}")
-print(f"F1-Score: {evaluation_results['f1']}")
-
-# Print or save the detailed classification report
-print("Detailed Classification Report:")
-print(evaluation_results['classification_report'])
+# Gradio Interface for text classification
+classification_interface = gr.Interface(fn=classify_text, inputs="textbox", outputs="json", title="Text Classifier", description="Enter your text")
+classification_interface.launch(share=True)
+
+# Gradio Interface for model evaluation
+evaluation_interface = gr.Interface(fn=evaluate_model, inputs="textbox", outputs="json", title="Model Evaluation", description="Enter text to evaluate the model")
+evaluation_interface.launch(share=True)
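
A note on the two launch() calls at the bottom: gr.Interface.launch() typically blocks the script, so evaluation_interface would only start once the first server shuts down. One way to serve both at once, sketched with Gradio's TabbedInterface and reusing the classify_text and evaluate_model functions defined above:

import gradio as gr

# classify_text and evaluate_model as defined in gradio_interface.py above
classification_interface = gr.Interface(fn=classify_text, inputs="textbox", outputs="json", title="Text Classifier", description="Enter your text")
evaluation_interface = gr.Interface(fn=evaluate_model, inputs="textbox", outputs="json", title="Model Evaluation", description="Enter text to evaluate the model")

# A single server hosts both interfaces as tabs
gr.TabbedInterface(
    [classification_interface, evaluation_interface],
    ["Text Classifier", "Model Evaluation"],
).launch(share=True)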
 
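For readers of the JSON outputs: spaCy's text classifier exposes scores through doc.cats, a dict mapping each label to a float, which is why the code takes max(..., key=...get) as the predicted label; the "accept" field in the golden data is a list of accepted labels, empty for rejected examples. A small illustration with hypothetical labels and scores:

# Hypothetical values, for illustration only
doc_cats = {"accept": 0.91, "reject": 0.09}               # shape of spaCy's doc.cats
predicted = max(doc_cats, key=doc_cats.get)               # argmax over scores -> "accept"

gold_accept = []                                          # an empty "accept" list in the data
gold_label = gold_accept[0] if gold_accept else "reject"  # the fallback used above
print(predicted, gold_label)                              # -> accept reject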