ManjinderUNCC committed
Commit c083ec3 · verified · 1 Parent(s): 3fa594d

Update gradio_interface.py

Files changed (1)
  1. gradio_interface.py +50 -16
gradio_interface.py CHANGED
@@ -1,21 +1,55 @@
- # Import necessary libraries
  import gradio as gr
- import spacy
-
- # Load the trained spaCy model
- model_path = "./my_trained_model"
- nlp = spacy.load(model_path)
-
- # Function to classify text
- def classify_text(text):
-     doc = nlp(text)
-     predicted_labels = doc.cats
-     return predicted_labels
-
- # Gradio Interface
- def classify_and_save(input_text):
-     predicted_labels = classify_text(input_text)
-     return predicted_labels
-
- iface = gr.Interface(classify_and_save, "textbox", "json", title="Text Classifier", description="Enter your text")
- iface.launch(share=True)
  import gradio as gr
+
+ # Evaluate the trained model (logic from evaluate_model.py)
+ def evaluate_model_script():
+     import spacy
+     import jsonlines
+     from sklearn.metrics import classification_report, accuracy_score, f1_score, precision_score, recall_score
+
+     # Load the trained spaCy model
+     nlp = spacy.load("./my_trained_model")
+
+     # Load the golden evaluation data
+     golden_eval_data = []
+     with jsonlines.open("data/goldenEval.jsonl") as reader:
+         for record in reader:
+             golden_eval_data.append(record)
+
+     # Predict labels for each record using the model
+     predicted_labels = []
+     for record in golden_eval_data:
+         text = record["text"]
+         doc = nlp(text)
+         predicted_labels.append(doc.cats)
+
+     # Extract ground-truth labels from the golden evaluation data
+     true_labels = [record["accept"] for record in golden_eval_data]
+
+     # Convert label format to the flat lists sklearn expects
+     true_labels_flat = [label[0] if label else "reject" for label in true_labels]
+     predicted_labels_flat = [max(pred, key=pred.get) for pred in predicted_labels]
+
+     # Calculate evaluation metrics
+     accuracy = accuracy_score(true_labels_flat, predicted_labels_flat)
+     precision = precision_score(true_labels_flat, predicted_labels_flat, average='weighted')
+     recall = recall_score(true_labels_flat, predicted_labels_flat, average='weighted')
+     f1 = f1_score(true_labels_flat, predicted_labels_flat, average='weighted')
+
+     # Additional classification report
+     report = classification_report(true_labels_flat, predicted_labels_flat)
+
+     # Build the result dictionary
+     result = {
+         "accuracy": accuracy,
+         "precision": precision,
+         "recall": recall,
+         "f1_score": f1,
+         "detailed_classification_report": report
+     }
+
+     return result
+
+ # Gradio Interface: a JSON component for the metrics dict; no inputs are needed
+ output = gr.JSON(label="Evaluation Metrics")
+ iface = gr.Interface(fn=evaluate_model_script, inputs=[], outputs=output, title="Evaluate Model Script")
+ iface.launch()
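
Note: evaluate_model_script() expects data/goldenEval.jsonl to hold Prodigy-style records with a "text" field and an "accept" list; an empty "accept" list is scored as the literal label "reject". Below is a minimal sketch of a compatible file, with placeholder label names that are assumptions, not taken from this commit.

import jsonlines

# Hypothetical records matching the fields evaluate_model_script() reads.
# "LABEL_A" is a placeholder; use the categories the spaCy textcat model was trained with.
sample_records = [
    {"text": "First gold example.", "accept": ["LABEL_A"]},   # gold label is taken from accept[0]
    {"text": "Unlabelled gold example.", "accept": []},       # empty list is scored as "reject"
]

# Assumes the data/ directory already exists.
with jsonlines.open("data/goldenEval.jsonl", mode="w") as writer:
    for record in sample_records:
        writer.write(record)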