# prodigy-ecfr-textcat / gradio_interface.py

import gradio as gr


# Run the evaluation logic from evaluate_model.py and return the metrics as a dict
def evaluate_model_script():
    import spacy
    import jsonlines
    from sklearn.metrics import classification_report, accuracy_score, f1_score, precision_score, recall_score

    # Load the trained spaCy model
    nlp = spacy.load("./my_trained_model")

    # Load the golden evaluation data
    golden_eval_data = []
    with jsonlines.open("data/goldenEval.jsonl") as reader:
        for record in reader:
            golden_eval_data.append(record)
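
    # Assumed shape of each Prodigy-exported record, inferred from the "text" and
    # "accept" keys used below (label names here are placeholders, not the real ones):
    #   {"text": "Some eCFR passage ...", "accept": ["SOME_LABEL"]}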

    # Predict labels for each record using the model; doc.cats maps each
    # text category to a predicted score
    predicted_labels = []
    for record in golden_eval_data:
        text = record["text"]
        doc = nlp(text)
        predicted_labels.append(doc.cats)

    # Extract ground truth labels from the golden evaluation data
    true_labels = [record["accept"] for record in golden_eval_data]

    # Flatten both label sets into single strings so they match sklearn's expected format:
    # take the first accepted label (or "reject" if none) and the highest-scoring predicted category
    true_labels_flat = [label[0] if label else "reject" for label in true_labels]
    predicted_labels_flat = [max(pred, key=pred.get) for pred in predicted_labels]
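
    # Illustrative values for the flattening above (placeholder labels):
    #   accept == ["LABEL_A"] -> "LABEL_A", accept == [] -> "reject"
    #   doc.cats == {"LABEL_A": 0.91, "LABEL_B": 0.09} -> "LABEL_A"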

    # Calculate evaluation metrics
    accuracy = accuracy_score(true_labels_flat, predicted_labels_flat)
    precision = precision_score(true_labels_flat, predicted_labels_flat, average='weighted')
    recall = recall_score(true_labels_flat, predicted_labels_flat, average='weighted')
    f1 = f1_score(true_labels_flat, predicted_labels_flat, average='weighted')

    # Additional classification report
    report = classification_report(true_labels_flat, predicted_labels_flat)

    # Build the result dictionary
    result = {
        "accuracy": accuracy,
        "precision": precision,
        "recall": recall,
        "f1_score": f1,
        "detailed_classification_report": report
    }

    return result


# Gradio interface: gr.JSON replaces the deprecated gr.outputs.Label output,
# and inputs=[] is passed explicitly because the function takes no arguments
output = gr.JSON(label="Evaluation Metrics")
iface = gr.Interface(fn=evaluate_model_script, inputs=[], outputs=output, title="Evaluate Model Script")
iface.launch()
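
# To run locally (assumes gradio, spacy, jsonlines, and scikit-learn are installed,
# and that ./my_trained_model and data/goldenEval.jsonl sit next to this script):
#   python gradio_interface.py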