ManjinderUNCC committed on
Commit fd66da1 · verified · 1 Parent(s): 6150b70

Delete gradio_interface.py

Files changed (1)
  1. gradio_interface.py +0 -59
gradio_interface.py DELETED
@@ -1,59 +0,0 @@
-import gradio as gr
-import spacy
-from sklearn.metrics import classification_report, accuracy_score, f1_score, precision_score, recall_score
-
-# Load the trained spaCy model
-model_path = "./my_trained_model"
-nlp = spacy.load(model_path)
-
-# Function to classify text
-def classify_text(text):
-    doc = nlp(text)
-    predicted_labels = doc.cats
-    return predicted_labels
-
-# Function to evaluate the predicted labels for the input text
-def evaluate_text(input_text):
-    # Get the predicted labels for the input text
-    predicted_labels = classify_text(input_text)
-
-    # Assuming you have ground truth labels for the input text, you would compare the predicted labels with the ground truth labels here.
-    # For demonstration purposes, let's assume the ground truth labels are provided here.
-    ground_truth_labels = {
-        "CapitalRequirements": 0,
-        "ConsumerProtection": 1,
-        "RiskManagement": 0,
-        "ReportingAndCompliance": 1,
-        "CorporateGovernance": 0
-    }
-
-    # Convert predicted and ground truth labels to lists
-    predicted_labels_list = [1 if predicted_labels[label] > 0.5 else 0 for label in predicted_labels]
-    ground_truth_labels_list = [ground_truth_labels[label] for label in predicted_labels]
-
-    # Calculate evaluation metrics
-    accuracy = accuracy_score(ground_truth_labels_list, predicted_labels_list)
-    precision = precision_score(ground_truth_labels_list, predicted_labels_list, average='weighted')
-    recall = recall_score(ground_truth_labels_list, predicted_labels_list, average='weighted')
-    f1 = f1_score(ground_truth_labels_list, predicted_labels_list, average='weighted')
-
-    # Additional classification report
-    report = classification_report(ground_truth_labels_list, predicted_labels_list)
-
-    # Construct output dictionary
-    output_dict = {
-        "PredictedLabels": predicted_labels,
-        "EvaluationMetrics": {
-            "Accuracy": accuracy,
-            "Precision": precision,
-            "Recall": recall,
-            "F1-Score": f1,
-            "ClassificationReport": report
-        }
-    }
-
-    return output_dict
-
-# Gradio Interface
-iface = gr.Interface(fn=evaluate_text, inputs="text", outputs="json", title="Text Evaluation-Manjinder", description="Enter your text")
-iface.launch(share=True)