ManjinderUNCC committed
Commit 31cece3
Parent: 0a54820

Update gradio_interface.py

Files changed (1):
  gradio_interface.py (+9 -9)
gradio_interface.py CHANGED
@@ -19,7 +19,7 @@ def classify_text(text):
 def evaluate_text(input_text):
     # Get the predicted labels and probabilities for the input text
     doc = nlp(input_text)
-    predicted_labels = {label: doc.cats[label] for label in doc.cats}
+    predicted_labels = doc.cats
 
     # Assuming you have ground truth labels for the input text, you would compare the predicted labels with the ground truth labels here.
     # For demonstration purposes, let's assume the ground truth labels are provided here.
@@ -32,17 +32,17 @@ def evaluate_text(input_text):
     }
 
     # Convert predicted and ground truth labels to lists
-    predicted_labels_list = [doc.cats[label] for label in doc.cats]
-    ground_truth_labels_list = [ground_truth_labels[label] for label in doc.cats]
+    predicted_labels_list = [predicted_labels[label] for label in ground_truth_labels]
+    ground_truth_labels_list = [ground_truth_labels[label] for label in ground_truth_labels]
 
     # Calculate evaluation metrics
-    accuracy = accuracy_score(ground_truth_labels_list, predicted_labels_list)
-    precision = precision_score(ground_truth_labels_list, predicted_labels_list, average='weighted')
-    recall = recall_score(ground_truth_labels_list, predicted_labels_list, average='weighted')
-    f1 = f1_score(ground_truth_labels_list, predicted_labels_list, average='weighted')
+    accuracy = accuracy_score(ground_truth_labels_list, [1 if prob > threshold else 0 for prob in predicted_labels_list])
+    precision = precision_score(ground_truth_labels_list, [1 if prob > threshold else 0 for prob in predicted_labels_list], average='weighted')
+    recall = recall_score(ground_truth_labels_list, [1 if prob > threshold else 0 for prob in predicted_labels_list], average='weighted')
+    f1 = f1_score(ground_truth_labels_list, [1 if prob > threshold else 0 for prob in predicted_labels_list], average='weighted')
 
     # Additional classification report
-    report = classification_report(ground_truth_labels_list, predicted_labels_list)
+    report = classification_report(ground_truth_labels_list, [1 if prob > threshold else 0 for prob in predicted_labels_list])
 
     # Construct output dictionary
     output_dict = {
@@ -60,4 +60,4 @@ def evaluate_text(input_text):
 
 # Gradio Interface
 iface = gr.Interface(fn=evaluate_text, inputs="text", outputs="json", title="Text Evaluation-Manjinder", description="Enter your text")
-iface.launch(share=True)
+iface.launch(share=True)
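
For context, here is a minimal, self-contained sketch of how the updated evaluate_text() and the Gradio interface fit together after this commit. It is a reconstruction, not the repository file: the spaCy model path, the 0.5 decision threshold, the placeholder ground-truth dictionary, the zero_division arguments, and the exact output keys are assumptions, since threshold, ground_truth_labels, and the body of output_dict are defined elsewhere in gradio_interface.py and are not visible in this diff. (The final hunk re-adds iface.launch(share=True) unchanged, which usually just reflects a trailing-newline difference.)

import spacy
import gradio as gr
from sklearn.metrics import (accuracy_score, precision_score, recall_score,
                             f1_score, classification_report)

# Assumed: a trained spaCy pipeline with a text-classification component.
nlp = spacy.load("path/to/trained_textcat_model")  # hypothetical path

# Assumed decision threshold for turning probabilities into 0/1 labels;
# the real value is defined elsewhere in gradio_interface.py.
threshold = 0.5

def evaluate_text(input_text):
    # Predicted label probabilities from the textcat component
    doc = nlp(input_text)
    predicted_labels = doc.cats

    # Placeholder ground truth, as in the original script (1 = label applies)
    ground_truth_labels = {label: 0 for label in predicted_labels}

    # Keep predictions and ground truth in the same label order
    predicted_labels_list = [predicted_labels[label] for label in ground_truth_labels]
    ground_truth_labels_list = [ground_truth_labels[label] for label in ground_truth_labels]

    # Binarize probabilities before comparing them with the 0/1 ground truth
    binarized = [1 if prob > threshold else 0 for prob in predicted_labels_list]

    # Output keys are assumed; zero_division=0 is added here only to avoid
    # undefined-metric warnings with the placeholder ground truth.
    return {
        "predicted_labels": predicted_labels,
        "accuracy": accuracy_score(ground_truth_labels_list, binarized),
        "precision": precision_score(ground_truth_labels_list, binarized,
                                     average='weighted', zero_division=0),
        "recall": recall_score(ground_truth_labels_list, binarized,
                               average='weighted', zero_division=0),
        "f1": f1_score(ground_truth_labels_list, binarized,
                       average='weighted', zero_division=0),
        "classification_report": classification_report(ground_truth_labels_list,
                                                       binarized, zero_division=0),
    }

# Gradio Interface
iface = gr.Interface(fn=evaluate_text, inputs="text", outputs="json",
                     title="Text Evaluation-Manjinder", description="Enter your text")
iface.launch(share=True)

The substance of the commit is that doc.cats holds probabilities, so they must be thresholded into 0/1 predictions before being passed to the scikit-learn metrics, and iterating over ground_truth_labels keeps both lists in the same label order; the weighted average keeps precision, recall, and F1 meaningful when the label distribution is imbalanced.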