Suvajit Majumder committed
Commit 4b514ad · 1 Parent(s): 0700510

app.py fix

Files changed (1)
  1. app.py +40 -45
app.py CHANGED
@@ -1,16 +1,20 @@
 import gradio as gr
 from transformers import pipeline
+import sys
+
+# Toggle this to True if you want to see debug prints
+DEBUG = False
 
 # Load the toxicity classification pipeline
 print("Loading toxicity classifier pipeline...")
 toxicity_pipeline = pipeline(
     "text-classification",
     model="s-nlp/roberta_toxicity_classifier",
-    tokenizer="s-nlp/roberta_toxicity_classifier",
-    # return_all_scores=True  # This ensures we get scores for both classes
+    tokenizer="s-nlp/roberta_toxicity_classifier"
 )
 print("Pipeline loaded successfully!")
 
+
 def toxicity_classification(text: str) -> dict:
     """
     Classify the toxicity of the given text.
@@ -19,36 +23,38 @@ def toxicity_classification(text: str) -> dict:
         text (str): The text to analyze
 
     Returns:
-        dict: A dictionary containing toxicity scores and classification
+        dict: A dictionary containing toxicity classification and confidence
     """
     if not text.strip():
-        return {
-            "error": "Please enter some text to analyze"
-        }
-
+        return {"error": "Please enter some text to analyze"}
+
     try:
-        # Get predictions using the pipeline
+        # Get the top prediction using the pipeline
         result = toxicity_pipeline(text)[0]
-
-        # The pipeline returns a list like: [{'label': 'LABEL_0', 'score': 0.8}]
-        # LABEL_0 is non-toxic, LABEL_1 is toxic (based on the model description)
-
-        # Convert to a more readable format
 
-        label = result['label']
-        confidence = result['score']
+        if DEBUG:
+            print(f"DEBUG - Pipeline result: {result}")
+
+        # The model returns labels like "neutral" or "toxic"
+        label = result.get("label", "neutral").lower()
+        score = result.get("score", 0.0)
+
+        # Map "neutral" (or any non-toxic) to non-toxic
+        if label == "toxic":
+            classification = "toxic"
+            confidence = score
+        else:
+            classification = "non-toxic"
+            confidence = score
 
-        classification = "non-toxic" if label == "LABEL_0" else "toxic"
-
         return {
             "classification": classification,
             "confidence": round(confidence, 4)
         }
-
+
     except Exception as e:
-        return {
-            "error": f"Error processing text: {str(e)}"
-        }
+        return {"error": f"Error processing text: {str(e)}"}
+
 
 # Create the Gradio interface
 demo = gr.Interface(
@@ -64,48 +70,37 @@ demo = gr.Interface(
     examples=[
         ["You are amazing!"],
         ["This is a wonderful day."],
-        ["I disagree with your opinion."],
-        ["The weather is nice today."]
+        ["I hate you so much!"],
+        ["You're such an idiot!"],
     ]
 )
 
-# Launch the interface and MCP server
+
 if __name__ == "__main__":
-    # Add debugging section for local testing
-    import sys
-
-    # Check if running in debug mode (pass 'debug' as command line argument)
-    if len(sys.argv) > 1 and sys.argv[1] == "debug":
+    # If "debug" was passed as a command-line argument, run local tests
+    if len(sys.argv) > 1 and sys.argv[1].lower() == "debug":
+        DEBUG = True
         print("=" * 50)
-        print("DEBUG MODE - Testing toxicity classification")
+        print("DEBUG MODE - Testing toxicity classification locally")
         print("=" * 50)
-
-        # Test cases for debugging
+
        test_cases = [
            "You are amazing!",
            "This is a wonderful day.",
            "I hate you so much!",
-            "The weather is nice today.",
            "You're such an idiot!",
            "I disagree with your opinion.",
            ""  # Empty string test
        ]
-
-        for i, test_text in enumerate(test_cases):
+
+        for i, test_text in enumerate(test_cases, 1):
            print(f"\n--- Test Case {i} ---")
            print(f"Input: '{test_text}'")
-
-            # Set breakpoint here for debugging
-            import pdb; pdb.set_trace()
-
-            # Call the function
            result = toxicity_classification(test_text)
-
            print(f"Output: {result}")
            print("-" * 30)
-
+
        print("\nDebug testing completed!")
-
    else:
-        # Normal Gradio mode
-        demo.launch(mcp_server=True)
+        # Normal Gradio mode: launch with MCP server enabled
+        demo.launch(mcp_server=True)
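
For a quick sanity check of the new label mapping outside the Gradio UI, a minimal sketch along the following lines can be used. It assumes, as the updated comments do, that the s-nlp/roberta_toxicity_classifier checkpoint reports "neutral"/"toxic" labels; the exact strings should be confirmed against the model's id2label config before relying on them.

from transformers import pipeline

# Assumed label names ("neutral"/"toxic"); check pipe.model.config.id2label to confirm.
pipe = pipeline("text-classification", model="s-nlp/roberta_toxicity_classifier")

for text in ["You are amazing!", "You're such an idiot!"]:
    result = pipe(text)[0]  # e.g. {'label': 'toxic', 'score': 0.99}
    classification = "toxic" if result["label"].lower() == "toxic" else "non-toxic"
    print(f"{text!r} -> {classification} ({result['score']:.4f})")

With this change, python app.py debug runs the hard-coded test cases (including the empty-string path) without the pdb breakpoint the old version hit on every iteration, while python app.py launches the Gradio interface with the MCP server enabled.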