lg3394 committed (verified)
Commit 425a29c · Parent(s): 9ed27b9

Update app.py

Restructures the moderation results as dictionaries, adds a matplotlib comparison bar chart, and appends safe-rephrasing suggestions for flagged text.

Files changed (1):
  1. app.py (+51 −31)
app.py CHANGED

@@ -7,6 +7,7 @@ from azure.ai.contentsafety.models import TextCategory
 from azure.core.credentials import AzureKeyCredential
 from azure.core.exceptions import HttpResponseError
 from azure.ai.contentsafety.models import AnalyzeTextOptions
+import matplotlib.pyplot as plt
 
 # Load OpenAI and Anthropic API Keys from environment variables
 openai.api_key = os.getenv("openaiapikey")
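Review note on the new import: pyplot keeps process-wide global figure state, which can misbehave once a Gradio app serves concurrent requests, and Spaces run without a display. A minimal sketch of a safer setup, assuming matplotlib 3.1+; make_bar_figure is a hypothetical helper, not part of this commit:

    import matplotlib
    matplotlib.use("Agg")  # non-interactive backend; assumption: the Space runs headless
    from matplotlib.figure import Figure

    def make_bar_figure(labels, values):
        # The object-oriented API sidesteps pyplot's shared global state.
        fig = Figure()
        ax = fig.subplots()
        ax.bar(labels, values)
        return fig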
@@ -36,40 +37,32 @@ def analyze_text_azure(user_text):
         return f"Error occurred with Azure Content Safety: {e}"
 
     # Extract moderation results
-    hate_result = next((item for item in response.categories_analysis if item.category == TextCategory.HATE), None)
-    self_harm_result = next((item for item in response.categories_analysis if item.category == TextCategory.SELF_HARM), None)
-    sexual_result = next((item for item in response.categories_analysis if item.category == TextCategory.SEXUAL), None)
-    violence_result = next((item for item in response.categories_analysis if item.category == TextCategory.VIOLENCE), None)
-
-    results = []
-    if hate_result:
-        results.append(f"Hate severity: {hate_result.severity}")
-    if self_harm_result:
-        results.append(f"SelfHarm severity: {self_harm_result.severity}")
-    if sexual_result:
-        results.append(f"Sexual severity: {sexual_result.severity}")
-    if violence_result:
-        results.append(f"Violence severity: {violence_result.severity}")
-
-    return "\n".join(results) if results else "No flagged content detected in Azure Content Safety."
+    results = {}
+    for category in [TextCategory.HATE, TextCategory.SELF_HARM, TextCategory.SEXUAL, TextCategory.VIOLENCE]:
+        result = next((item for item in response.categories_analysis if item.category == category), None)
+        if result:
+            results[category] = result.severity
+
+    return results if results else {"No flagged content detected": 0}
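Review note: Azure Content Safety reports integer severities (0, 2, 4, 6 by default; 0-7 when eight-level output is enabled), while the comparison chart added below clamps its y-axis to [0, 1]. A sketch of normalizing the returned dict onto that scale; normalize_azure is a hypothetical helper and the 0-7 ceiling is an assumption about the configured severity mode:

    def normalize_azure(results):
        # Map Azure's integer severities (assumed 0-7 scale) onto [0, 1]
        # so they are comparable with the other tools on one chart.
        return {category: severity / 7 for category, severity in results.items()}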
 
 def moderate_text(user_text):
     # OpenAI Moderation
-    openai_moderation_result = "Error in OpenAI Moderation"
+    openai_result = {"Classification": "Error in OpenAI Moderation", "Severity": 0}
     try:
         response = openai.Moderation.create(input=user_text)
         moderation_categories = response["results"][0]["categories"]
         moderation_flagged = response["results"][0]["flagged"]
 
         if moderation_flagged:
-            openai_moderation_result = f"OpenAI flags the text for the following categories: {', '.join([category for category, flagged in moderation_categories.items() if flagged])}"
+            openai_result["Classification"] = "Blocked"
+            openai_result["Severity"] = max([category for category, flagged in moderation_categories.items() if flagged], key=moderation_categories.get)
         else:
-            openai_moderation_result = "The text is not flagged for any moderation issues by OpenAI."
+            openai_result["Classification"] = "Allowed"
     except Exception as e:
-        openai_moderation_result = f"Error occurred with OpenAI: {e}"
+        openai_result = {"Classification": f"Error occurred with OpenAI: {e}", "Severity": 0}
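Review note: max([...], key=moderation_categories.get) picks over category *names*, so "Severity" ends up holding a string such as "hate", which the chart code below then treats as a number. If a numeric severity is intended, the same Moderation response also carries per-category float scores; a drop-in sketch for that line, assuming the pre-1.0 openai client used here:

    scores = response["results"][0]["category_scores"]
    # Highest per-category score, a float in [0, 1], instead of a category name.
    openai_result["Severity"] = max(scores.values())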
 
     # Anthropic Moderation
-    anthropic_moderation_result = "Error in Anthropic Moderation"
+    anthropic_result = {"Classification": "Error in Anthropic Moderation", "Severity": 0}
     try:
         prompt_template = """
         You are a content moderation expert tasked with categorizing user-generated text.
@@ -90,25 +83,52 @@ def moderate_text(user_text):
             messages=[{"role": "user", "content": prompt}]
         ).content[0].text
 
-        anthropic_moderation_result = f"Anthropic's moderation result: {response}"
-
+        if "blocked" in response.lower():
+            anthropic_result["Classification"] = "Blocked"
+            anthropic_result["Severity"] = 1  # Assigning severity for blocked content
+        else:
+            anthropic_result["Classification"] = "Allowed"
+            anthropic_result["Severity"] = 0
     except Exception as e:
-        anthropic_moderation_result = f"Error occurred with Anthropic: {e}"
+        anthropic_result = {"Classification": f"Error occurred with Anthropic: {e}", "Severity": 0}
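Review note: the substring test "blocked" in response.lower() also matches replies like "this should not be blocked". A sketch of a stricter parse, assuming the prompt is tightened to demand a one-word verdict ("Blocked" or "Allowed"); this variant is illustrative, not part of the commit:

    verdict = response.strip().split()[0].lower() if response.strip() else ""
    anthropic_result["Classification"] = "Blocked" if verdict == "blocked" else "Allowed"
    anthropic_result["Severity"] = 1 if verdict == "blocked" else 0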
 
     # Azure Content Safety Moderation
-    azure_moderation_result = analyze_text_azure(user_text)
-
-    return openai_moderation_result, anthropic_moderation_result, azure_moderation_result
-
+    azure_result = analyze_text_azure(user_text)
+
+    # Combine results and generate bar chart
+    categories = ["OpenAI", "Anthropic", "Microsoft Azure"]
+    classifications = [openai_result["Severity"], anthropic_result["Severity"], sum(azure_result.values()) / len(azure_result) if azure_result else 0]
+
+    bar_chart = create_comparison_chart(categories, classifications)
+
+    # Safe text suggestion for blocked content
+    suggestions = ""
+    if openai_result["Classification"] == "Blocked":
+        suggestions += "OpenAI flagged the text for harmful content. Suggested Rephrase: 'Please use more respectful language.'\n"
+    if anthropic_result["Classification"] == "Blocked":
+        suggestions += "Anthropic flagged the text. Suggested Rephrase: 'Avoid harmful or offensive language.'\n"
+    if any(value > 0.5 for value in azure_result.values()):
+        suggestions += "Azure flagged some content. Suggested Rephrase: 'Try to avoid sensitive topics and ensure respectful language.'"
+
+    return openai_result, anthropic_result, azure_result, bar_chart, suggestions
+
+def create_comparison_chart(categories, values):
+    fig, ax = plt.subplots()
+    ax.bar(categories, values, color=['red', 'orange', 'green'])
+    ax.set_title("Content Moderation Comparison")
+    ax.set_ylabel("Severity Score")
+    ax.set_ylim(0, 1)
+    ax.set_xlabel("Moderation Tool")
+    return fig
 
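Review note: the three bars mix scales (Anthropic's severity is 0 or 1, OpenAI's value is a category name as noted above, and Azure's integers reach 7 against set_ylim(0, 1)). Also, analyze_text_azure still returns a plain error *string* on HttpResponseError, so azure_result.values() would raise AttributeError on that path. A defensive sketch for the Azure term; azure_average is a hypothetical helper and the 0-7 scale is an assumption:

    def azure_average(azure_result):
        # Guard: on the error path azure_result is a string, not a dict.
        if not isinstance(azure_result, dict) or not azure_result:
            return 0.0
        # Normalize the mean severity onto the chart's [0, 1] axis.
        return sum(azure_result.values()) / (7 * len(azure_result))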
 # Create the Gradio interface with updated input and output labels
 iface = gr.Interface(
     fn=moderate_text,
     inputs=gr.Textbox(lines=2, placeholder="Please write your text here..."),
-    outputs=[gr.Textbox(label="OpenAI"), gr.Textbox(label="Anthropic"), gr.Textbox(label="Microsoft Azure")],
+    outputs=[gr.Textbox(label="OpenAI"), gr.Textbox(label="Anthropic"), gr.Textbox(label="Microsoft Azure"), gr.Plot(label="Comparison Bar Chart"), gr.Textbox(label="Safe Text Suggestions")],
     title="Content Moderation Tool",
-    description="Enter some text and get the moderation results from OpenAI, Anthropic, and Azure Content Safety."
+    description="Enter some text and get the moderation results from OpenAI, Anthropic, Azure Content Safety, and suggestions for safe rephrasing."
 )
 
 if __name__ == "__main__":
-    iface.launch()
+    iface.launch()
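Review note: the first three outputs are now dicts, which gr.Textbox renders through str(). A sketch of a closer fit, assuming a Gradio build that includes gr.JSON; illustrative only, not part of the commit:

    outputs = [
        gr.JSON(label="OpenAI"),             # dict results render as structured JSON
        gr.JSON(label="Anthropic"),
        gr.JSON(label="Microsoft Azure"),
        gr.Plot(label="Comparison Bar Chart"),
        gr.Textbox(label="Safe Text Suggestions"),
    ]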