Update app.py
app.py CHANGED
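The commit replaces the matplotlib severity chart and the canned rephrase suggestions with one plain-text summary per provider, and it also fixes two syntax errors in the gr.Interface call: an unclosed outputs list and an unterminated description string.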
@@ -7,7 +7,6 @@ from azure.ai.contentsafety.models import TextCategory
 from azure.core.credentials import AzureKeyCredential
 from azure.core.exceptions import HttpResponseError
 from azure.ai.contentsafety.models import AnalyzeTextOptions
-import matplotlib.pyplot as plt
 
 # Load OpenAI and Anthropic API Keys from environment variables
 openai.api_key = os.getenv("openaiapikey")
@@ -37,39 +36,45 @@ def analyze_text_azure(user_text):
         return f"Error occurred with Azure Content Safety: {e}"
 
     # Extract moderation results
-
-    for
-
-
-
-
-
+    hate_result = next((item for item in response.categories_analysis if item.category == TextCategory.HATE), None)
+    self_harm_result = next((item for item in response.categories_analysis if item.category == TextCategory.SELF_HARM), None)
+    sexual_result = next((item for item in response.categories_analysis if item.category == TextCategory.SEXUAL), None)
+    violence_result = next((item for item in response.categories_analysis if item.category == TextCategory.VIOLENCE), None)
+
+    results = []
+    if hate_result:
+        results.append(f"Hate severity: {hate_result.severity}")
+    if self_harm_result:
+        results.append(f"SelfHarm severity: {self_harm_result.severity}")
+    if sexual_result:
+        results.append(f"Sexual severity: {sexual_result.severity}")
+    if violence_result:
+        results.append(f"Violence severity: {violence_result.severity}")
+
+    return "\n".join(results) if results else "No flagged content detected in Azure Content Safety."
 
 def moderate_text(user_text):
     # OpenAI Moderation
-
+    openai_moderation_result = "Error in OpenAI Moderation"
     try:
         response = openai.Moderation.create(input=user_text)
         moderation_categories = response["results"][0]["categories"]
         moderation_flagged = response["results"][0]["flagged"]
 
         if moderation_flagged:
-
-            openai_result["Severity"] = max([category for category, flagged in moderation_categories.items() if flagged], key=moderation_categories.get)
+            openai_moderation_result = f"OpenAI flags the text for the following categories: {', '.join([category for category, flagged in moderation_categories.items() if flagged])}"
         else:
-
+            openai_moderation_result = "The text is not flagged for any moderation issues by OpenAI."
     except Exception as e:
-
+        openai_moderation_result = f"Error occurred with OpenAI: {e}"
 
     # Anthropic Moderation
-
+    anthropic_moderation_result = "Error in Anthropic Moderation"
     try:
         prompt_template = """
         You are a content moderation expert tasked with categorizing user-generated text.
-
         Here is the user-generated text to categorize:
         <user_text>{user_text}</user_text>
-
         Based on the content, classify this text as either ALLOW or BLOCK. Return nothing else.
         """
 
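For orientation, the `response` that the new extraction iterates over is produced in the unchanged lines between the first two hunks, which the diff does not show. A minimal sketch of what that setup presumably looks like with the azure-ai-contentsafety SDK; the client variable and the endpoint/key environment-variable names are assumptions, not the Space's actual code:

    import os
    from azure.ai.contentsafety import ContentSafetyClient
    from azure.ai.contentsafety.models import AnalyzeTextOptions
    from azure.core.credentials import AzureKeyCredential
    from azure.core.exceptions import HttpResponseError

    # Assumed env-var names, following the "openaiapikey" pattern used above.
    client = ContentSafetyClient(
        os.getenv("azurecontentsafetyendpoint"),
        AzureKeyCredential(os.getenv("azurecontentsafetykey")),
    )

    def analyze_text_azure(user_text):
        try:
            # analyze_text returns an AnalyzeTextResult; its categories_analysis
            # list is what the per-category extraction in this hunk walks through.
            response = client.analyze_text(AnalyzeTextOptions(text=user_text))
        except HttpResponseError as e:
            return f"Error occurred with Azure Content Safety: {e}"
        # ...followed by the per-category extraction added above...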
@@ -83,52 +88,25 @@ def moderate_text(user_text):
             messages=[{"role": "user", "content": prompt}]
         ).content[0].text
 
-
-
-            anthropic_result["Severity"] = 1  # Assigning severity for blocked content
-        else:
-            anthropic_result["Classification"] = "Allowed"
-            anthropic_result["Severity"] = 0
+        anthropic_moderation_result = f"Anthropic's moderation result: {response}"
+
     except Exception as e:
-
+        anthropic_moderation_result = f"Error occurred with Anthropic: {e}"
 
     # Azure Content Safety Moderation
-
-
-
-
-    classifications = [openai_result["Severity"], anthropic_result["Severity"], sum(azure_result.values()) / len(azure_result) if azure_result else 0]
-
-    bar_chart = create_comparison_chart(categories, classifications)
-
-    # Safe text suggestion for blocked content
-    suggestions = ""
-    if openai_result["Classification"] == "Blocked":
-        suggestions += "OpenAI flagged the text for harmful content. Suggested Rephrase: 'Please use more respectful language.'\n"
-    if anthropic_result["Classification"] == "Blocked":
-        suggestions += "Anthropic flagged the text. Suggested Rephrase: 'Avoid harmful or offensive language.'\n"
-    if any(value > 0.5 for value in azure_result.values()):
-        suggestions += "Azure flagged some content. Suggested Rephrase: 'Try to avoid sensitive topics and ensure respectful language.'"
-
-    return openai_result, anthropic_result, azure_result, bar_chart, suggestions
-
-def create_comparison_chart(categories, values):
-    fig, ax = plt.subplots()
-    ax.bar(categories, values, color=['red', 'orange', 'green'])
-    ax.set_title("Content Moderation Comparison")
-    ax.set_ylabel("Severity Score")
-    ax.set_ylim(0, 1)
-    ax.set_xlabel("Moderation Tool")
-    return fig
+    azure_moderation_result = analyze_text_azure(user_text)
+
+    return openai_moderation_result, anthropic_moderation_result, azure_moderation_result
+
 
 # Create the Gradio interface with updated input and output labels
 iface = gr.Interface(
     fn=moderate_text,
     inputs=gr.Textbox(lines=2, placeholder="Please write your text here..."),
-    outputs=[gr.Textbox(label="OpenAI"), gr.Textbox(label="Anthropic"), gr.Textbox(label="Microsoft Azure")
+    outputs=[gr.Textbox(label="OpenAI"), gr.Textbox(label="Anthropic"), gr.Textbox(label="Microsoft Azure")],
     title="Content Moderation Tool",
-    description="Enter some text and get the moderation results from OpenAI, Anthropic, Azure Content Safety
+    description="Enter some text and get the moderation results from OpenAI, Anthropic, and Azure Content Safety."
 )
 
 if __name__ == "__main__":
-    iface.launch()
+    iface.launch()
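Similarly, the Anthropic request lives almost entirely in unchanged lines: only the `messages=` argument and the trailing `.content[0].text` appear in the hunk above. It presumably follows the standard Messages API shape; in this sketch the client setup, model name, and token cap are assumptions:

    import anthropic

    anthropic_client = anthropic.Anthropic(api_key=os.getenv("anthropicapikey"))  # assumed env-var name
    prompt = prompt_template.format(user_text=user_text)  # fill the <user_text> placeholder
    response = anthropic_client.messages.create(
        model="claude-3-haiku-20240307",  # assumed; the actual model is outside the diff
        max_tokens=10,  # assumed small cap, since the prompt asks for a bare ALLOW/BLOCK
        messages=[{"role": "user", "content": prompt}],
    ).content[0].text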
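With this commit, moderate_text returns three plain strings, one per provider, matching the three output Textboxes. A quick way to sanity-check the new return shape locally, assuming all three providers' credentials are set in the environment:

    openai_verdict, anthropic_verdict, azure_verdict = moderate_text("Sample text to check.")
    print(openai_verdict)     # e.g. "The text is not flagged for any moderation issues by OpenAI."
    print(anthropic_verdict)  # e.g. "Anthropic's moderation result: ALLOW"
    print(azure_verdict)      # per-category severities, or the no-flag message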