Commit 1d3d95d · 1 Parent(s): 3740d78
Stricter moderation

Files changed:
- .gradio/certificate.pem +31 -0
- app.py +135 -22
.gradio/certificate.pem ADDED

@@ -0,0 +1,31 @@
+-----BEGIN CERTIFICATE-----
+MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
+TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
+cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
+WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
+ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
+MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
+h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
+0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
+A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
+T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
+B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
+B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
+KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
+OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
+jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
+qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
+rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
+HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
+hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
+ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
+3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
+NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
+ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
+TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
+jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
+oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
+4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
+mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
+emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
+-----END CERTIFICATE-----
app.py CHANGED
@@ -15,6 +15,8 @@ from video_config import MODEL_FRAME_RATES, calculate_frames
 import asyncio
 from openai import OpenAI
 import base64
+from google.cloud import vision
+from google.oauth2 import service_account
 
 dotenv.load_dotenv()
 
@@ -360,9 +362,10 @@ def poll_generation_status(generation_id):
 
 async def moderate_prompt(prompt: str) -> dict:
     """
-    Check if a text prompt contains NSFW content
+    Check if a text prompt contains NSFW content with strict rules against inappropriate content
     """
     try:
+        # First check with OpenAI moderation
        response = openai_client.moderations.create(input=prompt)
        result = response.results[0]
 
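Note: the unchanged lines elided between these hunks are what build `flagged_categories` from the moderation result. A minimal sketch of how that typically looks with the openai-python v1 client (the helper below is illustrative, not code from this commit):

    from openai import OpenAI

    openai_client = OpenAI()  # reads OPENAI_API_KEY from the environment

    def flagged_categories_for(prompt: str) -> list[str]:
        """Return the names of the moderation categories that flagged this prompt."""
        result = openai_client.moderations.create(input=prompt).results[0]
        # `categories` is a pydantic model of booleans; keep the ones set to True
        return [name for name, hit in result.categories.model_dump().items() if hit]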
@@ -378,56 +381,153 @@ async def moderate_prompt(prompt: str) -> dict:
                 "reason": f"Content flagged for: {', '.join(flagged_categories)}"
             }
 
+        # Additional checks for keywords related to minors or inappropriate content
+        keywords = [
+            "child", "kid", "minor", "teen", "young", "baby", "infant", "underage",
+            "naked", "nude", "nsfw", "porn", "xxx", "sex", "explicit",
+            "inappropriate", "adult content"
+        ]
+
+        lower_prompt = prompt.lower()
+        found_keywords = [word for word in keywords if word in lower_prompt]
+
+        if found_keywords:
+            return {
+                "isNSFW": True,
+                "reason": f"Content contains inappropriate keywords: {', '.join(found_keywords)}"
+            }
+
         return {"isNSFW": False, "reason": None}
     except Exception as e:
         print(f"Error during prompt moderation: {e}")
-
+        # If there's an error, reject the prompt to be safe
+        return {
+            "isNSFW": True,
+            "reason": "Failed to verify prompt safety - please try again"
+        }
 
 async def moderate_image(image_path: str) -> dict:
     """
-    Check if an image contains NSFW content using
+    Check if an image contains NSFW content using both Google Cloud Vision API's SafeSearch detection
+    and OpenAI's vision model for double verification
     """
     try:
-        # Convert image to base64
+        # Convert image to base64 for OpenAI
         with open(image_path, "rb") as image_file:
             base64_image = base64.b64encode(image_file.read()).decode('utf-8')
 
-
+        # 1. Google Cloud Vision API Check using proper client library
+        try:
+            # Get service account info from environment
+            service_account_info = json.loads(os.getenv('SERVICE_ACCOUNT_JSON'))
+
+            # Initialize Vision client with credentials
+            credentials = service_account.Credentials.from_service_account_info(service_account_info)
+            vision_client = vision.ImageAnnotatorClient(credentials=credentials)
+
+            # Load image content
+            with open(image_path, "rb") as image_file:
+                content = image_file.read()
+
+            # Create image object
+            image = vision.Image(content=content)
+
+            # Perform safe search detection
+            response = vision_client.safe_search_detection(image=image)
+            safe_search = response.safe_search_annotation
+
+            # Map likelihood values
+            likelihood_values = {
+                vision.Likelihood.VERY_LIKELY: 4,
+                vision.Likelihood.LIKELY: 3,
+                vision.Likelihood.POSSIBLE: 2,
+                vision.Likelihood.UNLIKELY: 1,
+                vision.Likelihood.VERY_UNLIKELY: 0,
+                vision.Likelihood.UNKNOWN: 0
+            }
+
+            # Get likelihood scores
+            adult_score = likelihood_values[safe_search.adult]
+            racy_score = likelihood_values[safe_search.racy]
+            violence_score = likelihood_values[safe_search.violence]
+            medical_score = likelihood_values[safe_search.medical]
+
+            # Determine if content is NSFW according to Vision API
+            vision_reasons = []
+            if adult_score >= 3:  # LIKELY or VERY_LIKELY
+                vision_reasons.append("adult content")
+            if racy_score >= 3:  # LIKELY or VERY_LIKELY
+                vision_reasons.append("suggestive content")
+            if violence_score >= 3:  # LIKELY or VERY_LIKELY
+                vision_reasons.append("violent content")
+
+            # Print Vision API results
+            print("Google Cloud Vision API Results:")
+            print(f"Adult: {vision.Likelihood(safe_search.adult).name}")
+            print(f"Racy: {vision.Likelihood(safe_search.racy).name}")
+            print(f"Violence: {vision.Likelihood(safe_search.violence).name}")
+            print(f"Medical: {vision.Likelihood(safe_search.medical).name}")
+
+        except Exception as vision_error:
+            print(f"Error with Vision API: {vision_error}")
+            vision_reasons = []  # Continue with OpenAI check if Vision API fails
+
+        # 2. OpenAI Vision Check
+        openai_response = openai_client.chat.completions.create(
             model="gpt-4o",
             messages=[
                 {
                     "role": "system",
-                    "content": "You are a content
+                    "content": "You are a content review system. Your task is to analyze the image and determine if it contains any inappropriate content that would make it unsuitable for general audiences. Especially don't allow any content involving minors (children). Please respond with 'SAFE' if the image is appropriate, or 'NSFW: <reason>' if you detect any concerning content."
                 },
                 {
                     "role": "user",
                     "content": [
-                        {"type": "text", "text": "Is this image appropriate
+                        {"type": "text", "text": "Is this image appropriate for general audiences?"},
                         {"type": "image_url", "image_url": {
                             "url": f"data:image/jpeg;base64,{base64_image}"
                         }}
                     ]
                 }
             ],
-            max_tokens=
+            max_tokens=150
         )
 
-
+        openai_result = openai_response.choices[0].message.content.strip()
+        print("\nOpenAI Vision Check Result:")
+        print(openai_result)
+
+        # Combine results from both services
+        is_nsfw_vision = bool(vision_reasons)
+        is_nsfw_openai = openai_result.startswith("NSFW:")
 
-
+        # If either service flags the content, reject it
+        if is_nsfw_vision or is_nsfw_openai:
+            reasons = []
+            if vision_reasons:
+                reasons.append(f"Google Vision: {', '.join(vision_reasons)}")
+            if is_nsfw_openai:
+                reasons.append(f"OpenAI: {openai_result[5:].strip()}")
+
             return {
                 "isNSFW": True,
-            "reason": "Image contains
+                "reason": f"Image contains inappropriate content: {'; '.join(reasons)}"
             }
 
         return {"isNSFW": False, "reason": None}
+
     except Exception as e:
         print(f"Error during image moderation: {e}")
-
+        # If there's an error, reject the image to be safe
+        return {
+            "isNSFW": True,
+            "reason": "Failed to verify image safety - please try again"
+        }
 
 async def moderate_combined(prompt: str, image_path: str) -> dict:
     """
-    Check if both a prompt and image contain NSFW content together
+    Check if both a prompt and image contain NSFW content together with strict rules
     """
     try:
         # Convert image to base64
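Note: the keyword check added above uses plain substring matching (`word in lower_prompt`), which also fires on harmless text: "kid" matches "kidney", "teen" matches "seventeen", "sex" matches "Sussex". A word-boundary variant, sketched here as a possible refinement rather than what this commit ships:

    import re

    KEYWORDS = [
        "child", "kid", "minor", "teen", "young", "baby", "infant", "underage",
        "naked", "nude", "nsfw", "porn", "xxx", "sex", "explicit",
    ]

    # \b anchors restrict matches to whole words, avoiding "kidney" or "Sussex"
    KEYWORD_RE = re.compile(
        r"\b(" + "|".join(map(re.escape, KEYWORDS)) + r")\b", re.IGNORECASE
    )

    def found_keywords(prompt: str) -> list[str]:
        return sorted({m.group(0).lower() for m in KEYWORD_RE.finditer(prompt)})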
@@ -439,14 +539,24 @@ async def moderate_combined(prompt: str, image_path: str) -> dict:
             messages=[
                 {
                     "role": "system",
-                    "content": "You are a content moderation system. Your task is to determine if an image
+                    "content": """You are a strict content moderation system. Your task is to determine if an image and prompt combination would result in inappropriate content.
+
+                    STRICT RULES:
+                    1. NO content involving minors in any way
+                    2. NO nudity or sexually suggestive content
+                    3. NO extreme violence or gore
+                    4. NO hate speech or discriminatory content
+                    5. NO illegal activities
+
+                    Respond with 'NSFW: <reason>' if ANY of these rules are violated, or 'SAFE' if appropriate.
+                    Be extremely cautious - if there's any doubt, mark it as NSFW."""
                 },
                 {
                     "role": "user",
                     "content": [
                         {
                             "type": "text",
-                            "text": f'Please moderate this image and prompt combination for an image-to-video generation:\n\nPrompt: "{prompt}"'
+                            "text": f'Please moderate this image and prompt combination for an image-to-video generation:\n\nPrompt: "{prompt}"\n\nEnsure NO inappropriate content, especially involving minors.'
                         },
                         {
                             "type": "image_url",
@@ -472,7 +582,11 @@ async def moderate_combined(prompt: str, image_path: str) -> dict:
         }
     except Exception as e:
         print(f"Error during combined moderation: {e}")
-
+        # If there's an error, reject to be safe
+        return {
+            "isNSFW": True,
+            "reason": "Failed to verify content safety - please try again"
+        }
 
 async def generate_video(input_image, subject, duration, selected_index, progress=gr.Progress()):
     try:
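Note: all three moderation helpers are declared `async def` but call synchronous SDK methods (the OpenAI and Vision clients used here are blocking), which stalls the event loop that Gradio's queue runs on while each HTTP call is in flight. One way to keep the loop responsive, assuming the synchronous `openai_client` from app.py stays in place:

    import asyncio

    async def moderation_result(prompt: str):
        # Run the blocking HTTP call in a worker thread so other queued
        # requests are not stalled while this one waits on the network.
        response = await asyncio.to_thread(openai_client.moderations.create, input=prompt)
        return response.results[0]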
@@ -564,7 +678,7 @@ async def generate_video(input_image, subject, duration, selected_index, progress=gr.Progress()):
         raise e
 
 def update_selection(evt: gr.SelectData):
-    selected_lora = loras[evt.index]
+    selected_lora = loras[evt.index]
     sentence = f"Selected LoRA: {selected_lora['title']}"
     return selected_lora['id'], sentence
 
@@ -954,12 +1068,11 @@ with gr.Blocks(css=css, theme=gr.themes.Soft(primary_hue="indigo", neutral_hue="
     # Add a custom handler to check if inputs are valid
     def check_inputs(subject, image_input, selected_index):
         if not selected_index:
-
+            raise gr.Error("You must select a LoRA before proceeding.")
         if not subject.strip():
-
+            raise gr.Error("Please describe your subject.")
         if image_input is None:
-
+            raise gr.Error("Please upload an image or select an example image.")
-        return None
 
     # Use gr.on for the button click with validation
     button.click(
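Note: replacing the old `return None` with `raise gr.Error(...)` is the idiomatic Gradio pattern: an uncaught gr.Error surfaces as an error toast in the UI, and when validation runs first in an event chain it halts the downstream steps. A sketch of how the wiring presumably looks (the actual `button.click(...)` arguments sit outside this hunk, and the component names `duration` and `output_video` are assumed):

    button.click(
        fn=check_inputs,
        inputs=[subject, image_input, selected_index],
        outputs=None,
    ).success(  # runs only if check_inputs did not raise gr.Error
        fn=generate_video,
        inputs=[image_input, subject, duration, selected_index],
        outputs=[output_video],
    )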
@@ -981,4 +1094,4 @@ with gr.Blocks(css=css, theme=gr.themes.Soft(primary_hue="indigo", neutral_hue="
 
 if __name__ == "__main__":
     demo.queue(default_concurrency_limit=20)
-    demo.launch(ssr_mode=False)
+    demo.launch(ssr_mode=False, share=True)
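Note: two side effects of this change are worth connecting. The `.gradio/certificate.pem` added above is the public ISRG Root X1 (Let's Encrypt) root certificate that Gradio writes locally when it sets up a share tunnel; it contains no secret, so committing it is harmless, though it was presumably swept in alongside this change. And `share=True` requests a public *.gradio.live tunnel, which is redundant on Spaces, where the app is already served publicly. A guarded launch, as a sketch (`SPACE_ID` is an environment variable set by the Spaces runtime):

    import os

    # Only request a share link when running outside Hugging Face Spaces
    on_spaces = os.getenv("SPACE_ID") is not None
    demo.queue(default_concurrency_limit=20)
    demo.launch(ssr_mode=False, share=not on_spaces)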