John Doe committed
Commit 73df9d6 · 1 Parent(s): c988ab0

Deploy my chat filter app

Files changed (3)
  1. Dockerfile +10 -0
  2. image_moderator.py +94 -0
  3. main.py +114 -0
Dockerfile ADDED
@@ -0,0 +1,10 @@
+ FROM python:3.9-slim
+
+ WORKDIR /app
+
+ COPY . /app
+
+ RUN pip install --upgrade pip
+ RUN pip install -r requirements.txt
+
+ CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
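Note: the Dockerfile installs from a requirements.txt, but no such file is part of this three-file commit, so the image build will fail unless one already exists in the repo. A minimal sketch of what it would need to cover the imports in image_moderator.py and main.py (package names only; version pins are left as an assumption):

    fastapi
    uvicorn
    pydantic
    torch
    transformers
    Pillow
    better_profanity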
image_moderator.py ADDED
@@ -0,0 +1,94 @@
+ # image_moderator.py
+
+ from transformers import CLIPProcessor, CLIPModel, ViTForImageClassification, ViTFeatureExtractor
+ from PIL import Image, ImageFilter
+ import torch
+
+ class ImageContentModerator:
+     def __init__(self, nsfw_threshold=0.85, blur_radius=99):
+         # NSFW setup: CLIP zero-shot classification against NSFW/safe prompts
+         self.nsfw_threshold = nsfw_threshold
+         self.nsfw_classes = [
+             "porn", "nudity", "sexual activity", "explicit",
+             "safe", "neutral", "hentai", "suggestive", "drawing"
+         ]
+         self.nsfw_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch16")
+         self.nsfw_processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch16")
+
+         # Violence setup: fine-tuned ViT binary classifier
+         self.violence_model = ViTForImageClassification.from_pretrained('jaranohaal/vit-base-violence-detection')
+         self.violence_extractor = ViTFeatureExtractor.from_pretrained('jaranohaal/vit-base-violence-detection')
+         self.custom_labels = {1: "Non-Violent", 0: "Violent"}
+
+         self.blur_radius = blur_radius
+
+         print("[Moderator] Both models loaded successfully.")
+
+     def is_nsfw(self, image):
+         inputs = self.nsfw_processor(text=self.nsfw_classes, images=image, return_tensors="pt", padding=True)
+         with torch.no_grad():
+             outputs = self.nsfw_model(**inputs)
+         probs = outputs.logits_per_image.softmax(dim=1)[0]
+         top_class = self.nsfw_classes[probs.argmax()]
+         confidence = probs.max().item()
+         print(f"[NSFW] Predicted: {top_class} ({confidence:.2f})")
+         return top_class not in ['safe', 'neutral', 'drawing'] and confidence > self.nsfw_threshold
+
+     def is_violent(self, image):
+         inputs = self.violence_extractor(images=image, return_tensors="pt")
+         with torch.no_grad():
+             outputs = self.violence_model(**inputs)
+         class_idx = outputs.logits.argmax(-1).item()
+         label = self.custom_labels[class_idx]
+         print(f"[Violence] Predicted: {label}")
+         return label == "Violent"
+
+     def blur_image(self, image_path, output_path):
+         image = Image.open(image_path)
+         blurred = image.filter(ImageFilter.GaussianBlur(radius=self.blur_radius))
+         blurred.save(output_path)
+         return output_path
+
+     def process_image(self, image_path, output_path="moderated_image.jpg"):
+         image = Image.open(image_path).convert("RGB")
+
+         if self.is_nsfw(image):
+             print("⚠️ NSFW Content Detected — Blurring Image")
+             return self.blur_image(image_path, output_path)
+
+         if self.is_violent(image):
+             print("⚠️ Violent Content Detected — Blurring Image")
+             return self.blur_image(image_path, output_path)
+
+         print("✅ Image is clean — No action taken")
+         image.save(output_path)
+         return output_path
+
+
+ # --- Text moderation: profanity masking + toxicity classification ---
+ from transformers import pipeline
+ from better_profanity import profanity
+
+ # Toxicity classifier and profanity word list, loaded once at import time
+ toxic_classifier = pipeline("text-classification", model="unitary/toxic-bert")
+ profanity.load_censor_words()
+ custom_words = [
+     "idiot", "moron", "dumb", "stupid", "loser", "bastard", "retard", "scumbag",
+     "asshole", "jerk", "shit", "fuck", "damn", "hell", "crap", "bitch"
+ ]
+ profanity.add_censor_words(custom_words)
+
+ def mask_bad_words(text):
+     return profanity.censor(text)
+
+ def smart_censor(text, toxic_threshold=0.85):
+     result = toxic_classifier(text)[0]
+     label = result['label'].lower()
+     score = result['score']
+
+     if label == "toxic" and score > toxic_threshold:
+         masked_text = mask_bad_words(text)
+         if masked_text != text:
+             return masked_text
+         else:
+             # Toxic overall but no listed word matched; flag instead of masking
+             return "⚠️ Vulgar Content Detected"
+
+     return text
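For a quick local check of both moderators without going through the API, a minimal smoke-test sketch; the file names and sample strings below are hypothetical:

    # smoke_test.py (hypothetical): exercises image_moderator.py directly
    from image_moderator import ImageContentModerator, smart_censor

    moderator = ImageContentModerator()

    # Writes a blurred copy only if the image is flagged NSFW or violent;
    # otherwise saves it unchanged. Returns the output path either way.
    print(moderator.process_image("sample.jpg", "sample_moderated.jpg"))

    print(smart_censor("have a great day"))    # passes through unchanged
    print(smart_censor("you absolute idiot"))  # masked when toxic-bert is confident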
main.py ADDED
@@ -0,0 +1,114 @@
+ # NOTE: an earlier version of these endpoints, accepting multipart file
+ # uploads via FastAPI's UploadFile, was left commented out here; the active
+ # API below takes base64-encoded images as JSON instead.
+
+ from fastapi import FastAPI
+ from pydantic import BaseModel
+ from image_moderator import ImageContentModerator, smart_censor
+ import base64
+ import os
+
+ app = FastAPI()
+
+ # Load models once when the server starts
+ moderator = ImageContentModerator()
+
+ # Schemas
+ class TextRequest(BaseModel):
+     text: str
+
+ class ImageRequest(BaseModel):
+     image: str  # base64 string
+
+ @app.get("/")
+ def root():
+     return {"message": "Chat Moderation API is running"}
+
+ # ✅ IMAGE MODERATION ENDPOINT
+ @app.post("/moderate-image")
+ async def moderate_image(req: ImageRequest):
+     temp_path = "temp_input.png"
+     output_path = "temp_blurred.png"
+     try:
+         # Decode base64 image (split tolerates a "data:image/...;base64," prefix)
+         image_data = base64.b64decode(req.image.split(",")[-1])
+
+         # Save temp image
+         with open(temp_path, "wb") as f:
+             f.write(image_data)
+
+         # Process image (blurred only if flagged NSFW or violent)
+         moderated_path = moderator.process_image(temp_path, output_path)
+
+         # Encode result
+         with open(moderated_path, "rb") as f:
+             encoded = base64.b64encode(f.read()).decode()
+
+         return {"blurred_image_base64": encoded}
+     except Exception as e:
+         return {"error": str(e)}
+     finally:
+         # Clean up temp files even if processing failed partway
+         for path in (temp_path, output_path):
+             if os.path.exists(path):
+                 os.remove(path)
+
+ # ✅ TEXT MODERATION ENDPOINT
+ @app.post("/moderate-text")
+ async def moderate_text(req: TextRequest):
+     try:
+         result = smart_censor(req.text)
+         return {"moderated_text": result}
+     except Exception as e:
+         return {"error": str(e)}
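With the container running (uvicorn serving on port 7860, per the Dockerfile), both endpoints can be exercised with a short client script. A sketch using the requests library; the base URL and file names are placeholders:

    # client.py (hypothetical): calls the two moderation endpoints
    import base64
    import requests

    BASE = "http://localhost:7860"  # placeholder; substitute the deployed URL

    # Text moderation
    r = requests.post(f"{BASE}/moderate-text", json={"text": "you jerk"})
    print(r.json())  # e.g. {"moderated_text": "you ****"}

    # Image moderation: base64 in, base64 out
    with open("sample.jpg", "rb") as f:
        b64 = base64.b64encode(f.read()).decode()
    r = requests.post(f"{BASE}/moderate-image", json={"image": b64})
    data = r.json()
    if "blurred_image_base64" in data:
        with open("result.png", "wb") as f:
            f.write(base64.b64decode(data["blurred_image_base64"]))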