Upload 20 files
Browse files- speech_to_text.py +13 -0
- untitled1.py +100 -0
- untitled10.py +75 -0
- untitled11.py +64 -0
- untitled12.py +38 -0
- untitled13.py +57 -0
- untitled14.py +55 -0
- untitled15.py +52 -0
- untitled16.py +106 -0
- untitled18.py +47 -0
- untitled2.py +106 -0
- untitled20.py +44 -0
- untitled21.py +53 -0
- untitled22.py +90 -0
- untitled23.py +91 -0
- untitled3.py +87 -0
- untitled4.py +76 -0
- untitled6.py +53 -0
- untitled7.py +64 -0
- untitled9.py +66 -0
speech_to_text.py
ADDED
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 1 11:23:30 2024

@author: ysnrfd
"""

import whisper

# Load the Whisper speech-to-text model (the "medium" checkpoint).
# NOTE(review): the model is loaded but never used in this file — presumably
# transcription code (model.transcribe(...)) was meant to follow; confirm.
model = whisper.load_model("medium")
|
13 |
+
|
untitled1.py
ADDED
@@ -0,0 +1,100 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import cv2
import numpy as np

def adjust_brightness_contrast(image, beta=50, alpha=1.5):
    # Increase image brightness (beta offset) and contrast (alpha gain).
    return cv2.convertScaleAbs(image, alpha=alpha, beta=beta)

def gamma_correction(image, gamma=2.0):
    # Convert the image to the YUV color space
    yuv_image = cv2.cvtColor(image, cv2.COLOR_BGR2YUV)

    # Apply gamma correction to the Y (luma) channel only
    yuv_image[..., 0] = np.clip(np.power(yuv_image[..., 0] / 255.0, gamma) * 255.0, 0, 255).astype(np.uint8)

    # Convert back to the BGR color space
    return cv2.cvtColor(yuv_image, cv2.COLOR_YUV2BGR)

def apply_clahe(image):
    # Convert the image to grayscale
    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # Create the CLAHE object and set its parameters
    clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8, 8))

    # Apply CLAHE to the grayscale image
    clahe_image = clahe.apply(gray_image)

    # Convert the grayscale result back to 3-channel BGR
    return cv2.cvtColor(clahe_image, cv2.COLOR_GRAY2BGR)

def enhance_details(image):
    # Use a sharpening filter to enhance details
    kernel = np.array([[0, -1, 0],
                       [-1, 5, -1],
                       [0, -1, 0]])
    sharpened_image = cv2.filter2D(image, -1, kernel)

    return sharpened_image

def convert_to_hsv(image):
    # Convert the image to the HSV color space
    hsv_image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)

    # Increase brightness on the V (value) channel
    hsv_image[..., 2] = cv2.convertScaleAbs(hsv_image[..., 2], alpha=1.5, beta=30)

    # Convert back to the BGR color space
    return cv2.cvtColor(hsv_image, cv2.COLOR_HSV2BGR)

def improve_low_light(image):
    # Brighten via the HSV value channel.
    # NOTE(review): despite the variable name, convert_to_hsv() returns a
    # BGR image, so the steps below operate on BGR data — confirm intended.
    hsv_image = convert_to_hsv(image)

    # Adjust brightness and contrast
    bright_image = adjust_brightness_contrast(hsv_image, beta=50, alpha=1.5)

    # Gamma correction
    # NOTE(review): gamma=2.0 darkens mid-tones (x**2 on [0, 1]); a gamma
    # below 1 would brighten — verify this is intended for low light.
    gamma_corrected_image = gamma_correction(bright_image, gamma=2.0)

    # Apply CLAHE for local contrast enhancement
    clahe_image = apply_clahe(gamma_corrected_image)

    # Enhance details
    detailed_image = enhance_details(clahe_image)

    return detailed_image

def main():
    # Connect to the camera (0 selects the default camera)
    cap = cv2.VideoCapture(0)

    if not cap.isOpened():
        print("خطا: دوربین باز نشد!")
        return

    while True:
        # Read a frame from the camera
        ret, frame = cap.read()

        if not ret:
            print("خطا: نمیتوان فریم را خواند!")
            break

        # Enhance the frame for low-light conditions
        improved_frame = improve_low_light(frame)

        # Display the original and the improved frame side by side
        cv2.imshow('Original Frame', frame)
        cv2.imshow('Improved Frame', improved_frame)

        # Quit the program when the 'q' key is pressed
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # Release resources and close the windows
    cap.release()
    cv2.destroyAllWindows()

if __name__ == "__main__":
    main()
|
untitled10.py
ADDED
@@ -0,0 +1,75 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 3 21:09:27 2024

@author: ysnrfd
"""

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 3 21:01:48 2024

@author: ysnrfd
"""

import cv2
import numpy as np

def main():
    """Run KNN background-subtraction motion detection on the default camera."""
    # Initialize video capture
    cap = cv2.VideoCapture(0)

    if not cap.isOpened():
        print("Error: Unable to open camera.")
        return

    # Create background subtractor with KNN.
    # Short history + low distance threshold makes it very sensitive to motion.
    backSub = cv2.createBackgroundSubtractorKNN(history=10, dist2Threshold=15.0, detectShadows=True)

    try:
        while True:
            # Read frame from the camera
            ret, frame = cap.read()

            if not ret:
                print("Error: Unable to read frame.")
                break

            # Apply background subtraction
            fgMask = backSub.apply(frame)

            # Apply morphological operations
            # NOTE(review): a 1x1 structuring element makes these morphology
            # calls no-ops — presumably a larger kernel (3x3/5x5) was intended.
            kernel = np.ones((1, 1), np.uint8)
            fgMask = cv2.morphologyEx(fgMask, cv2.MORPH_CLOSE, kernel)
            fgMask = cv2.morphologyEx(fgMask, cv2.MORPH_OPEN, kernel)

            # Apply Gaussian blur to reduce noise
            # NOTE(review): a (1, 1) Gaussian kernel is likewise a no-op blur.
            blurred = cv2.GaussianBlur(fgMask, (1, 1), 0)

            # Find contours
            contours, _ = cv2.findContours(blurred, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

            # Draw bounding boxes around detected objects
            for contour in contours:
                if cv2.contourArea(contour) > 10:  # Filter out small contours
                    x, y, w, h = cv2.boundingRect(contour)
                    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

            # Display the results
            cv2.imshow('Frame', frame)
            cv2.imshow('Foreground Mask', fgMask)

            # Exit loop if 'q' is pressed
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

    finally:
        # Release resources even if the loop exits via an exception
        cap.release()
        cv2.destroyAllWindows()

if __name__ == "__main__":
    main()
|
untitled11.py
ADDED
@@ -0,0 +1,64 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Aug 3 22:20:23 2024

@author: ysnrfd
"""

import cv2
import numpy as np
from skimage import exposure, img_as_float
from scipy.ndimage import gaussian_filter

def enhance_image(image):
    # Convert the image to grayscale
    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # Normalize and convert to float (values scaled into [0, 1])
    gray_image_float = img_as_float(gray_image)

    # Logarithmic transform to boost contrast in dark regions
    log_transformed = np.log1p(gray_image_float * 255)

    # Rescale intensities based on the image's own min/max
    log_transformed = exposure.rescale_intensity(log_transformed, in_range='image')

    # Use a Gaussian filter to reduce noise
    denoised_image = gaussian_filter(log_transformed, sigma=1)

    # Map back to the 8-bit range
    enhanced_image = (denoised_image * 255).astype(np.uint8)

    # Apply histogram equalization for additional contrast enhancement
    enhanced_image = exposure.equalize_hist(enhanced_image) * 255
    enhanced_image = enhanced_image.astype(np.uint8)

    # Convert back to the 3-channel BGR color space for display
    enhanced_image = cv2.cvtColor(enhanced_image, cv2.COLOR_GRAY2BGR)

    return enhanced_image

# Open the camera (0 selects the default camera)
cap = cv2.VideoCapture(0)

while True:
    # Read one frame from the camera
    ret, frame = cap.read()

    if not ret:
        break

    # Enhance the frame
    enhanced_frame = enhance_image(frame)

    # Show the enhanced frame
    cv2.imshow('Night Vision', enhanced_frame)

    # Exit the loop by pressing the 'q' key
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Release resources
cap.release()
cv2.destroyAllWindows()
|
untitled12.py
ADDED
@@ -0,0 +1,38 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import cv2
import numpy as np

def simulate_ir(image):
    """Return a pseudo-infrared rendering of *image*.

    The BGR frame is reduced to grayscale and its intensities inverted,
    which mimics the look of an IR/thermal camera feed.
    """
    grayscale = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    return cv2.bitwise_not(grayscale)

# Open the default capture device (index 0).
camera = cv2.VideoCapture(0)

while True:
    grabbed, frame = camera.read()
    if not grabbed:
        print("خطا در خواندن فریم")
        break

    # Show the raw frame alongside the simulated IR view.
    cv2.imshow('Original Frame', frame)
    cv2.imshow('Simulated IR Frame', simulate_ir(frame))

    # Pressing 'q' ends the preview loop.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Free the device and tear down all GUI windows.
camera.release()
cv2.destroyAllWindows()
|
untitled13.py
ADDED
@@ -0,0 +1,57 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import cv2
import numpy as np
from ultralytics import YOLO

# Load the YOLO model; the nano weights trade accuracy for speed.
model = YOLO('yolov8n.pt')  # Use 'yolov8n.pt' for even faster processing if accuracy is acceptable

# Open a connection to the camera
cap = cv2.VideoCapture(0)

# Check if the camera opened successfully
if not cap.isOpened():
    print("Error: Could not open camera.")
    exit()

# Set the camera resolution (lower resolution for faster processing)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 320)   # Reduced resolution for speed
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 240)  # Reduced resolution for speed

while True:
    # Capture frame-by-frame
    ret, frame = cap.read()
    if not ret:
        print("Error: Failed to capture image")
        break

    # Convert to grayscale, then apply a color map for a night-vision effect
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    night_vision = cv2.applyColorMap(gray, cv2.COLORMAP_HOT)

    # YOLO expects RGB input
    night_vision_rgb = cv2.cvtColor(night_vision, cv2.COLOR_BGR2RGB)

    # Perform object detection (stream=True yields results lazily)
    results = model(night_vision_rgb, stream=True, imgsz=320)

    # Draw bounding boxes and labels on the night-vision image
    for result in results:
        boxes = result.boxes.data.cpu().numpy()
        for box in boxes:
            # BUG FIX: the original `map(int, box)` truncated the confidence
            # score to 0, so every label read "0.00". Only the coordinates
            # and class id are integers; the score stays a float.
            x1, y1, x2, y2 = (int(v) for v in box[:4])
            score = float(box[4])
            class_id = int(box[5])
            label = f"{model.names[class_id]}: {score:.2f}"
            cv2.rectangle(night_vision, (x1, y1), (x2, y2), (0, 255, 0), 1)  # Thin box for speed
            cv2.putText(night_vision, label, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)  # Thin text for speed

    # Display the resulting frame
    cv2.imshow('Night Vision YOLOv8x', night_vision)

    # Break the loop on 'q' key press
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Release the capture and close all windows
cap.release()
cv2.destroyAllWindows()
|
untitled14.py
ADDED
@@ -0,0 +1,55 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 4 13:35:09 2024

@author: ysnrfd
"""

import cv2
from ultralytics import YOLO

# Load a nano-sized model for ultra-fast inference
model = YOLO('yolov10n.pt')  # Replace with the path to your model weights

# Open a connection to the camera
cap = cv2.VideoCapture(0)

# Check if the camera opened successfully
if not cap.isOpened():
    print("Error: Could not open camera.")
    exit()

# Set the camera resolution (lower resolution for speed)
cap.set(cv2.CAP_PROP_FRAME_WIDTH, 512)
cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 512)

while True:
    # Capture frame-by-frame
    ret, frame = cap.read()
    if not ret:
        print("Error: Failed to capture image")
        break

    # Perform object detection (stream=True yields results lazily)
    results = model(frame, imgsz=512, stream=True)  # Adjust img size if necessary

    # Draw bounding boxes and labels on the frame
    for result in results:
        boxes = result.boxes.data.cpu().numpy()
        for box in boxes:
            # BUG FIX: the original `map(int, box)` truncated the confidence
            # score to 0, so every label read "0.00". Keep the score a float;
            # only coordinates and the class id are integers.
            x1, y1, x2, y2 = (int(v) for v in box[:4])
            score = float(box[4])
            class_id = int(box[5])
            label = f"{model.names[class_id]}: {score:.2f}"
            cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 1)  # Thin box for speed
            cv2.putText(frame, label, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)  # Thin text for speed

    # Display the resulting frame
    cv2.imshow('YOLOv8n Real-Time Detection', frame)

    # Break the loop on 'q' key press
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Release the capture and close all windows
cap.release()
cv2.destroyAllWindows()
|
untitled15.py
ADDED
@@ -0,0 +1,52 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 4 13:39:00 2024

@author: ysnrfd
"""

import cv2
import numpy as np

# MOG2 maintains a per-pixel background model; anything that moves
# shows up as foreground in the mask it produces.
subtractor = cv2.createBackgroundSubtractorMOG2()

# Open the default capture device.
camera = cv2.VideoCapture(0)

# Abort early if no camera is available.
if not camera.isOpened():
    print("Error: Could not open camera.")
    exit()

while True:
    grabbed, frame = camera.read()
    if not grabbed:
        print("Error: Failed to capture image")
        break

    # Foreground mask -> contours of everything that moved this frame.
    mask = subtractor.apply(frame)
    moving_regions, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    # Annotate each region whose area clears the threshold.
    for region in moving_regions:
        if cv2.contourArea(region) <= 15:  # skip tiny (noise-sized) contours
            continue
        x, y, w, h = cv2.boundingRect(region)
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 1)
        cv2.putText(frame, "Anomaly Detected", (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)

    # Show the annotated feed.
    cv2.imshow('Ghost Detector (Anomaly Detection)', frame)

    # 'q' exits the loop.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Release the device and close every window.
camera.release()
cv2.destroyAllWindows()
|
untitled16.py
ADDED
@@ -0,0 +1,106 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 4 13:48:50 2024

@author: ysnrfd

Advanced Ghost Detection Script with Machine Learning Integration and
Real-Time Performance Enhancements.
"""

import cv2
import numpy as np
from threading import Thread
from queue import Queue
from ultralytics import YOLO

class AdvancedGhostDetector:
    """Motion-based anomaly highlighting plus YOLO object detection on a live feed."""

    def __init__(self, video_source=0, contour_area_threshold=100, model_path='yolov8s.pt'):
        """Open *video_source* and prepare the background subtractor and YOLO model.

        Raises:
            IOError: if the camera cannot be opened.
        """
        self.video_source = video_source
        self.contour_area_threshold = contour_area_threshold
        self.background_subtractor = cv2.createBackgroundSubtractorMOG2()
        self.cap = cv2.VideoCapture(self.video_source)
        # Bounded queue decouples capture from processing; frames are dropped
        # when processing falls behind instead of memory growing unbounded.
        self.frame_queue = Queue(maxsize=10)
        self.model = YOLO(model_path)

        if not self.cap.isOpened():
            raise IOError("Error: Could not open camera.")

    def capture_frames(self):
        """Continuously read frames into the queue (intended for a daemon thread)."""
        while True:
            ret, frame = self.cap.read()
            if not ret:
                print("Error: Failed to capture image")
                break
            if not self.frame_queue.full():
                self.frame_queue.put(frame)

    def process_frame(self, frame):
        """Box foreground (moving) regions larger than the area threshold; returns the frame."""
        # Apply background subtraction
        fg_mask = self.background_subtractor.apply(frame)

        # Find contours of the detected objects
        contours, _ = cv2.findContours(fg_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

        # Draw contours on the frame
        for contour in contours:
            if cv2.contourArea(contour) > self.contour_area_threshold:
                x, y, w, h = cv2.boundingRect(contour)
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)
                cv2.putText(frame, "Anomaly Detected", (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)

        return frame

    def detect_objects(self, frame):
        """Run YOLO on *frame*, draw labelled boxes, and return the annotated frame."""
        # YOLO expects RGB input
        rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        results = self.model(rgb_frame, imgsz=320)

        # Draw bounding boxes and labels on the frame
        for result in results:
            boxes = result.boxes.data.cpu().numpy()
            for box in boxes:
                # BUG FIX: the original `map(int, box)` truncated the
                # confidence score to 0, so labels always read "0.00".
                # Keep the score a float; coords and class id are ints.
                x1, y1, x2, y2 = (int(v) for v in box[:4])
                score = float(box[4])
                class_id = int(box[5])
                label = f"{self.model.names[class_id]}: {score:.2f}"
                cv2.rectangle(frame, (x1, y1), (x2, y2), (0, 255, 0), 2)
                cv2.putText(frame, label, (x1, y1 - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)

        return frame

    def display_frame(self, frame):
        """Show the annotated frame in the viewer window."""
        cv2.imshow('Advanced Ghost Detector', frame)

    def run(self):
        """Main loop: capture on a background thread; process, detect, and display here."""
        capture_thread = Thread(target=self.capture_frames, daemon=True)
        capture_thread.start()

        while True:
            if not self.frame_queue.empty():
                frame = self.frame_queue.get()
                processed_frame = self.process_frame(frame)
                detected_frame = self.detect_objects(processed_frame)
                self.display_frame(detected_frame)

            # Break the loop on 'q' key press
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        self.cleanup()

    def cleanup(self):
        """Release the capture device and close all OpenCV windows."""
        self.cap.release()
        cv2.destroyAllWindows()

if __name__ == "__main__":
    try:
        detector = AdvancedGhostDetector(video_source=0, contour_area_threshold=1000, model_path='yolov8n.pt')
        detector.run()
    except Exception as e:
        print(f"An error occurred: {e}")
        cv2.destroyAllWindows()
|
untitled18.py
ADDED
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 4 14:49:57 2024

@author: ysnrfd
"""

import cv2
import mediapipe as mp

# Initialize MediaPipe Pose module
mp_pose = mp.solutions.pose
pose = mp_pose.Pose()

# Initialize MediaPipe Drawing module
mp_drawing = mp.solutions.drawing_utils

# Initialize webcam
cap = cv2.VideoCapture(0)

while True:
    # Read frame from webcam
    ret, frame = cap.read()
    if not ret:
        break

    # Convert the frame to RGB (MediaPipe processes RGB images)
    frame_rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

    # Process the frame and get pose landmarks
    results = pose.process(frame_rgb)

    # Draw pose landmarks on the original BGR frame when a pose was found
    if results.pose_landmarks:
        mp_drawing.draw_landmarks(frame, results.pose_landmarks, mp_pose.POSE_CONNECTIONS)

    # Display the resulting frame
    cv2.imshow("Pose Detection", frame)

    # Exit on 'q' key press
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Release resources
cap.release()
cv2.destroyAllWindows()
|
untitled2.py
ADDED
@@ -0,0 +1,106 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import cv2
import numpy as np
from skimage import exposure, filters
from scipy.ndimage import gaussian_filter

def adjust_brightness_contrast(image, beta=50, alpha=1.5):
    """Increase the image's brightness (beta offset) and contrast (alpha gain)."""
    return cv2.convertScaleAbs(image, alpha=alpha, beta=beta)

def gamma_correction(image, gamma=2.0):
    """Gamma-correct overall brightness/contrast on the luma (Y) channel only."""
    yuv_image = cv2.cvtColor(image, cv2.COLOR_BGR2YUV)
    yuv_image[..., 0] = np.clip(np.power(yuv_image[..., 0] / 255.0, gamma) * 255.0, 0, 255).astype(np.uint8)
    return cv2.cvtColor(yuv_image, cv2.COLOR_YUV2BGR)

def apply_clahe(image):
    """Apply CLAHE for local contrast enhancement (output is 3-channel gray)."""
    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    clahe = cv2.createCLAHE(clipLimit=3.0, tileGridSize=(8, 8))
    clahe_image = clahe.apply(gray_image)
    return cv2.cvtColor(clahe_image, cv2.COLOR_GRAY2BGR)

def convert_to_hsv(image):
    """Boost brightness via the HSV value channel; returns a BGR image."""
    hsv_image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    hsv_image[..., 2] = cv2.convertScaleAbs(hsv_image[..., 2], alpha=1.5, beta=30)
    return cv2.cvtColor(hsv_image, cv2.COLOR_HSV2BGR)

def enhance_details(image):
    """Enhance details using a 3x3 sharpening kernel."""
    kernel = np.array([[0, -1, 0],
                       [-1, 5, -1],
                       [0, -1, 0]])
    sharpened_image = cv2.filter2D(image, -1, kernel)
    return sharpened_image

def noise_reduction(image):
    """Reduce noise using non-local-means denoising on the grayscale image."""
    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    denoised_image = cv2.fastNlMeansDenoising(gray_image, None, h=10, templateWindowSize=7, searchWindowSize=21)
    return cv2.cvtColor(denoised_image, cv2.COLOR_GRAY2BGR)

def enhance_contrast(image):
    """Final contrast boost via global histogram equalization (3-channel gray out)."""
    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    equalized_image = exposure.equalize_hist(gray_image)
    return cv2.cvtColor((exposure.rescale_intensity(equalized_image, out_range=(0, 255))).astype(np.uint8), cv2.COLOR_GRAY2BGR)

def improve_low_light(image):
    """Enhance an image captured in low-light conditions (multi-step pipeline)."""
    # Step 1: initial brightness boost
    bright_image = adjust_brightness_contrast(image, beta=50, alpha=1.5)

    # Step 2: gamma correction
    # NOTE(review): gamma=2.0 darkens mid-tones; gamma < 1 would brighten —
    # confirm this is intended for a low-light pipeline.
    gamma_corrected_image = gamma_correction(bright_image, gamma=2.0)

    # Step 3: local contrast enhancement with CLAHE
    clahe_image = apply_clahe(gamma_corrected_image)

    # Step 4: HSV value-channel brightening (returns BGR)
    hsv_image = convert_to_hsv(clahe_image)

    # Step 5: detail enhancement with the sharpening filter
    detailed_image = enhance_details(hsv_image)

    # Step 6: noise reduction (enable if needed)
    # denoised_image = noise_reduction(detailed_image)

    # Step 7: final contrast enhancement
    final_image = enhance_contrast(detailed_image)

    return final_image

def main():
    """Connect to the camera and display original vs. enhanced frames."""
    cap = cv2.VideoCapture(0)

    if not cap.isOpened():
        print("خطا: دوربین باز نشد!")
        return

    while True:
        # Read a frame from the camera
        ret, frame = cap.read()

        if not ret:
            print("خطا: نمیتوان فریم را خواند!")
            break

        # Enhance the frame for low-light conditions
        improved_frame = improve_low_light(frame)

        # Display the original and the improved frame
        cv2.imshow('Original Frame', frame)
        cv2.imshow('Improved Frame', improved_frame)

        # Quit the program by pressing 'q'
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # Release resources and close the windows
    cap.release()
    cv2.destroyAllWindows()

if __name__ == "__main__":
    main()
|
untitled20.py
ADDED
@@ -0,0 +1,44 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import cv2
import mediapipe as mp

# Initialize Mediapipe Pose and Drawing modules
mp_pose = mp.solutions.pose
mp_drawing = mp.solutions.drawing_utils

# Set up pose detection
pose = mp_pose.Pose(min_detection_confidence=0.5, min_tracking_confidence=0.5)

# Initialize camera
cap = cv2.VideoCapture(0)

while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        break

    # Flip the frame horizontally for a later selfie-view display
    frame = cv2.flip(frame, 1)
    # Convert the BGR image to RGB (MediaPipe processes RGB images)
    rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    # Process the frame and get pose landmarks
    results = pose.process(rgb_frame)

    # Draw landmarks on the frame
    if results.pose_landmarks:
        mp_drawing.draw_landmarks(frame, results.pose_landmarks, mp_pose.POSE_CONNECTIONS)

        # Access and print the normalized 3D landmarks
        # NOTE(review): printing every landmark on every frame floods stdout
        # and slows the loop — consider sampling or removing for production.
        landmarks = results.pose_landmarks.landmark
        for i, landmark in enumerate(landmarks):
            print(f"Landmark {i}: x={landmark.x}, y={landmark.y}, z={landmark.z}")

    # Display the frame
    cv2.imshow('Pose Detection', frame)

    # Break the loop if 'q' is pressed
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Release resources
cap.release()
cv2.destroyAllWindows()
|
untitled21.py
ADDED
@@ -0,0 +1,53 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import cv2
import numpy as np

def detect_anomalies(frame1, frame2):
    """Highlight regions that differ between two consecutive frames.

    Draws green bounding boxes directly on *frame1* (mutated in place)
    around changed regions larger than the area filter, and returns it.
    """
    # Convert images to grayscale
    gray1 = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
    gray2 = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)

    # Compute the absolute difference between the two images
    diff = cv2.absdiff(gray1, gray2)

    # Threshold the difference to get a binary image
    _, thresh = cv2.threshold(diff, 50, 255, cv2.THRESH_BINARY)

    # Find contours of the anomalies
    contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    # Draw contours on the original frame
    for contour in contours:
        if cv2.contourArea(contour) > 500:  # Filter small contours
            x, y, w, h = cv2.boundingRect(contour)
            cv2.rectangle(frame1, (x, y), (x + w, y + h), (0, 255, 0), 2)

    return frame1

# Initialize video capture (0 is usually the default camera)
cap = cv2.VideoCapture(0)

# Read the first frame to initialize the previous frame.
# BUG FIX: the original never checked this read's result; on camera failure
# prev_frame would be None and cvtColor would crash inside the loop.
ret, prev_frame = cap.read()
if not ret:
    print("Error: Could not read initial frame.")
    cap.release()
    raise SystemExit(1)

while True:
    # Read the current frame
    ret, curr_frame = cap.read()
    if not ret:
        break

    # Detect anomalies between previous and current frame
    result_frame = detect_anomalies(prev_frame, curr_frame)

    # Display the result
    cv2.imshow('Anomalies Detected', result_frame)

    # Update previous frame
    prev_frame = curr_frame

    # Exit on 'q' key press
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Release video capture and close windows
cap.release()
cv2.destroyAllWindows()
|
untitled22.py
ADDED
@@ -0,0 +1,90 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Webcam anomaly detector based on frame differencing.

Created on Sun Aug  4 16:08:30 2024

@author: ysnrfd
"""

import cv2
import numpy as np


def detect_anomalies(frame1, frame2, min_contour_area=1, threshold_value=7):
    """Mark regions where ``frame2`` differs from ``frame1``.

    Parameters:
    - frame1: previous BGR frame; boxes are drawn on it in place.
    - frame2: current BGR frame.
    - min_contour_area: contours at or below this area are ignored.
    - threshold_value: binarisation threshold applied to the blurred diff.

    Returns:
    - frame1 annotated with green bounding boxes.
    """
    # Grayscale views of both frames for pixel-wise comparison.
    previous_gray = cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY)
    current_gray = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)

    # Absolute difference, smoothed to suppress sensor noise before thresholding.
    delta = cv2.absdiff(previous_gray, current_gray)
    smoothed = cv2.GaussianBlur(delta, (5, 5), 0)
    _, mask = cv2.threshold(smoothed, threshold_value, 255, cv2.THRESH_BINARY)

    # Box every changed region above the area cutoff.
    blobs, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for blob in blobs:
        if cv2.contourArea(blob) > min_contour_area:
            x, y, w, h = cv2.boundingRect(blob)
            cv2.rectangle(frame1, (x, y), (x + w, y + h), (0, 255, 0), 1)

    return frame1


def main():
    """Stream the default camera and display anomalies until 'q' is pressed."""
    cap = cv2.VideoCapture(0)

    if not cap.isOpened():
        print("Error: Could not open video capture.")
        return

    # Seed the differencing loop with one initial frame.
    ret, prev_frame = cap.read()
    if not ret:
        print("Error: Could not read initial frame.")
        cap.release()
        return

    while True:
        ret, curr_frame = cap.read()
        if not ret:
            print("Error: Could not read frame.")
            break

        # Annotate and show the differences against the previous frame.
        cv2.imshow('Anomalies Detected', detect_anomalies(prev_frame, curr_frame))

        prev_frame = curr_frame

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()


if __name__ == "__main__":
    main()
|
untitled23.py
ADDED
@@ -0,0 +1,91 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""High-sensitivity webcam anomaly detector via frame differencing.

Created on Sun Aug  4 16:11:43 2024

@author: ysnrfd
"""

import cv2
import numpy as np


def detect_anomalies(frame1, frame2, min_contour_area=1, threshold_value=8, blur_ksize=(5, 5)):
    """Detect and box differences between two frames with high sensitivity.

    Parameters:
    - frame1: previous BGR frame; annotated in place.
    - frame2: current BGR frame.
    - min_contour_area: minimum contour area (exclusive) to be drawn.
    - threshold_value: threshold for binarising the blurred difference.
    - blur_ksize: Gaussian blur kernel size applied to the difference.

    Returns:
    - frame1 with green rectangles around detected regions.
    """
    # Work on grayscale copies of both frames.
    gray_pair = (cv2.cvtColor(frame1, cv2.COLOR_BGR2GRAY),
                 cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY))

    # Difference -> blur -> binary mask.
    difference = cv2.absdiff(gray_pair[0], gray_pair[1])
    blurred = cv2.GaussianBlur(difference, blur_ksize, 0)
    _, binary = cv2.threshold(blurred, threshold_value, 255, cv2.THRESH_BINARY)

    # Outline each sufficiently large changed region.
    regions, _ = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for region in regions:
        if cv2.contourArea(region) > min_contour_area:
            x, y, w, h = cv2.boundingRect(region)
            cv2.rectangle(frame1, (x, y), (x + w, y + h), (0, 255, 0), 1)

    return frame1


def main():
    """Run the detection loop against the default camera until 'q' quits."""
    cap = cv2.VideoCapture(0)

    if not cap.isOpened():
        print("Error: Could not open video capture.")
        return

    # First frame primes the differencing loop.
    ret, prev_frame = cap.read()
    if not ret:
        print("Error: Could not read initial frame.")
        cap.release()
        return

    while True:
        ret, curr_frame = cap.read()
        if not ret:
            print("Error: Could not read frame.")
            break

        result_frame = detect_anomalies(prev_frame, curr_frame)
        cv2.imshow('Anomalies Detected', result_frame)

        prev_frame = curr_frame

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    cap.release()
    cv2.destroyAllWindows()


if __name__ == "__main__":
    main()
|
untitled3.py
ADDED
@@ -0,0 +1,87 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import cv2
import numpy as np
# FIX: removed `from skimage import exposure, filters` — neither name was ever
# used, yet the import made scikit-image a hard dependency of the script.


def adjust_brightness_contrast_gray(image, alpha=2.0, beta=50):
    """Scale (alpha) and shift (beta) a grayscale image's intensities."""
    return cv2.convertScaleAbs(image, alpha=alpha, beta=beta)


def gamma_correction_gray(image, gamma=2.0):
    """Apply power-law (gamma) correction to a grayscale image."""
    image = image / 255.0
    image = np.power(image, gamma)
    return (image * 255).astype(np.uint8)


def apply_clahe_gray(image):
    """Boost local contrast of a grayscale image with CLAHE.

    NOTE(review): tileGridSize=(1, 1) means a single tile, which is
    effectively a clip-limited *global* histogram equalisation rather than an
    adaptive one — confirm this is intended.
    """
    clahe = cv2.createCLAHE(clipLimit=8.0, tileGridSize=(1, 1))
    clahe_image = clahe.apply(image)
    return clahe_image


def enhance_details_gray(image):
    """Sharpen a grayscale image with a 3x3 Laplacian-style kernel."""
    kernel = np.array([[0, -1, 0],
                       [-1, 5, -1],
                       [0, -1, 0]])
    sharpened_image = cv2.filter2D(image, -1, kernel)
    return sharpened_image


def reduce_noise_gray(image):
    """Denoise a grayscale image with non-local means filtering."""
    return cv2.fastNlMeansDenoising(image, None, h=10, templateWindowSize=7, searchWindowSize=21)


def improve_low_light_gray(image):
    """Enhance a low-light grayscale frame.

    Pipeline: brightness/contrast boost -> gamma correction -> CLAHE ->
    sharpening -> denoising.
    """
    # Step 1: initial brightness/contrast boost.
    bright_image = adjust_brightness_contrast_gray(image, alpha=5.0, beta=10)

    # Step 2: gamma correction. gamma=1.0 is an identity mapping; the call is
    # kept so the pipeline stays easy to tune.
    gamma_corrected_image = gamma_correction_gray(bright_image, gamma=1.0)

    # Step 3: local contrast boost with CLAHE.
    clahe_image = apply_clahe_gray(gamma_corrected_image)

    # Step 4: detail enhancement via sharpening.
    detailed_image = enhance_details_gray(clahe_image)

    # Step 5: noise reduction.
    denoised_image = reduce_noise_gray(detailed_image)

    return denoised_image


def main():
    """Open the camera, enhance each grayscale frame, and show both views."""
    cap = cv2.VideoCapture(0)

    if not cap.isOpened():
        print("خطا: دوربین باز نشد!")
        return

    while True:
        # Read a frame from the camera.
        ret, frame = cap.read()

        if not ret:
            print("خطا: نمیتوان فریم را خواند!")
            break

        # Convert to grayscale and enhance for low-light conditions.
        gray_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        improved_frame = improve_low_light_gray(gray_frame)

        # Show the original and the enhanced image side by side.
        cv2.imshow('Original Frame', gray_frame)
        cv2.imshow('Improved Frame', improved_frame)

        # Quit on 'q'.
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # Release resources and close the windows.
    cap.release()
    cv2.destroyAllWindows()


if __name__ == "__main__":
    main()
|
untitled4.py
ADDED
@@ -0,0 +1,76 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Low-light enhancement of webcam frames (grayscale pipeline).

Created on Sat Aug  3 19:32:54 2024

@author: ysnrfd
"""

import cv2
import numpy as np
# FIX: removed `from skimage import exposure` — the name was never used, yet
# the import made scikit-image a hard dependency of the script.


def adjust_brightness_contrast(image, alpha=2.0, beta=50):
    """Scale (alpha) and shift (beta) the image's intensities."""
    return cv2.convertScaleAbs(image, alpha=alpha, beta=beta)


def gamma_correction(image, gamma=2.0):
    """Apply power-law (gamma) correction for global brightness/contrast."""
    image = image / 255.0
    image = np.power(image, gamma)
    return (image * 255).astype(np.uint8)


def apply_clahe(image):
    """Apply CLAHE (8x8 tiles, clip limit 2.0) to boost local contrast."""
    clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
    return clahe.apply(image)


def enhance_low_light(image):
    """Enhance a BGR frame captured in low-light conditions.

    Pipeline: grayscale -> brightness/contrast boost -> gamma correction ->
    CLAHE. Returns a single-channel (grayscale) image.
    """
    # Step 1: convert to grayscale.
    gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

    # Step 2: brightness and contrast boost.
    bright_contrast_image = adjust_brightness_contrast(gray_image, alpha=1.5, beta=70)

    # Step 3: gamma correction (gamma=5 strongly darkens mid-tones after the
    # boost above).
    gamma_corrected_image = gamma_correction(bright_contrast_image, gamma=5)

    # Step 4: local contrast boost with CLAHE.
    clahe_image = apply_clahe(gamma_corrected_image)

    return clahe_image


def main():
    """Open the camera and display original vs. enhanced frames."""
    cap = cv2.VideoCapture(0)

    if not cap.isOpened():
        print("خطا: دوربین باز نشد!")
        return

    while True:
        # Read a frame from the camera.
        ret, frame = cap.read()

        if not ret:
            print("خطا: نمیتوان فریم را خواند!")
            break

        # Enhance the frame for low-light conditions.
        improved_frame = enhance_low_light(frame)

        # Show the original (as grayscale) and the enhanced image.
        cv2.imshow('Original Frame', cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY))
        cv2.imshow('Improved Frame', improved_frame)

        # Quit on 'q'.
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # Release resources and close the windows.
    cap.release()
    cv2.destroyAllWindows()


if __name__ == "__main__":
    main()
|
untitled6.py
ADDED
@@ -0,0 +1,53 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import cv2
import numpy as np


def main():
    """Webcam motion detector using MOG2 background subtraction."""
    capture = cv2.VideoCapture(0)

    # MOG2 background model with shadow detection enabled.
    subtractor = cv2.createBackgroundSubtractorMOG2(history=500, varThreshold=16, detectShadows=True)

    if not capture.isOpened():
        print("خطا در باز کردن دوربین")
        return

    # Structuring element for the morphological cleanup (loop-invariant).
    kernel = np.ones((5, 5), np.uint8)

    while True:
        # Grab the next frame from the camera.
        grabbed, frame = capture.read()
        if not grabbed:
            print("خطا در خواندن فریم")
            break

        # Foreground mask, cleaned with close-then-open morphology.
        mask = subtractor.apply(frame)
        mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)
        mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)

        # Draw a box around every sufficiently large moving region.
        regions, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        for region in regions:
            if cv2.contourArea(region) > 500:  # drop the smallest contours
                x, y, w, h = cv2.boundingRect(region)
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

        # Show the annotated frame and the raw foreground mask.
        cv2.imshow('Frame', frame)
        cv2.imshow('Foreground Mask', mask)

        # Quit on 'q'.
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    # Free camera and GUI resources.
    capture.release()
    cv2.destroyAllWindows()


if __name__ == "__main__":
    main()
|
untitled7.py
ADDED
@@ -0,0 +1,64 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Webcam motion detector using KNN background subtraction.

Created on Sat Aug  3 21:01:48 2024

@author: ysnrfd
"""

import cv2
import numpy as np


def main():
    """Stream the camera, subtract the background, and box moving regions."""
    video = cv2.VideoCapture(0)

    # KNN background model with shadow detection enabled.
    model = cv2.createBackgroundSubtractorKNN(history=500, dist2Threshold=400.0, detectShadows=True)

    if not video.isOpened():
        print("خطا در باز کردن دوربین")
        return

    # Kernel reused by the morphological cleanup each iteration.
    kernel = np.ones((5, 5), np.uint8)

    while True:
        ok, frame = video.read()
        if not ok:
            print("خطا در خواندن فریم")
            break

        # Foreground mask from the background model.
        foreground = model.apply(frame)

        # Morphological close + open to fill holes and remove speckles.
        foreground = cv2.morphologyEx(foreground, cv2.MORPH_CLOSE, kernel)
        foreground = cv2.morphologyEx(foreground, cv2.MORPH_OPEN, kernel)

        # Gaussian blur further suppresses noise before contour extraction.
        softened = cv2.GaussianBlur(foreground, (5, 5), 0)

        # Box every contour above the area cutoff.
        outlines, _ = cv2.findContours(softened, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        for outline in outlines:
            if cv2.contourArea(outline) > 500:  # skip the smallest contours
                x, y, w, h = cv2.boundingRect(outline)
                cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

        # Note: the displayed mask is the un-blurred one, as in the original.
        cv2.imshow('Frame', frame)
        cv2.imshow('Foreground Mask', foreground)

        # Quit on 'q'.
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    video.release()
    cv2.destroyAllWindows()


if __name__ == "__main__":
    main()
|
untitled9.py
ADDED
@@ -0,0 +1,66 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Webcam motion detector (KNN background subtraction, guarded cleanup).

Created on Sat Aug  3 21:01:48 2024

@author: ysnrfd
"""

import cv2
import numpy as np


def main():
    """Capture frames, subtract the background, and box moving objects."""
    cam = cv2.VideoCapture(0)

    # KNN background model.
    # NOTE(review): dist2Threshold=0.512 is far below OpenCV's default of
    # 400.0, making the model extremely sensitive — confirm this is intended.
    model = cv2.createBackgroundSubtractorKNN(history=500, dist2Threshold=0.512, detectShadows=True)

    if not cam.isOpened():
        print("Error: Unable to open camera.")
        return

    try:
        morph_kernel = np.ones((5, 5), np.uint8)
        while True:
            ok, frame = cam.read()
            if not ok:
                print("Error: Unable to read frame.")
                break

            # Foreground mask, cleaned with close-then-open morphology.
            mask = model.apply(frame)
            mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, morph_kernel)
            mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, morph_kernel)

            # Blur suppresses residual noise before contour extraction.
            soft_mask = cv2.GaussianBlur(mask, (5, 5), 0)

            # Box each detected object above the area cutoff.
            shapes, _ = cv2.findContours(soft_mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            for shape in shapes:
                if cv2.contourArea(shape) > 500:  # filter out small contours
                    x, y, w, h = cv2.boundingRect(shape)
                    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

            # Display the annotated frame and the (un-blurred) mask.
            cv2.imshow('Frame', frame)
            cv2.imshow('Foreground Mask', mask)

            # Quit on 'q'.
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

    finally:
        # Always release the camera and destroy the windows, even on error.
        cam.release()
        cv2.destroyAllWindows()


if __name__ == "__main__":
    main()
|