diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..fbdd43f320cb9a28e2aab4e684affe3a1fc6ab5f
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,2 @@
+temps/*
+!temps/.gitkeep
\ No newline at end of file
diff --git a/app.py b/app.py
new file mode 100644
index 0000000000000000000000000000000000000000..feecd4b64ea8e795273d29769fde0fdef225f0d7
--- /dev/null
+++ b/app.py
@@ -0,0 +1,72 @@
+import base64
+import json
+import os, shutil
+import re
+import time
+import uuid
+
+import cv2
+
+import numpy as np
+import streamlit as st
+from PIL import Image
+# from extract_video import extract_method_single_video
+
+import shlex
+import subprocess
+
+def main():
+    st.markdown("###")
+    uploaded_file = st.file_uploader('Upload a picture', type=['mp4', 'jpg', 'jpeg', 'png'], accept_multiple_files=False)
+    if uploaded_file:
+        random_id = uuid.uuid1()
+        filename = "{}.{}".format(random_id, uploaded_file.type.split("/")[-1])
+        file_type = uploaded_file.type.split("/")[0]
+
+        if uploaded_file.type == 'video/mp4':
+            with open(f"temps/{filename}", mode='wb') as f:
+                f.write(uploaded_file.read())
+            st.video(uploaded_file)
+            pass
+        else:
+            img = Image.open(uploaded_file).convert('RGB')
+            ext = uploaded_file.type.split("/")[-1]
+            with open(f"temps/{filename}", mode='wb') as f:
+                f.write(uploaded_file.getbuffer())
+            st.image(img)
+
+
+
+        with st.spinner(f'Processing {file_type}...'):
+            subprocess.run(shlex.split(f"python.exe extract_video.py --device cuda --max_frames 50 --bs 2 --frame_interval 5 --confidence_threshold 0.997 --data_path temps/{filename}"))
+        st.text(f'1. Processing {file_type} ✅')
+        with st.spinner(f'Analyzing {file_type}...'):
+            pred = subprocess.run(shlex.split(f"python inference.py --weight weights/model_params_ffpp_c23.pickle --device cuda --image_folder temps/images/{filename}"), capture_output=True)
+        st.text(f'2. Analyzing {file_type} ✅')
+
+        print(pred)
+        try:
+            fake_probability = float(pred.stdout.decode('utf-8').split('Mean prediction: ')[-1])
+            if fake_probability > 0.6:
+                st.error(' FAKE! ', icon="🚨")
+            else:
+                st.success(" REAL FOOTAGE! ", icon="✅")
+                st.text("fake probability {:.2f}".format(fake_probability))
+
+            os.remove(f"temps/{filename}")
+            folder_name = ".".join(filename.split(".")[:-1])
+            shutil.rmtree(f"temps/images/{folder_name}")
+        except:
+            st.text(pred.stdout.decode('utf-8'))
+
+        st.text("")
+        st.text(pred)
+
+
+
+if __name__ == "__main__":
+    st.set_page_config(
+        page_title="Nodeflux Deepfake Detection", page_icon=":pencil2:"
+    )
+    st.title("Deepfake Detection")
+    main()
\ No newline at end of file
diff --git a/data/FDDB/img_list.txt b/data/FDDB/img_list.txt
new file mode 100644
index 0000000000000000000000000000000000000000..5cf3d3199ca5c9c5ef4a904f1b9c89b821a7978a
--- /dev/null
+++ b/data/FDDB/img_list.txt
@@ -0,0 +1,2845 @@
+2002/08/11/big/img_591
+2002/08/26/big/img_265
+2002/07/19/big/img_423
+2002/08/24/big/img_490
+2002/08/31/big/img_17676
+2002/07/31/big/img_228
+2002/07/24/big/img_402
+2002/08/04/big/img_769
+2002/07/19/big/img_581
+2002/08/13/big/img_723
+2002/08/12/big/img_821
+2003/01/17/big/img_610
+2002/08/13/big/img_1116
+2002/08/28/big/img_19238
+2002/08/21/big/img_660
+2002/08/14/big/img_607
+2002/08/05/big/img_3708
+2002/08/19/big/img_511
+2002/08/07/big/img_1316
+2002/07/25/big/img_1047
+2002/07/23/big/img_474
+2002/07/27/big/img_970
+2002/09/02/big/img_15752
+2002/09/01/big/img_16378
+2002/09/01/big/img_16189
+2002/08/26/big/img_276
+2002/07/24/big/img_518
+2002/08/14/big/img_1027
+2002/08/24/big/img_733
+2002/08/15/big/img_249
+2003/01/15/big/img_1371
+2002/08/07/big/img_1348
+2003/01/01/big/img_331
+2002/08/23/big/img_536
+2002/07/30/big/img_224
+2002/08/10/big/img_763
+2002/08/21/big/img_293
+2002/08/15/big/img_1211
+2002/08/15/big/img_1194
+2003/01/15/big/img_390
+2002/08/06/big/img_2893
+2002/08/17/big/img_691
+2002/08/07/big/img_1695
+2002/08/16/big/img_829
+2002/07/25/big/img_201
+2002/08/23/big/img_36
+2003/01/15/big/img_763
+2003/01/15/big/img_637
+2002/08/22/big/img_592
+2002/07/25/big/img_817
+2003/01/15/big/img_1219
+2002/08/05/big/img_3508
+2002/08/15/big/img_1108
+2002/07/19/big/img_488
+2003/01/16/big/img_704
+2003/01/13/big/img_1087
+2002/08/10/big/img_670
+2002/07/24/big/img_104
+2002/08/27/big/img_19823
+2002/09/01/big/img_16229
+2003/01/13/big/img_846
+2002/08/04/big/img_412
+2002/07/22/big/img_554
+2002/08/12/big/img_331
+2002/08/02/big/img_533
+2002/08/12/big/img_259
+2002/08/18/big/img_328
+2003/01/14/big/img_630
+2002/08/05/big/img_3541
+2002/08/06/big/img_2390
+2002/08/20/big/img_150
+2002/08/02/big/img_1231
+2002/08/16/big/img_710
+2002/08/19/big/img_591
+2002/07/22/big/img_725
+2002/07/24/big/img_820
+2003/01/13/big/img_568
+2002/08/22/big/img_853
+2002/08/09/big/img_648
+2002/08/23/big/img_528
+2003/01/14/big/img_888
+2002/08/30/big/img_18201
+2002/08/13/big/img_965
+2003/01/14/big/img_660
+2002/07/19/big/img_517
+2003/01/14/big/img_406
+2002/08/30/big/img_18433
+2002/08/07/big/img_1630
+2002/08/06/big/img_2717
+2002/08/21/big/img_470
+2002/07/23/big/img_633
+2002/08/20/big/img_915
+2002/08/16/big/img_893
+2002/07/29/big/img_644
+2002/08/15/big/img_529
+2002/08/16/big/img_668
+2002/08/07/big/img_1871
+2002/07/25/big/img_192
+2002/07/31/big/img_961
+2002/08/19/big/img_738
+2002/07/31/big/img_382
+2002/08/19/big/img_298
+2003/01/17/big/img_608
+2002/08/21/big/img_514
+2002/07/23/big/img_183
+2003/01/17/big/img_536
+2002/07/24/big/img_478
+2002/08/06/big/img_2997
+2002/09/02/big/img_15380
+2002/08/07/big/img_1153
+2002/07/31/big/img_967
+2002/07/31/big/img_711
+2002/08/26/big/img_664
+2003/01/01/big/img_326
+2002/08/24/big/img_775
+2002/08/08/big/img_961
+2002/08/16/big/img_77 +2002/08/12/big/img_296 +2002/07/22/big/img_905 +2003/01/13/big/img_284 +2002/08/13/big/img_887 +2002/08/24/big/img_849 +2002/07/30/big/img_345 +2002/08/18/big/img_419 +2002/08/01/big/img_1347 +2002/08/05/big/img_3670 +2002/07/21/big/img_479 +2002/08/08/big/img_913 +2002/09/02/big/img_15828 +2002/08/30/big/img_18194 +2002/08/08/big/img_471 +2002/08/22/big/img_734 +2002/08/09/big/img_586 +2002/08/09/big/img_454 +2002/07/29/big/img_47 +2002/07/19/big/img_381 +2002/07/29/big/img_733 +2002/08/20/big/img_327 +2002/07/21/big/img_96 +2002/08/06/big/img_2680 +2002/07/25/big/img_919 +2002/07/21/big/img_158 +2002/07/22/big/img_801 +2002/07/22/big/img_567 +2002/07/24/big/img_804 +2002/07/24/big/img_690 +2003/01/15/big/img_576 +2002/08/14/big/img_335 +2003/01/13/big/img_390 +2002/08/11/big/img_258 +2002/07/23/big/img_917 +2002/08/15/big/img_525 +2003/01/15/big/img_505 +2002/07/30/big/img_886 +2003/01/16/big/img_640 +2003/01/14/big/img_642 +2003/01/17/big/img_844 +2002/08/04/big/img_571 +2002/08/29/big/img_18702 +2003/01/15/big/img_240 +2002/07/29/big/img_553 +2002/08/10/big/img_354 +2002/08/18/big/img_17 +2003/01/15/big/img_782 +2002/07/27/big/img_382 +2002/08/14/big/img_970 +2003/01/16/big/img_70 +2003/01/16/big/img_625 +2002/08/18/big/img_341 +2002/08/26/big/img_188 +2002/08/09/big/img_405 +2002/08/02/big/img_37 +2002/08/13/big/img_748 +2002/07/22/big/img_399 +2002/07/25/big/img_844 +2002/08/12/big/img_340 +2003/01/13/big/img_815 +2002/08/26/big/img_5 +2002/08/10/big/img_158 +2002/08/18/big/img_95 +2002/07/29/big/img_1297 +2003/01/13/big/img_508 +2002/09/01/big/img_16680 +2003/01/16/big/img_338 +2002/08/13/big/img_517 +2002/07/22/big/img_626 +2002/08/06/big/img_3024 +2002/07/26/big/img_499 +2003/01/13/big/img_387 +2002/08/31/big/img_18025 +2002/08/13/big/img_520 +2003/01/16/big/img_576 +2002/07/26/big/img_121 +2002/08/25/big/img_703 +2002/08/26/big/img_615 +2002/08/17/big/img_434 +2002/08/02/big/img_677 +2002/08/18/big/img_276 +2002/08/05/big/img_3672 +2002/07/26/big/img_700 +2002/07/31/big/img_277 +2003/01/14/big/img_220 +2002/08/23/big/img_232 +2002/08/31/big/img_17422 +2002/07/22/big/img_508 +2002/08/13/big/img_681 +2003/01/15/big/img_638 +2002/08/30/big/img_18408 +2003/01/14/big/img_533 +2003/01/17/big/img_12 +2002/08/28/big/img_19388 +2002/08/08/big/img_133 +2002/07/26/big/img_885 +2002/08/19/big/img_387 +2002/08/27/big/img_19976 +2002/08/26/big/img_118 +2002/08/28/big/img_19146 +2002/08/05/big/img_3259 +2002/08/15/big/img_536 +2002/07/22/big/img_279 +2002/07/22/big/img_9 +2002/08/13/big/img_301 +2002/08/15/big/img_974 +2002/08/06/big/img_2355 +2002/08/01/big/img_1526 +2002/08/03/big/img_417 +2002/08/04/big/img_407 +2002/08/15/big/img_1029 +2002/07/29/big/img_700 +2002/08/01/big/img_1463 +2002/08/31/big/img_17365 +2002/07/28/big/img_223 +2002/07/19/big/img_827 +2002/07/27/big/img_531 +2002/07/19/big/img_845 +2002/08/20/big/img_382 +2002/07/31/big/img_268 +2002/08/27/big/img_19705 +2002/08/02/big/img_830 +2002/08/23/big/img_250 +2002/07/20/big/img_777 +2002/08/21/big/img_879 +2002/08/26/big/img_20146 +2002/08/23/big/img_789 +2002/08/06/big/img_2683 +2002/08/25/big/img_576 +2002/08/09/big/img_498 +2002/08/08/big/img_384 +2002/08/26/big/img_592 +2002/07/29/big/img_1470 +2002/08/21/big/img_452 +2002/08/30/big/img_18395 +2002/08/15/big/img_215 +2002/07/21/big/img_643 +2002/07/22/big/img_209 +2003/01/17/big/img_346 +2002/08/25/big/img_658 +2002/08/21/big/img_221 +2002/08/14/big/img_60 +2003/01/17/big/img_885 +2003/01/16/big/img_482 +2002/08/19/big/img_593 
+2002/08/08/big/img_233 +2002/07/30/big/img_458 +2002/07/23/big/img_384 +2003/01/15/big/img_670 +2003/01/15/big/img_267 +2002/08/26/big/img_540 +2002/07/29/big/img_552 +2002/07/30/big/img_997 +2003/01/17/big/img_377 +2002/08/21/big/img_265 +2002/08/09/big/img_561 +2002/07/31/big/img_945 +2002/09/02/big/img_15252 +2002/08/11/big/img_276 +2002/07/22/big/img_491 +2002/07/26/big/img_517 +2002/08/14/big/img_726 +2002/08/08/big/img_46 +2002/08/28/big/img_19458 +2002/08/06/big/img_2935 +2002/07/29/big/img_1392 +2002/08/13/big/img_776 +2002/08/24/big/img_616 +2002/08/14/big/img_1065 +2002/07/29/big/img_889 +2002/08/18/big/img_188 +2002/08/07/big/img_1453 +2002/08/02/big/img_760 +2002/07/28/big/img_416 +2002/08/07/big/img_1393 +2002/08/26/big/img_292 +2002/08/26/big/img_301 +2003/01/13/big/img_195 +2002/07/26/big/img_532 +2002/08/20/big/img_550 +2002/08/05/big/img_3658 +2002/08/26/big/img_738 +2002/09/02/big/img_15750 +2003/01/17/big/img_451 +2002/07/23/big/img_339 +2002/08/16/big/img_637 +2002/08/14/big/img_748 +2002/08/06/big/img_2739 +2002/07/25/big/img_482 +2002/08/19/big/img_191 +2002/08/26/big/img_537 +2003/01/15/big/img_716 +2003/01/15/big/img_767 +2002/08/02/big/img_452 +2002/08/08/big/img_1011 +2002/08/10/big/img_144 +2003/01/14/big/img_122 +2002/07/24/big/img_586 +2002/07/24/big/img_762 +2002/08/20/big/img_369 +2002/07/30/big/img_146 +2002/08/23/big/img_396 +2003/01/15/big/img_200 +2002/08/15/big/img_1183 +2003/01/14/big/img_698 +2002/08/09/big/img_792 +2002/08/06/big/img_2347 +2002/07/31/big/img_911 +2002/08/26/big/img_722 +2002/08/23/big/img_621 +2002/08/05/big/img_3790 +2003/01/13/big/img_633 +2002/08/09/big/img_224 +2002/07/24/big/img_454 +2002/07/21/big/img_202 +2002/08/02/big/img_630 +2002/08/30/big/img_18315 +2002/07/19/big/img_491 +2002/09/01/big/img_16456 +2002/08/09/big/img_242 +2002/07/25/big/img_595 +2002/07/22/big/img_522 +2002/08/01/big/img_1593 +2002/07/29/big/img_336 +2002/08/15/big/img_448 +2002/08/28/big/img_19281 +2002/07/29/big/img_342 +2002/08/12/big/img_78 +2003/01/14/big/img_525 +2002/07/28/big/img_147 +2002/08/11/big/img_353 +2002/08/22/big/img_513 +2002/08/04/big/img_721 +2002/08/17/big/img_247 +2003/01/14/big/img_891 +2002/08/20/big/img_853 +2002/07/19/big/img_414 +2002/08/01/big/img_1530 +2003/01/14/big/img_924 +2002/08/22/big/img_468 +2002/08/18/big/img_354 +2002/08/30/big/img_18193 +2002/08/23/big/img_492 +2002/08/15/big/img_871 +2002/08/12/big/img_494 +2002/08/06/big/img_2470 +2002/07/23/big/img_923 +2002/08/26/big/img_155 +2002/08/08/big/img_669 +2002/07/23/big/img_404 +2002/08/28/big/img_19421 +2002/08/29/big/img_18993 +2002/08/25/big/img_416 +2003/01/17/big/img_434 +2002/07/29/big/img_1370 +2002/07/28/big/img_483 +2002/08/11/big/img_50 +2002/08/10/big/img_404 +2002/09/02/big/img_15057 +2003/01/14/big/img_911 +2002/09/01/big/img_16697 +2003/01/16/big/img_665 +2002/09/01/big/img_16708 +2002/08/22/big/img_612 +2002/08/28/big/img_19471 +2002/08/02/big/img_198 +2003/01/16/big/img_527 +2002/08/22/big/img_209 +2002/08/30/big/img_18205 +2003/01/14/big/img_114 +2003/01/14/big/img_1028 +2003/01/16/big/img_894 +2003/01/14/big/img_837 +2002/07/30/big/img_9 +2002/08/06/big/img_2821 +2002/08/04/big/img_85 +2003/01/13/big/img_884 +2002/07/22/big/img_570 +2002/08/07/big/img_1773 +2002/07/26/big/img_208 +2003/01/17/big/img_946 +2002/07/19/big/img_930 +2003/01/01/big/img_698 +2003/01/17/big/img_612 +2002/07/19/big/img_372 +2002/07/30/big/img_721 +2003/01/14/big/img_649 +2002/08/19/big/img_4 +2002/07/25/big/img_1024 +2003/01/15/big/img_601 +2002/08/30/big/img_18470 
+2002/07/22/big/img_29 +2002/08/07/big/img_1686 +2002/07/20/big/img_294 +2002/08/14/big/img_800 +2002/08/19/big/img_353 +2002/08/19/big/img_350 +2002/08/05/big/img_3392 +2002/08/09/big/img_622 +2003/01/15/big/img_236 +2002/08/11/big/img_643 +2002/08/05/big/img_3458 +2002/08/12/big/img_413 +2002/08/22/big/img_415 +2002/08/13/big/img_635 +2002/08/07/big/img_1198 +2002/08/04/big/img_873 +2002/08/12/big/img_407 +2003/01/15/big/img_346 +2002/08/02/big/img_275 +2002/08/17/big/img_997 +2002/08/21/big/img_958 +2002/08/20/big/img_579 +2002/07/29/big/img_142 +2003/01/14/big/img_1115 +2002/08/16/big/img_365 +2002/07/29/big/img_1414 +2002/08/17/big/img_489 +2002/08/13/big/img_1010 +2002/07/31/big/img_276 +2002/07/25/big/img_1000 +2002/08/23/big/img_524 +2002/08/28/big/img_19147 +2003/01/13/big/img_433 +2002/08/20/big/img_205 +2003/01/01/big/img_458 +2002/07/29/big/img_1449 +2003/01/16/big/img_696 +2002/08/28/big/img_19296 +2002/08/29/big/img_18688 +2002/08/21/big/img_767 +2002/08/20/big/img_532 +2002/08/26/big/img_187 +2002/07/26/big/img_183 +2002/07/27/big/img_890 +2003/01/13/big/img_576 +2002/07/30/big/img_15 +2002/07/31/big/img_889 +2002/08/31/big/img_17759 +2003/01/14/big/img_1114 +2002/07/19/big/img_445 +2002/08/03/big/img_593 +2002/07/24/big/img_750 +2002/07/30/big/img_133 +2002/08/25/big/img_671 +2002/07/20/big/img_351 +2002/08/31/big/img_17276 +2002/08/05/big/img_3231 +2002/09/02/big/img_15882 +2002/08/14/big/img_115 +2002/08/02/big/img_1148 +2002/07/25/big/img_936 +2002/07/31/big/img_639 +2002/08/04/big/img_427 +2002/08/22/big/img_843 +2003/01/17/big/img_17 +2003/01/13/big/img_690 +2002/08/13/big/img_472 +2002/08/09/big/img_425 +2002/08/05/big/img_3450 +2003/01/17/big/img_439 +2002/08/13/big/img_539 +2002/07/28/big/img_35 +2002/08/16/big/img_241 +2002/08/06/big/img_2898 +2003/01/16/big/img_429 +2002/08/05/big/img_3817 +2002/08/27/big/img_19919 +2002/07/19/big/img_422 +2002/08/15/big/img_560 +2002/07/23/big/img_750 +2002/07/30/big/img_353 +2002/08/05/big/img_43 +2002/08/23/big/img_305 +2002/08/01/big/img_2137 +2002/08/30/big/img_18097 +2002/08/01/big/img_1389 +2002/08/02/big/img_308 +2003/01/14/big/img_652 +2002/08/01/big/img_1798 +2003/01/14/big/img_732 +2003/01/16/big/img_294 +2002/08/26/big/img_213 +2002/07/24/big/img_842 +2003/01/13/big/img_630 +2003/01/13/big/img_634 +2002/08/06/big/img_2285 +2002/08/01/big/img_2162 +2002/08/30/big/img_18134 +2002/08/02/big/img_1045 +2002/08/01/big/img_2143 +2002/07/25/big/img_135 +2002/07/20/big/img_645 +2002/08/05/big/img_3666 +2002/08/14/big/img_523 +2002/08/04/big/img_425 +2003/01/14/big/img_137 +2003/01/01/big/img_176 +2002/08/15/big/img_505 +2002/08/24/big/img_386 +2002/08/05/big/img_3187 +2002/08/15/big/img_419 +2003/01/13/big/img_520 +2002/08/04/big/img_444 +2002/08/26/big/img_483 +2002/08/05/big/img_3449 +2002/08/30/big/img_18409 +2002/08/28/big/img_19455 +2002/08/27/big/img_20090 +2002/07/23/big/img_625 +2002/08/24/big/img_205 +2002/08/08/big/img_938 +2003/01/13/big/img_527 +2002/08/07/big/img_1712 +2002/07/24/big/img_801 +2002/08/09/big/img_579 +2003/01/14/big/img_41 +2003/01/15/big/img_1130 +2002/07/21/big/img_672 +2002/08/07/big/img_1590 +2003/01/01/big/img_532 +2002/08/02/big/img_529 +2002/08/05/big/img_3591 +2002/08/23/big/img_5 +2003/01/14/big/img_882 +2002/08/28/big/img_19234 +2002/07/24/big/img_398 +2003/01/14/big/img_592 +2002/08/22/big/img_548 +2002/08/12/big/img_761 +2003/01/16/big/img_497 +2002/08/18/big/img_133 +2002/08/08/big/img_874 +2002/07/19/big/img_247 +2002/08/15/big/img_170 +2002/08/27/big/img_19679 +2002/08/20/big/img_246 
+2002/08/24/big/img_358 +2002/07/29/big/img_599 +2002/08/01/big/img_1555 +2002/07/30/big/img_491 +2002/07/30/big/img_371 +2003/01/16/big/img_682 +2002/07/25/big/img_619 +2003/01/15/big/img_587 +2002/08/02/big/img_1212 +2002/08/01/big/img_2152 +2002/07/25/big/img_668 +2003/01/16/big/img_574 +2002/08/28/big/img_19464 +2002/08/11/big/img_536 +2002/07/24/big/img_201 +2002/08/05/big/img_3488 +2002/07/25/big/img_887 +2002/07/22/big/img_789 +2002/07/30/big/img_432 +2002/08/16/big/img_166 +2002/09/01/big/img_16333 +2002/07/26/big/img_1010 +2002/07/21/big/img_793 +2002/07/22/big/img_720 +2002/07/31/big/img_337 +2002/07/27/big/img_185 +2002/08/23/big/img_440 +2002/07/31/big/img_801 +2002/07/25/big/img_478 +2003/01/14/big/img_171 +2002/08/07/big/img_1054 +2002/09/02/big/img_15659 +2002/07/29/big/img_1348 +2002/08/09/big/img_337 +2002/08/26/big/img_684 +2002/07/31/big/img_537 +2002/08/15/big/img_808 +2003/01/13/big/img_740 +2002/08/07/big/img_1667 +2002/08/03/big/img_404 +2002/08/06/big/img_2520 +2002/07/19/big/img_230 +2002/07/19/big/img_356 +2003/01/16/big/img_627 +2002/08/04/big/img_474 +2002/07/29/big/img_833 +2002/07/25/big/img_176 +2002/08/01/big/img_1684 +2002/08/21/big/img_643 +2002/08/27/big/img_19673 +2002/08/02/big/img_838 +2002/08/06/big/img_2378 +2003/01/15/big/img_48 +2002/07/30/big/img_470 +2002/08/15/big/img_963 +2002/08/24/big/img_444 +2002/08/16/big/img_662 +2002/08/15/big/img_1209 +2002/07/24/big/img_25 +2002/08/06/big/img_2740 +2002/07/29/big/img_996 +2002/08/31/big/img_18074 +2002/08/04/big/img_343 +2003/01/17/big/img_509 +2003/01/13/big/img_726 +2002/08/07/big/img_1466 +2002/07/26/big/img_307 +2002/08/10/big/img_598 +2002/08/13/big/img_890 +2002/08/14/big/img_997 +2002/07/19/big/img_392 +2002/08/02/big/img_475 +2002/08/29/big/img_19038 +2002/07/29/big/img_538 +2002/07/29/big/img_502 +2002/08/02/big/img_364 +2002/08/31/big/img_17353 +2002/08/08/big/img_539 +2002/08/01/big/img_1449 +2002/07/22/big/img_363 +2002/08/02/big/img_90 +2002/09/01/big/img_16867 +2002/08/05/big/img_3371 +2002/07/30/big/img_342 +2002/08/07/big/img_1363 +2002/08/22/big/img_790 +2003/01/15/big/img_404 +2002/08/05/big/img_3447 +2002/09/01/big/img_16167 +2003/01/13/big/img_840 +2002/08/22/big/img_1001 +2002/08/09/big/img_431 +2002/07/27/big/img_618 +2002/07/31/big/img_741 +2002/07/30/big/img_964 +2002/07/25/big/img_86 +2002/07/29/big/img_275 +2002/08/21/big/img_921 +2002/07/26/big/img_892 +2002/08/21/big/img_663 +2003/01/13/big/img_567 +2003/01/14/big/img_719 +2002/07/28/big/img_251 +2003/01/15/big/img_1123 +2002/07/29/big/img_260 +2002/08/24/big/img_337 +2002/08/01/big/img_1914 +2002/08/13/big/img_373 +2003/01/15/big/img_589 +2002/08/13/big/img_906 +2002/07/26/big/img_270 +2002/08/26/big/img_313 +2002/08/25/big/img_694 +2003/01/01/big/img_327 +2002/07/23/big/img_261 +2002/08/26/big/img_642 +2002/07/29/big/img_918 +2002/07/23/big/img_455 +2002/07/24/big/img_612 +2002/07/23/big/img_534 +2002/07/19/big/img_534 +2002/07/19/big/img_726 +2002/08/01/big/img_2146 +2002/08/02/big/img_543 +2003/01/16/big/img_777 +2002/07/30/big/img_484 +2002/08/13/big/img_1161 +2002/07/21/big/img_390 +2002/08/06/big/img_2288 +2002/08/21/big/img_677 +2002/08/13/big/img_747 +2002/08/15/big/img_1248 +2002/07/31/big/img_416 +2002/09/02/big/img_15259 +2002/08/16/big/img_781 +2002/08/24/big/img_754 +2002/07/24/big/img_803 +2002/08/20/big/img_609 +2002/08/28/big/img_19571 +2002/09/01/big/img_16140 +2002/08/26/big/img_769 +2002/07/20/big/img_588 +2002/08/02/big/img_898 +2002/07/21/big/img_466 +2002/08/14/big/img_1046 +2002/07/25/big/img_212 
+2002/08/26/big/img_353 +2002/08/19/big/img_810 +2002/08/31/big/img_17824 +2002/08/12/big/img_631 +2002/07/19/big/img_828 +2002/07/24/big/img_130 +2002/08/25/big/img_580 +2002/07/31/big/img_699 +2002/07/23/big/img_808 +2002/07/31/big/img_377 +2003/01/16/big/img_570 +2002/09/01/big/img_16254 +2002/07/21/big/img_471 +2002/08/01/big/img_1548 +2002/08/18/big/img_252 +2002/08/19/big/img_576 +2002/08/20/big/img_464 +2002/07/27/big/img_735 +2002/08/21/big/img_589 +2003/01/15/big/img_1192 +2002/08/09/big/img_302 +2002/07/31/big/img_594 +2002/08/23/big/img_19 +2002/08/29/big/img_18819 +2002/08/19/big/img_293 +2002/07/30/big/img_331 +2002/08/23/big/img_607 +2002/07/30/big/img_363 +2002/08/16/big/img_766 +2003/01/13/big/img_481 +2002/08/06/big/img_2515 +2002/09/02/big/img_15913 +2002/09/02/big/img_15827 +2002/09/02/big/img_15053 +2002/08/07/big/img_1576 +2002/07/23/big/img_268 +2002/08/21/big/img_152 +2003/01/15/big/img_578 +2002/07/21/big/img_589 +2002/07/20/big/img_548 +2002/08/27/big/img_19693 +2002/08/31/big/img_17252 +2002/07/31/big/img_138 +2002/07/23/big/img_372 +2002/08/16/big/img_695 +2002/07/27/big/img_287 +2002/08/15/big/img_315 +2002/08/10/big/img_361 +2002/07/29/big/img_899 +2002/08/13/big/img_771 +2002/08/21/big/img_92 +2003/01/15/big/img_425 +2003/01/16/big/img_450 +2002/09/01/big/img_16942 +2002/08/02/big/img_51 +2002/09/02/big/img_15379 +2002/08/24/big/img_147 +2002/08/30/big/img_18122 +2002/07/26/big/img_950 +2002/08/07/big/img_1400 +2002/08/17/big/img_468 +2002/08/15/big/img_470 +2002/07/30/big/img_318 +2002/07/22/big/img_644 +2002/08/27/big/img_19732 +2002/07/23/big/img_601 +2002/08/26/big/img_398 +2002/08/21/big/img_428 +2002/08/06/big/img_2119 +2002/08/29/big/img_19103 +2003/01/14/big/img_933 +2002/08/11/big/img_674 +2002/08/28/big/img_19420 +2002/08/03/big/img_418 +2002/08/17/big/img_312 +2002/07/25/big/img_1044 +2003/01/17/big/img_671 +2002/08/30/big/img_18297 +2002/07/25/big/img_755 +2002/07/23/big/img_471 +2002/08/21/big/img_39 +2002/07/26/big/img_699 +2003/01/14/big/img_33 +2002/07/31/big/img_411 +2002/08/16/big/img_645 +2003/01/17/big/img_116 +2002/09/02/big/img_15903 +2002/08/20/big/img_120 +2002/08/22/big/img_176 +2002/07/29/big/img_1316 +2002/08/27/big/img_19914 +2002/07/22/big/img_719 +2002/08/28/big/img_19239 +2003/01/13/big/img_385 +2002/08/08/big/img_525 +2002/07/19/big/img_782 +2002/08/13/big/img_843 +2002/07/30/big/img_107 +2002/08/11/big/img_752 +2002/07/29/big/img_383 +2002/08/26/big/img_249 +2002/08/29/big/img_18860 +2002/07/30/big/img_70 +2002/07/26/big/img_194 +2002/08/15/big/img_530 +2002/08/08/big/img_816 +2002/07/31/big/img_286 +2003/01/13/big/img_294 +2002/07/31/big/img_251 +2002/07/24/big/img_13 +2002/08/31/big/img_17938 +2002/07/22/big/img_642 +2003/01/14/big/img_728 +2002/08/18/big/img_47 +2002/08/22/big/img_306 +2002/08/20/big/img_348 +2002/08/15/big/img_764 +2002/08/08/big/img_163 +2002/07/23/big/img_531 +2002/07/23/big/img_467 +2003/01/16/big/img_743 +2003/01/13/big/img_535 +2002/08/02/big/img_523 +2002/08/22/big/img_120 +2002/08/11/big/img_496 +2002/08/29/big/img_19075 +2002/08/08/big/img_465 +2002/08/09/big/img_790 +2002/08/19/big/img_588 +2002/08/23/big/img_407 +2003/01/17/big/img_435 +2002/08/24/big/img_398 +2002/08/27/big/img_19899 +2003/01/15/big/img_335 +2002/08/13/big/img_493 +2002/09/02/big/img_15460 +2002/07/31/big/img_470 +2002/08/05/big/img_3550 +2002/07/28/big/img_123 +2002/08/01/big/img_1498 +2002/08/04/big/img_504 +2003/01/17/big/img_427 +2002/08/27/big/img_19708 +2002/07/27/big/img_861 +2002/07/25/big/img_685 +2002/07/31/big/img_207 
+2003/01/14/big/img_745 +2002/08/31/big/img_17756 +2002/08/24/big/img_288 +2002/08/18/big/img_181 +2002/08/10/big/img_520 +2002/08/25/big/img_705 +2002/08/23/big/img_226 +2002/08/04/big/img_727 +2002/07/24/big/img_625 +2002/08/28/big/img_19157 +2002/08/23/big/img_586 +2002/07/31/big/img_232 +2003/01/13/big/img_240 +2003/01/14/big/img_321 +2003/01/15/big/img_533 +2002/07/23/big/img_480 +2002/07/24/big/img_371 +2002/08/21/big/img_702 +2002/08/31/big/img_17075 +2002/09/02/big/img_15278 +2002/07/29/big/img_246 +2003/01/15/big/img_829 +2003/01/15/big/img_1213 +2003/01/16/big/img_441 +2002/08/14/big/img_921 +2002/07/23/big/img_425 +2002/08/15/big/img_296 +2002/07/19/big/img_135 +2002/07/26/big/img_402 +2003/01/17/big/img_88 +2002/08/20/big/img_872 +2002/08/13/big/img_1110 +2003/01/16/big/img_1040 +2002/07/23/big/img_9 +2002/08/13/big/img_700 +2002/08/16/big/img_371 +2002/08/27/big/img_19966 +2003/01/17/big/img_391 +2002/08/18/big/img_426 +2002/08/01/big/img_1618 +2002/07/21/big/img_754 +2003/01/14/big/img_1101 +2003/01/16/big/img_1022 +2002/07/22/big/img_275 +2002/08/24/big/img_86 +2002/08/17/big/img_582 +2003/01/15/big/img_765 +2003/01/17/big/img_449 +2002/07/28/big/img_265 +2003/01/13/big/img_552 +2002/07/28/big/img_115 +2003/01/16/big/img_56 +2002/08/02/big/img_1232 +2003/01/17/big/img_925 +2002/07/22/big/img_445 +2002/07/25/big/img_957 +2002/07/20/big/img_589 +2002/08/31/big/img_17107 +2002/07/29/big/img_483 +2002/08/14/big/img_1063 +2002/08/07/big/img_1545 +2002/08/14/big/img_680 +2002/09/01/big/img_16694 +2002/08/14/big/img_257 +2002/08/11/big/img_726 +2002/07/26/big/img_681 +2002/07/25/big/img_481 +2003/01/14/big/img_737 +2002/08/28/big/img_19480 +2003/01/16/big/img_362 +2002/08/27/big/img_19865 +2003/01/01/big/img_547 +2002/09/02/big/img_15074 +2002/08/01/big/img_1453 +2002/08/22/big/img_594 +2002/08/28/big/img_19263 +2002/08/13/big/img_478 +2002/07/29/big/img_1358 +2003/01/14/big/img_1022 +2002/08/16/big/img_450 +2002/08/02/big/img_159 +2002/07/26/big/img_781 +2003/01/13/big/img_601 +2002/08/20/big/img_407 +2002/08/15/big/img_468 +2002/08/31/big/img_17902 +2002/08/16/big/img_81 +2002/07/25/big/img_987 +2002/07/25/big/img_500 +2002/08/02/big/img_31 +2002/08/18/big/img_538 +2002/08/08/big/img_54 +2002/07/23/big/img_686 +2002/07/24/big/img_836 +2003/01/17/big/img_734 +2002/08/16/big/img_1055 +2003/01/16/big/img_521 +2002/07/25/big/img_612 +2002/08/22/big/img_778 +2002/08/03/big/img_251 +2002/08/12/big/img_436 +2002/08/23/big/img_705 +2002/07/28/big/img_243 +2002/07/25/big/img_1029 +2002/08/20/big/img_287 +2002/08/29/big/img_18739 +2002/08/05/big/img_3272 +2002/07/27/big/img_214 +2003/01/14/big/img_5 +2002/08/01/big/img_1380 +2002/08/29/big/img_19097 +2002/07/30/big/img_486 +2002/08/29/big/img_18707 +2002/08/10/big/img_559 +2002/08/15/big/img_365 +2002/08/09/big/img_525 +2002/08/10/big/img_689 +2002/07/25/big/img_502 +2002/08/03/big/img_667 +2002/08/10/big/img_855 +2002/08/10/big/img_706 +2002/08/18/big/img_603 +2003/01/16/big/img_1055 +2002/08/31/big/img_17890 +2002/08/15/big/img_761 +2003/01/15/big/img_489 +2002/08/26/big/img_351 +2002/08/01/big/img_1772 +2002/08/31/big/img_17729 +2002/07/25/big/img_609 +2003/01/13/big/img_539 +2002/07/27/big/img_686 +2002/07/31/big/img_311 +2002/08/22/big/img_799 +2003/01/16/big/img_936 +2002/08/31/big/img_17813 +2002/08/04/big/img_862 +2002/08/09/big/img_332 +2002/07/20/big/img_148 +2002/08/12/big/img_426 +2002/07/24/big/img_69 +2002/07/27/big/img_685 +2002/08/02/big/img_480 +2002/08/26/big/img_154 +2002/07/24/big/img_598 +2002/08/01/big/img_1881 
+2002/08/20/big/img_667 +2003/01/14/big/img_495 +2002/07/21/big/img_744 +2002/07/30/big/img_150 +2002/07/23/big/img_924 +2002/08/08/big/img_272 +2002/07/23/big/img_310 +2002/07/25/big/img_1011 +2002/09/02/big/img_15725 +2002/07/19/big/img_814 +2002/08/20/big/img_936 +2002/07/25/big/img_85 +2002/08/24/big/img_662 +2002/08/09/big/img_495 +2003/01/15/big/img_196 +2002/08/16/big/img_707 +2002/08/28/big/img_19370 +2002/08/06/big/img_2366 +2002/08/06/big/img_3012 +2002/08/01/big/img_1452 +2002/07/31/big/img_742 +2002/07/27/big/img_914 +2003/01/13/big/img_290 +2002/07/31/big/img_288 +2002/08/02/big/img_171 +2002/08/22/big/img_191 +2002/07/27/big/img_1066 +2002/08/12/big/img_383 +2003/01/17/big/img_1018 +2002/08/01/big/img_1785 +2002/08/11/big/img_390 +2002/08/27/big/img_20037 +2002/08/12/big/img_38 +2003/01/15/big/img_103 +2002/08/26/big/img_31 +2002/08/18/big/img_660 +2002/07/22/big/img_694 +2002/08/15/big/img_24 +2002/07/27/big/img_1077 +2002/08/01/big/img_1943 +2002/07/22/big/img_292 +2002/09/01/big/img_16857 +2002/07/22/big/img_892 +2003/01/14/big/img_46 +2002/08/09/big/img_469 +2002/08/09/big/img_414 +2003/01/16/big/img_40 +2002/08/28/big/img_19231 +2002/07/27/big/img_978 +2002/07/23/big/img_475 +2002/07/25/big/img_92 +2002/08/09/big/img_799 +2002/07/25/big/img_491 +2002/08/03/big/img_654 +2003/01/15/big/img_687 +2002/08/11/big/img_478 +2002/08/07/big/img_1664 +2002/08/20/big/img_362 +2002/08/01/big/img_1298 +2003/01/13/big/img_500 +2002/08/06/big/img_2896 +2002/08/30/big/img_18529 +2002/08/16/big/img_1020 +2002/07/29/big/img_892 +2002/08/29/big/img_18726 +2002/07/21/big/img_453 +2002/08/17/big/img_437 +2002/07/19/big/img_665 +2002/07/22/big/img_440 +2002/07/19/big/img_582 +2002/07/21/big/img_233 +2003/01/01/big/img_82 +2002/07/25/big/img_341 +2002/07/29/big/img_864 +2002/08/02/big/img_276 +2002/08/29/big/img_18654 +2002/07/27/big/img_1024 +2002/08/19/big/img_373 +2003/01/15/big/img_241 +2002/07/25/big/img_84 +2002/08/13/big/img_834 +2002/08/10/big/img_511 +2002/08/01/big/img_1627 +2002/08/08/big/img_607 +2002/08/06/big/img_2083 +2002/08/01/big/img_1486 +2002/08/08/big/img_700 +2002/08/01/big/img_1954 +2002/08/21/big/img_54 +2002/07/30/big/img_847 +2002/08/28/big/img_19169 +2002/07/21/big/img_549 +2002/08/03/big/img_693 +2002/07/31/big/img_1002 +2003/01/14/big/img_1035 +2003/01/16/big/img_622 +2002/07/30/big/img_1201 +2002/08/10/big/img_444 +2002/07/31/big/img_374 +2002/08/21/big/img_301 +2002/08/13/big/img_1095 +2003/01/13/big/img_288 +2002/07/25/big/img_232 +2003/01/13/big/img_967 +2002/08/26/big/img_360 +2002/08/05/big/img_67 +2002/08/29/big/img_18969 +2002/07/28/big/img_16 +2002/08/16/big/img_515 +2002/07/20/big/img_708 +2002/08/18/big/img_178 +2003/01/15/big/img_509 +2002/07/25/big/img_430 +2002/08/21/big/img_738 +2002/08/16/big/img_886 +2002/09/02/big/img_15605 +2002/09/01/big/img_16242 +2002/08/24/big/img_711 +2002/07/25/big/img_90 +2002/08/09/big/img_491 +2002/07/30/big/img_534 +2003/01/13/big/img_474 +2002/08/25/big/img_510 +2002/08/15/big/img_555 +2002/08/02/big/img_775 +2002/07/23/big/img_975 +2002/08/19/big/img_229 +2003/01/17/big/img_860 +2003/01/02/big/img_10 +2002/07/23/big/img_542 +2002/08/06/big/img_2535 +2002/07/22/big/img_37 +2002/08/06/big/img_2342 +2002/08/25/big/img_515 +2002/08/25/big/img_336 +2002/08/18/big/img_837 +2002/08/21/big/img_616 +2003/01/17/big/img_24 +2002/07/26/big/img_936 +2002/08/14/big/img_896 +2002/07/29/big/img_465 +2002/07/31/big/img_543 +2002/08/01/big/img_1411 +2002/08/02/big/img_423 +2002/08/21/big/img_44 +2002/07/31/big/img_11 
+2003/01/15/big/img_628 +2003/01/15/big/img_605 +2002/07/30/big/img_571 +2002/07/23/big/img_428 +2002/08/15/big/img_942 +2002/07/26/big/img_531 +2003/01/16/big/img_59 +2002/08/02/big/img_410 +2002/07/31/big/img_230 +2002/08/19/big/img_806 +2003/01/14/big/img_462 +2002/08/16/big/img_370 +2002/08/13/big/img_380 +2002/08/16/big/img_932 +2002/07/19/big/img_393 +2002/08/20/big/img_764 +2002/08/15/big/img_616 +2002/07/26/big/img_267 +2002/07/27/big/img_1069 +2002/08/14/big/img_1041 +2003/01/13/big/img_594 +2002/09/01/big/img_16845 +2002/08/09/big/img_229 +2003/01/16/big/img_639 +2002/08/19/big/img_398 +2002/08/18/big/img_978 +2002/08/24/big/img_296 +2002/07/29/big/img_415 +2002/07/30/big/img_923 +2002/08/18/big/img_575 +2002/08/22/big/img_182 +2002/07/25/big/img_806 +2002/07/22/big/img_49 +2002/07/29/big/img_989 +2003/01/17/big/img_789 +2003/01/15/big/img_503 +2002/09/01/big/img_16062 +2003/01/17/big/img_794 +2002/08/15/big/img_564 +2003/01/15/big/img_222 +2002/08/01/big/img_1656 +2003/01/13/big/img_432 +2002/07/19/big/img_426 +2002/08/17/big/img_244 +2002/08/13/big/img_805 +2002/09/02/big/img_15067 +2002/08/11/big/img_58 +2002/08/22/big/img_636 +2002/07/22/big/img_416 +2002/08/13/big/img_836 +2002/08/26/big/img_363 +2002/07/30/big/img_917 +2003/01/14/big/img_206 +2002/08/12/big/img_311 +2002/08/31/big/img_17623 +2002/07/29/big/img_661 +2003/01/13/big/img_417 +2002/08/02/big/img_463 +2002/08/02/big/img_669 +2002/08/26/big/img_670 +2002/08/02/big/img_375 +2002/07/19/big/img_209 +2002/08/08/big/img_115 +2002/08/21/big/img_399 +2002/08/20/big/img_911 +2002/08/07/big/img_1212 +2002/08/20/big/img_578 +2002/08/22/big/img_554 +2002/08/21/big/img_484 +2002/07/25/big/img_450 +2002/08/03/big/img_542 +2002/08/15/big/img_561 +2002/07/23/big/img_360 +2002/08/30/big/img_18137 +2002/07/25/big/img_250 +2002/08/03/big/img_647 +2002/08/20/big/img_375 +2002/08/14/big/img_387 +2002/09/01/big/img_16990 +2002/08/28/big/img_19341 +2003/01/15/big/img_239 +2002/08/20/big/img_528 +2002/08/12/big/img_130 +2002/09/02/big/img_15108 +2003/01/15/big/img_372 +2002/08/16/big/img_678 +2002/08/04/big/img_623 +2002/07/23/big/img_477 +2002/08/28/big/img_19590 +2003/01/17/big/img_978 +2002/09/01/big/img_16692 +2002/07/20/big/img_109 +2002/08/06/big/img_2660 +2003/01/14/big/img_464 +2002/08/09/big/img_618 +2002/07/22/big/img_722 +2002/08/25/big/img_419 +2002/08/03/big/img_314 +2002/08/25/big/img_40 +2002/07/27/big/img_430 +2002/08/10/big/img_569 +2002/08/23/big/img_398 +2002/07/23/big/img_893 +2002/08/16/big/img_261 +2002/08/06/big/img_2668 +2002/07/22/big/img_835 +2002/09/02/big/img_15093 +2003/01/16/big/img_65 +2002/08/21/big/img_448 +2003/01/14/big/img_351 +2003/01/17/big/img_133 +2002/07/28/big/img_493 +2003/01/15/big/img_640 +2002/09/01/big/img_16880 +2002/08/15/big/img_350 +2002/08/20/big/img_624 +2002/08/25/big/img_604 +2002/08/06/big/img_2200 +2002/08/23/big/img_290 +2002/08/13/big/img_1152 +2003/01/14/big/img_251 +2002/08/02/big/img_538 +2002/08/22/big/img_613 +2003/01/13/big/img_351 +2002/08/18/big/img_368 +2002/07/23/big/img_392 +2002/07/25/big/img_198 +2002/07/25/big/img_418 +2002/08/26/big/img_614 +2002/07/23/big/img_405 +2003/01/14/big/img_445 +2002/07/25/big/img_326 +2002/08/10/big/img_734 +2003/01/14/big/img_530 +2002/08/08/big/img_561 +2002/08/29/big/img_18990 +2002/08/10/big/img_576 +2002/07/29/big/img_1494 +2002/07/19/big/img_198 +2002/08/10/big/img_562 +2002/07/22/big/img_901 +2003/01/14/big/img_37 +2002/09/02/big/img_15629 +2003/01/14/big/img_58 +2002/08/01/big/img_1364 +2002/07/27/big/img_636 
+2003/01/13/big/img_241 +2002/09/01/big/img_16988 +2003/01/13/big/img_560 +2002/08/09/big/img_533 +2002/07/31/big/img_249 +2003/01/17/big/img_1007 +2002/07/21/big/img_64 +2003/01/13/big/img_537 +2003/01/15/big/img_606 +2002/08/18/big/img_651 +2002/08/24/big/img_405 +2002/07/26/big/img_837 +2002/08/09/big/img_562 +2002/08/01/big/img_1983 +2002/08/03/big/img_514 +2002/07/29/big/img_314 +2002/08/12/big/img_493 +2003/01/14/big/img_121 +2003/01/14/big/img_479 +2002/08/04/big/img_410 +2002/07/22/big/img_607 +2003/01/17/big/img_417 +2002/07/20/big/img_547 +2002/08/13/big/img_396 +2002/08/31/big/img_17538 +2002/08/13/big/img_187 +2002/08/12/big/img_328 +2003/01/14/big/img_569 +2002/07/27/big/img_1081 +2002/08/14/big/img_504 +2002/08/23/big/img_785 +2002/07/26/big/img_339 +2002/08/07/big/img_1156 +2002/08/07/big/img_1456 +2002/08/23/big/img_378 +2002/08/27/big/img_19719 +2002/07/31/big/img_39 +2002/07/31/big/img_883 +2003/01/14/big/img_676 +2002/07/29/big/img_214 +2002/07/26/big/img_669 +2002/07/25/big/img_202 +2002/08/08/big/img_259 +2003/01/17/big/img_943 +2003/01/15/big/img_512 +2002/08/05/big/img_3295 +2002/08/27/big/img_19685 +2002/08/08/big/img_277 +2002/08/30/big/img_18154 +2002/07/22/big/img_663 +2002/08/29/big/img_18914 +2002/07/31/big/img_908 +2002/08/27/big/img_19926 +2003/01/13/big/img_791 +2003/01/15/big/img_827 +2002/08/18/big/img_878 +2002/08/14/big/img_670 +2002/07/20/big/img_182 +2002/08/15/big/img_291 +2002/08/06/big/img_2600 +2002/07/23/big/img_587 +2002/08/14/big/img_577 +2003/01/15/big/img_585 +2002/07/30/big/img_310 +2002/08/03/big/img_658 +2002/08/10/big/img_157 +2002/08/19/big/img_811 +2002/07/29/big/img_1318 +2002/08/04/big/img_104 +2002/07/30/big/img_332 +2002/07/24/big/img_789 +2002/07/29/big/img_516 +2002/07/23/big/img_843 +2002/08/01/big/img_1528 +2002/08/13/big/img_798 +2002/08/07/big/img_1729 +2002/08/28/big/img_19448 +2003/01/16/big/img_95 +2002/08/12/big/img_473 +2002/07/27/big/img_269 +2003/01/16/big/img_621 +2002/07/29/big/img_772 +2002/07/24/big/img_171 +2002/07/19/big/img_429 +2002/08/07/big/img_1933 +2002/08/27/big/img_19629 +2002/08/05/big/img_3688 +2002/08/07/big/img_1691 +2002/07/23/big/img_600 +2002/07/29/big/img_666 +2002/08/25/big/img_566 +2002/08/06/big/img_2659 +2002/08/29/big/img_18929 +2002/08/16/big/img_407 +2002/08/18/big/img_774 +2002/08/19/big/img_249 +2002/08/06/big/img_2427 +2002/08/29/big/img_18899 +2002/08/01/big/img_1818 +2002/07/31/big/img_108 +2002/07/29/big/img_500 +2002/08/11/big/img_115 +2002/07/19/big/img_521 +2002/08/02/big/img_1163 +2002/07/22/big/img_62 +2002/08/13/big/img_466 +2002/08/21/big/img_956 +2002/08/23/big/img_602 +2002/08/20/big/img_858 +2002/07/25/big/img_690 +2002/07/19/big/img_130 +2002/08/04/big/img_874 +2002/07/26/big/img_489 +2002/07/22/big/img_548 +2002/08/10/big/img_191 +2002/07/25/big/img_1051 +2002/08/18/big/img_473 +2002/08/12/big/img_755 +2002/08/18/big/img_413 +2002/08/08/big/img_1044 +2002/08/17/big/img_680 +2002/08/26/big/img_235 +2002/08/20/big/img_330 +2002/08/22/big/img_344 +2002/08/09/big/img_593 +2002/07/31/big/img_1006 +2002/08/14/big/img_337 +2002/08/16/big/img_728 +2002/07/24/big/img_834 +2002/08/04/big/img_552 +2002/09/02/big/img_15213 +2002/07/25/big/img_725 +2002/08/30/big/img_18290 +2003/01/01/big/img_475 +2002/07/27/big/img_1083 +2002/08/29/big/img_18955 +2002/08/31/big/img_17232 +2002/08/08/big/img_480 +2002/08/01/big/img_1311 +2002/07/30/big/img_745 +2002/08/03/big/img_649 +2002/08/12/big/img_193 +2002/07/29/big/img_228 +2002/07/25/big/img_836 +2002/08/20/big/img_400 +2002/07/30/big/img_507 
+2002/09/02/big/img_15072 +2002/07/26/big/img_658 +2002/07/28/big/img_503 +2002/08/05/big/img_3814 +2002/08/24/big/img_745 +2003/01/13/big/img_817 +2002/08/08/big/img_579 +2002/07/22/big/img_251 +2003/01/13/big/img_689 +2002/07/25/big/img_407 +2002/08/13/big/img_1050 +2002/08/14/big/img_733 +2002/07/24/big/img_82 +2003/01/17/big/img_288 +2003/01/15/big/img_475 +2002/08/14/big/img_620 +2002/08/21/big/img_167 +2002/07/19/big/img_300 +2002/07/26/big/img_219 +2002/08/01/big/img_1468 +2002/07/23/big/img_260 +2002/08/09/big/img_555 +2002/07/19/big/img_160 +2002/08/02/big/img_1060 +2003/01/14/big/img_149 +2002/08/15/big/img_346 +2002/08/24/big/img_597 +2002/08/22/big/img_502 +2002/08/30/big/img_18228 +2002/07/21/big/img_766 +2003/01/15/big/img_841 +2002/07/24/big/img_516 +2002/08/02/big/img_265 +2002/08/15/big/img_1243 +2003/01/15/big/img_223 +2002/08/04/big/img_236 +2002/07/22/big/img_309 +2002/07/20/big/img_656 +2002/07/31/big/img_412 +2002/09/01/big/img_16462 +2003/01/16/big/img_431 +2002/07/22/big/img_793 +2002/08/15/big/img_877 +2002/07/26/big/img_282 +2002/07/25/big/img_529 +2002/08/24/big/img_613 +2003/01/17/big/img_700 +2002/08/06/big/img_2526 +2002/08/24/big/img_394 +2002/08/21/big/img_521 +2002/08/25/big/img_560 +2002/07/29/big/img_966 +2002/07/25/big/img_448 +2003/01/13/big/img_782 +2002/08/21/big/img_296 +2002/09/01/big/img_16755 +2002/08/05/big/img_3552 +2002/09/02/big/img_15823 +2003/01/14/big/img_193 +2002/07/21/big/img_159 +2002/08/02/big/img_564 +2002/08/16/big/img_300 +2002/07/19/big/img_269 +2002/08/13/big/img_676 +2002/07/28/big/img_57 +2002/08/05/big/img_3318 +2002/07/31/big/img_218 +2002/08/21/big/img_898 +2002/07/29/big/img_109 +2002/07/19/big/img_854 +2002/08/23/big/img_311 +2002/08/14/big/img_318 +2002/07/25/big/img_523 +2002/07/21/big/img_678 +2003/01/17/big/img_690 +2002/08/28/big/img_19503 +2002/08/18/big/img_251 +2002/08/22/big/img_672 +2002/08/20/big/img_663 +2002/08/02/big/img_148 +2002/09/02/big/img_15580 +2002/07/25/big/img_778 +2002/08/14/big/img_565 +2002/08/12/big/img_374 +2002/08/13/big/img_1018 +2002/08/20/big/img_474 +2002/08/25/big/img_33 +2002/08/02/big/img_1190 +2002/08/08/big/img_864 +2002/08/14/big/img_1071 +2002/08/30/big/img_18103 +2002/08/18/big/img_533 +2003/01/16/big/img_650 +2002/07/25/big/img_108 +2002/07/26/big/img_81 +2002/07/27/big/img_543 +2002/07/29/big/img_521 +2003/01/13/big/img_434 +2002/08/26/big/img_674 +2002/08/06/big/img_2932 +2002/08/07/big/img_1262 +2003/01/15/big/img_201 +2003/01/16/big/img_673 +2002/09/02/big/img_15988 +2002/07/29/big/img_1306 +2003/01/14/big/img_1072 +2002/08/30/big/img_18232 +2002/08/05/big/img_3711 +2002/07/23/big/img_775 +2002/08/01/big/img_16 +2003/01/16/big/img_630 +2002/08/22/big/img_695 +2002/08/14/big/img_51 +2002/08/14/big/img_782 +2002/08/24/big/img_742 +2003/01/14/big/img_512 +2003/01/15/big/img_1183 +2003/01/15/big/img_714 +2002/08/01/big/img_2078 +2002/07/31/big/img_682 +2002/09/02/big/img_15687 +2002/07/26/big/img_518 +2002/08/27/big/img_19676 +2002/09/02/big/img_15969 +2002/08/02/big/img_931 +2002/08/25/big/img_508 +2002/08/29/big/img_18616 +2002/07/22/big/img_839 +2002/07/28/big/img_313 +2003/01/14/big/img_155 +2002/08/02/big/img_1105 +2002/08/09/big/img_53 +2002/08/16/big/img_469 +2002/08/15/big/img_502 +2002/08/20/big/img_575 +2002/07/25/big/img_138 +2003/01/16/big/img_579 +2002/07/19/big/img_352 +2003/01/14/big/img_762 +2003/01/01/big/img_588 +2002/08/02/big/img_981 +2002/08/21/big/img_447 +2002/09/01/big/img_16151 +2003/01/14/big/img_769 +2002/08/23/big/img_461 +2002/08/17/big/img_240 
+2002/09/02/big/img_15220 +2002/07/19/big/img_408 +2002/09/02/big/img_15496 +2002/07/29/big/img_758 +2002/08/28/big/img_19392 +2002/08/06/big/img_2723 +2002/08/31/big/img_17752 +2002/08/23/big/img_469 +2002/08/13/big/img_515 +2002/09/02/big/img_15551 +2002/08/03/big/img_462 +2002/07/24/big/img_613 +2002/07/22/big/img_61 +2002/08/08/big/img_171 +2002/08/21/big/img_177 +2003/01/14/big/img_105 +2002/08/02/big/img_1017 +2002/08/22/big/img_106 +2002/07/27/big/img_542 +2002/07/21/big/img_665 +2002/07/23/big/img_595 +2002/08/04/big/img_657 +2002/08/29/big/img_19002 +2003/01/15/big/img_550 +2002/08/14/big/img_662 +2002/07/20/big/img_425 +2002/08/30/big/img_18528 +2002/07/26/big/img_611 +2002/07/22/big/img_849 +2002/08/07/big/img_1655 +2002/08/21/big/img_638 +2003/01/17/big/img_732 +2003/01/01/big/img_496 +2002/08/18/big/img_713 +2002/08/08/big/img_109 +2002/07/27/big/img_1008 +2002/07/20/big/img_559 +2002/08/16/big/img_699 +2002/08/31/big/img_17702 +2002/07/31/big/img_1013 +2002/08/01/big/img_2027 +2002/08/02/big/img_1001 +2002/08/03/big/img_210 +2002/08/01/big/img_2087 +2003/01/14/big/img_199 +2002/07/29/big/img_48 +2002/07/19/big/img_727 +2002/08/09/big/img_249 +2002/08/04/big/img_632 +2002/08/22/big/img_620 +2003/01/01/big/img_457 +2002/08/05/big/img_3223 +2002/07/27/big/img_240 +2002/07/25/big/img_797 +2002/08/13/big/img_430 +2002/07/25/big/img_615 +2002/08/12/big/img_28 +2002/07/30/big/img_220 +2002/07/24/big/img_89 +2002/08/21/big/img_357 +2002/08/09/big/img_590 +2003/01/13/big/img_525 +2002/08/17/big/img_818 +2003/01/02/big/img_7 +2002/07/26/big/img_636 +2003/01/13/big/img_1122 +2002/07/23/big/img_810 +2002/08/20/big/img_888 +2002/07/27/big/img_3 +2002/08/15/big/img_451 +2002/09/02/big/img_15787 +2002/07/31/big/img_281 +2002/08/05/big/img_3274 +2002/08/07/big/img_1254 +2002/07/31/big/img_27 +2002/08/01/big/img_1366 +2002/07/30/big/img_182 +2002/08/27/big/img_19690 +2002/07/29/big/img_68 +2002/08/23/big/img_754 +2002/07/30/big/img_540 +2002/08/27/big/img_20063 +2002/08/14/big/img_471 +2002/08/02/big/img_615 +2002/07/30/big/img_186 +2002/08/25/big/img_150 +2002/07/27/big/img_626 +2002/07/20/big/img_225 +2003/01/15/big/img_1252 +2002/07/19/big/img_367 +2003/01/15/big/img_582 +2002/08/09/big/img_572 +2002/08/08/big/img_428 +2003/01/15/big/img_639 +2002/08/28/big/img_19245 +2002/07/24/big/img_321 +2002/08/02/big/img_662 +2002/08/08/big/img_1033 +2003/01/17/big/img_867 +2002/07/22/big/img_652 +2003/01/14/big/img_224 +2002/08/18/big/img_49 +2002/07/26/big/img_46 +2002/08/31/big/img_18021 +2002/07/25/big/img_151 +2002/08/23/big/img_540 +2002/08/25/big/img_693 +2002/07/23/big/img_340 +2002/07/28/big/img_117 +2002/09/02/big/img_15768 +2002/08/26/big/img_562 +2002/07/24/big/img_480 +2003/01/15/big/img_341 +2002/08/10/big/img_783 +2002/08/20/big/img_132 +2003/01/14/big/img_370 +2002/07/20/big/img_720 +2002/08/03/big/img_144 +2002/08/20/big/img_538 +2002/08/01/big/img_1745 +2002/08/11/big/img_683 +2002/08/03/big/img_328 +2002/08/10/big/img_793 +2002/08/14/big/img_689 +2002/08/02/big/img_162 +2003/01/17/big/img_411 +2002/07/31/big/img_361 +2002/08/15/big/img_289 +2002/08/08/big/img_254 +2002/08/15/big/img_996 +2002/08/20/big/img_785 +2002/07/24/big/img_511 +2002/08/06/big/img_2614 +2002/08/29/big/img_18733 +2002/08/17/big/img_78 +2002/07/30/big/img_378 +2002/08/31/big/img_17947 +2002/08/26/big/img_88 +2002/07/30/big/img_558 +2002/08/02/big/img_67 +2003/01/14/big/img_325 +2002/07/29/big/img_1357 +2002/07/19/big/img_391 +2002/07/30/big/img_307 +2003/01/13/big/img_219 +2002/07/24/big/img_807 
+2002/08/23/big/img_543 +2002/08/29/big/img_18620 +2002/07/22/big/img_769 +2002/08/26/big/img_503 +2002/07/30/big/img_78 +2002/08/14/big/img_1036 +2002/08/09/big/img_58 +2002/07/24/big/img_616 +2002/08/02/big/img_464 +2002/07/26/big/img_576 +2002/07/22/big/img_273 +2003/01/16/big/img_470 +2002/07/29/big/img_329 +2002/07/30/big/img_1086 +2002/07/31/big/img_353 +2002/09/02/big/img_15275 +2003/01/17/big/img_555 +2002/08/26/big/img_212 +2002/08/01/big/img_1692 +2003/01/15/big/img_600 +2002/07/29/big/img_825 +2002/08/08/big/img_68 +2002/08/10/big/img_719 +2002/07/31/big/img_636 +2002/07/29/big/img_325 +2002/07/21/big/img_515 +2002/07/22/big/img_705 +2003/01/13/big/img_818 +2002/08/09/big/img_486 +2002/08/22/big/img_141 +2002/07/22/big/img_303 +2002/08/09/big/img_393 +2002/07/29/big/img_963 +2002/08/02/big/img_1215 +2002/08/19/big/img_674 +2002/08/12/big/img_690 +2002/08/21/big/img_637 +2002/08/21/big/img_841 +2002/08/24/big/img_71 +2002/07/25/big/img_596 +2002/07/24/big/img_864 +2002/08/18/big/img_293 +2003/01/14/big/img_657 +2002/08/15/big/img_411 +2002/08/16/big/img_348 +2002/08/05/big/img_3157 +2002/07/20/big/img_663 +2003/01/13/big/img_654 +2003/01/16/big/img_433 +2002/08/30/big/img_18200 +2002/08/12/big/img_226 +2003/01/16/big/img_491 +2002/08/08/big/img_666 +2002/07/19/big/img_576 +2003/01/15/big/img_776 +2003/01/16/big/img_899 +2002/07/19/big/img_397 +2002/08/14/big/img_44 +2003/01/15/big/img_762 +2002/08/02/big/img_982 +2002/09/02/big/img_15234 +2002/08/17/big/img_556 +2002/08/21/big/img_410 +2002/08/21/big/img_386 +2002/07/19/big/img_690 +2002/08/05/big/img_3052 +2002/08/14/big/img_219 +2002/08/16/big/img_273 +2003/01/15/big/img_752 +2002/08/08/big/img_184 +2002/07/31/big/img_743 +2002/08/23/big/img_338 +2003/01/14/big/img_1055 +2002/08/05/big/img_3405 +2003/01/15/big/img_17 +2002/08/03/big/img_141 +2002/08/14/big/img_549 +2002/07/27/big/img_1034 +2002/07/31/big/img_932 +2002/08/30/big/img_18487 +2002/09/02/big/img_15814 +2002/08/01/big/img_2086 +2002/09/01/big/img_16535 +2002/07/22/big/img_500 +2003/01/13/big/img_400 +2002/08/25/big/img_607 +2002/08/30/big/img_18384 +2003/01/14/big/img_951 +2002/08/13/big/img_1150 +2002/08/08/big/img_1022 +2002/08/10/big/img_428 +2002/08/28/big/img_19242 +2002/08/05/big/img_3098 +2002/07/23/big/img_400 +2002/08/26/big/img_365 +2002/07/20/big/img_318 +2002/08/13/big/img_740 +2003/01/16/big/img_37 +2002/08/26/big/img_274 +2002/08/02/big/img_205 +2002/08/21/big/img_695 +2002/08/06/big/img_2289 +2002/08/20/big/img_794 +2002/08/18/big/img_438 +2002/08/07/big/img_1380 +2002/08/02/big/img_737 +2002/08/07/big/img_1651 +2002/08/15/big/img_1238 +2002/08/01/big/img_1681 +2002/08/06/big/img_3017 +2002/07/23/big/img_706 +2002/07/31/big/img_392 +2002/08/09/big/img_539 +2002/07/29/big/img_835 +2002/08/26/big/img_723 +2002/08/28/big/img_19235 +2003/01/16/big/img_353 +2002/08/10/big/img_150 +2002/08/29/big/img_19025 +2002/08/21/big/img_310 +2002/08/10/big/img_823 +2002/07/26/big/img_981 +2002/08/11/big/img_288 +2002/08/19/big/img_534 +2002/08/21/big/img_300 +2002/07/31/big/img_49 +2002/07/30/big/img_469 +2002/08/28/big/img_19197 +2002/08/25/big/img_205 +2002/08/10/big/img_390 +2002/08/23/big/img_291 +2002/08/26/big/img_230 +2002/08/18/big/img_76 +2002/07/23/big/img_409 +2002/08/14/big/img_1053 +2003/01/14/big/img_291 +2002/08/10/big/img_503 +2002/08/27/big/img_19928 +2002/08/03/big/img_563 +2002/08/17/big/img_250 +2002/08/06/big/img_2381 +2002/08/17/big/img_948 +2002/08/06/big/img_2710 +2002/07/22/big/img_696 +2002/07/31/big/img_670 +2002/08/12/big/img_594 
+2002/07/29/big/img_624 +2003/01/17/big/img_934 +2002/08/03/big/img_584 +2002/08/22/big/img_1003 +2002/08/05/big/img_3396 +2003/01/13/big/img_570 +2002/08/02/big/img_219 +2002/09/02/big/img_15774 +2002/08/16/big/img_818 +2002/08/23/big/img_402 +2003/01/14/big/img_552 +2002/07/29/big/img_71 +2002/08/05/big/img_3592 +2002/08/16/big/img_80 +2002/07/27/big/img_672 +2003/01/13/big/img_470 +2003/01/16/big/img_702 +2002/09/01/big/img_16130 +2002/08/08/big/img_240 +2002/09/01/big/img_16338 +2002/07/26/big/img_312 +2003/01/14/big/img_538 +2002/07/20/big/img_695 +2002/08/30/big/img_18098 +2002/08/25/big/img_259 +2002/08/16/big/img_1042 +2002/08/09/big/img_837 +2002/08/31/big/img_17760 +2002/07/31/big/img_14 +2002/08/09/big/img_361 +2003/01/16/big/img_107 +2002/08/14/big/img_124 +2002/07/19/big/img_463 +2003/01/15/big/img_275 +2002/07/25/big/img_1151 +2002/07/29/big/img_1501 +2002/08/27/big/img_19889 +2002/08/29/big/img_18603 +2003/01/17/big/img_601 +2002/08/25/big/img_355 +2002/08/08/big/img_297 +2002/08/20/big/img_290 +2002/07/31/big/img_195 +2003/01/01/big/img_336 +2002/08/18/big/img_369 +2002/07/25/big/img_621 +2002/08/11/big/img_508 +2003/01/14/big/img_458 +2003/01/15/big/img_795 +2002/08/12/big/img_498 +2002/08/01/big/img_1734 +2002/08/02/big/img_246 +2002/08/16/big/img_565 +2002/08/11/big/img_475 +2002/08/22/big/img_408 +2002/07/28/big/img_78 +2002/07/21/big/img_81 +2003/01/14/big/img_697 +2002/08/14/big/img_661 +2002/08/15/big/img_507 +2002/08/19/big/img_55 +2002/07/22/big/img_152 +2003/01/14/big/img_470 +2002/08/03/big/img_379 +2002/08/22/big/img_506 +2003/01/16/big/img_966 +2002/08/18/big/img_698 +2002/08/24/big/img_528 +2002/08/23/big/img_10 +2002/08/01/big/img_1655 +2002/08/22/big/img_953 +2002/07/19/big/img_630 +2002/07/22/big/img_889 +2002/08/16/big/img_351 +2003/01/16/big/img_83 +2002/07/19/big/img_805 +2002/08/14/big/img_704 +2002/07/19/big/img_389 +2002/08/31/big/img_17765 +2002/07/29/big/img_606 +2003/01/17/big/img_939 +2002/09/02/big/img_15081 +2002/08/21/big/img_181 +2002/07/29/big/img_1321 +2002/07/21/big/img_497 +2002/07/20/big/img_539 +2002/08/24/big/img_119 +2002/08/01/big/img_1281 +2002/07/26/big/img_207 +2002/07/26/big/img_432 +2002/07/27/big/img_1006 +2002/08/05/big/img_3087 +2002/08/14/big/img_252 +2002/08/14/big/img_798 +2002/07/24/big/img_538 +2002/09/02/big/img_15507 +2002/08/08/big/img_901 +2003/01/14/big/img_557 +2002/08/07/big/img_1819 +2002/08/04/big/img_470 +2002/08/01/big/img_1504 +2002/08/16/big/img_1070 +2002/08/16/big/img_372 +2002/08/23/big/img_416 +2002/08/30/big/img_18208 +2002/08/01/big/img_2043 +2002/07/22/big/img_385 +2002/08/22/big/img_466 +2002/08/21/big/img_869 +2002/08/28/big/img_19429 +2002/08/02/big/img_770 +2002/07/23/big/img_433 +2003/01/14/big/img_13 +2002/07/27/big/img_953 +2002/09/02/big/img_15728 +2002/08/01/big/img_1361 +2002/08/29/big/img_18897 +2002/08/26/big/img_534 +2002/08/11/big/img_121 +2002/08/26/big/img_20130 +2002/07/31/big/img_363 +2002/08/13/big/img_978 +2002/07/25/big/img_835 +2002/08/02/big/img_906 +2003/01/14/big/img_548 +2002/07/30/big/img_80 +2002/07/26/big/img_982 +2003/01/16/big/img_99 +2002/08/19/big/img_362 +2002/08/24/big/img_376 +2002/08/07/big/img_1264 +2002/07/27/big/img_938 +2003/01/17/big/img_535 +2002/07/26/big/img_457 +2002/08/08/big/img_848 +2003/01/15/big/img_859 +2003/01/15/big/img_622 +2002/07/30/big/img_403 +2002/07/29/big/img_217 +2002/07/26/big/img_891 +2002/07/24/big/img_70 +2002/08/25/big/img_619 +2002/08/05/big/img_3375 +2002/08/01/big/img_2160 +2002/08/06/big/img_2227 +2003/01/14/big/img_117 
+2002/08/14/big/img_227 +2002/08/13/big/img_565 +2002/08/19/big/img_625 +2002/08/03/big/img_812 +2002/07/24/big/img_41 +2002/08/16/big/img_235 +2002/07/29/big/img_759 +2002/07/21/big/img_433 +2002/07/29/big/img_190 +2003/01/16/big/img_435 +2003/01/13/big/img_708 +2002/07/30/big/img_57 +2002/08/22/big/img_162 +2003/01/01/big/img_558 +2003/01/15/big/img_604 +2002/08/16/big/img_935 +2002/08/20/big/img_394 +2002/07/28/big/img_465 +2002/09/02/big/img_15534 +2002/08/16/big/img_87 +2002/07/22/big/img_469 +2002/08/12/big/img_245 +2003/01/13/big/img_236 +2002/08/06/big/img_2736 +2002/08/03/big/img_348 +2003/01/14/big/img_218 +2002/07/26/big/img_232 +2003/01/15/big/img_244 +2002/07/25/big/img_1121 +2002/08/01/big/img_1484 +2002/07/26/big/img_541 +2002/08/07/big/img_1244 +2002/07/31/big/img_3 +2002/08/30/big/img_18437 +2002/08/29/big/img_19094 +2002/08/01/big/img_1355 +2002/08/19/big/img_338 +2002/07/19/big/img_255 +2002/07/21/big/img_76 +2002/08/25/big/img_199 +2002/08/12/big/img_740 +2002/07/30/big/img_852 +2002/08/15/big/img_599 +2002/08/23/big/img_254 +2002/08/19/big/img_125 +2002/07/24/big/img_2 +2002/08/04/big/img_145 +2002/08/05/big/img_3137 +2002/07/28/big/img_463 +2003/01/14/big/img_801 +2002/07/23/big/img_366 +2002/08/26/big/img_600 +2002/08/26/big/img_649 +2002/09/02/big/img_15849 +2002/07/26/big/img_248 +2003/01/13/big/img_200 +2002/08/07/big/img_1794 +2002/08/31/big/img_17270 +2002/08/23/big/img_608 +2003/01/13/big/img_837 +2002/08/23/big/img_581 +2002/08/20/big/img_754 +2002/08/18/big/img_183 +2002/08/20/big/img_328 +2002/07/22/big/img_494 +2002/07/29/big/img_399 +2002/08/28/big/img_19284 +2002/08/08/big/img_566 +2002/07/25/big/img_376 +2002/07/23/big/img_138 +2002/07/25/big/img_435 +2002/08/17/big/img_685 +2002/07/19/big/img_90 +2002/07/20/big/img_716 +2002/08/31/big/img_17458 +2002/08/26/big/img_461 +2002/07/25/big/img_355 +2002/08/06/big/img_2152 +2002/07/27/big/img_932 +2002/07/23/big/img_232 +2002/08/08/big/img_1020 +2002/07/31/big/img_366 +2002/08/06/big/img_2667 +2002/08/21/big/img_465 +2002/08/15/big/img_305 +2002/08/02/big/img_247 +2002/07/28/big/img_46 +2002/08/27/big/img_19922 +2002/08/23/big/img_643 +2003/01/13/big/img_624 +2002/08/23/big/img_625 +2002/08/05/big/img_3787 +2003/01/13/big/img_627 +2002/09/01/big/img_16381 +2002/08/05/big/img_3668 +2002/07/21/big/img_535 +2002/08/27/big/img_19680 +2002/07/22/big/img_413 +2002/07/29/big/img_481 +2003/01/15/big/img_496 +2002/07/23/big/img_701 +2002/08/29/big/img_18670 +2002/07/28/big/img_319 +2003/01/14/big/img_517 +2002/07/26/big/img_256 +2003/01/16/big/img_593 +2002/07/30/big/img_956 +2002/07/30/big/img_667 +2002/07/25/big/img_100 +2002/08/11/big/img_570 +2002/07/26/big/img_745 +2002/08/04/big/img_834 +2002/08/25/big/img_521 +2002/08/01/big/img_2148 +2002/09/02/big/img_15183 +2002/08/22/big/img_514 +2002/08/23/big/img_477 +2002/07/23/big/img_336 +2002/07/26/big/img_481 +2002/08/20/big/img_409 +2002/07/23/big/img_918 +2002/08/09/big/img_474 +2002/08/02/big/img_929 +2002/08/31/big/img_17932 +2002/08/19/big/img_161 +2002/08/09/big/img_667 +2002/07/31/big/img_805 +2002/09/02/big/img_15678 +2002/08/31/big/img_17509 +2002/08/29/big/img_18998 +2002/07/23/big/img_301 +2002/08/07/big/img_1612 +2002/08/06/big/img_2472 +2002/07/23/big/img_466 +2002/08/27/big/img_19634 +2003/01/16/big/img_16 +2002/08/14/big/img_193 +2002/08/21/big/img_340 +2002/08/27/big/img_19799 +2002/08/01/big/img_1345 +2002/08/07/big/img_1448 +2002/08/11/big/img_324 +2003/01/16/big/img_754 +2002/08/13/big/img_418 +2003/01/16/big/img_544 +2002/08/19/big/img_135 
+2002/08/10/big/img_455 +2002/08/10/big/img_693 +2002/08/31/big/img_17967 +2002/08/28/big/img_19229 +2002/08/04/big/img_811 +2002/09/01/big/img_16225 +2003/01/16/big/img_428 +2002/09/02/big/img_15295 +2002/07/26/big/img_108 +2002/07/21/big/img_477 +2002/08/07/big/img_1354 +2002/08/23/big/img_246 +2002/08/16/big/img_652 +2002/07/27/big/img_553 +2002/07/31/big/img_346 +2002/08/04/big/img_537 +2002/08/08/big/img_498 +2002/08/29/big/img_18956 +2003/01/13/big/img_922 +2002/08/31/big/img_17425 +2002/07/26/big/img_438 +2002/08/19/big/img_185 +2003/01/16/big/img_33 +2002/08/10/big/img_252 +2002/07/29/big/img_598 +2002/08/27/big/img_19820 +2002/08/06/big/img_2664 +2002/08/20/big/img_705 +2003/01/14/big/img_816 +2002/08/03/big/img_552 +2002/07/25/big/img_561 +2002/07/25/big/img_934 +2002/08/01/big/img_1893 +2003/01/14/big/img_746 +2003/01/16/big/img_519 +2002/08/03/big/img_681 +2002/07/24/big/img_808 +2002/08/14/big/img_803 +2002/08/25/big/img_155 +2002/07/30/big/img_1107 +2002/08/29/big/img_18882 +2003/01/15/big/img_598 +2002/08/19/big/img_122 +2002/07/30/big/img_428 +2002/07/24/big/img_684 +2002/08/22/big/img_192 +2002/08/22/big/img_543 +2002/08/07/big/img_1318 +2002/08/18/big/img_25 +2002/07/26/big/img_583 +2002/07/20/big/img_464 +2002/08/19/big/img_664 +2002/08/24/big/img_861 +2002/09/01/big/img_16136 +2002/08/22/big/img_400 +2002/08/12/big/img_445 +2003/01/14/big/img_174 +2002/08/27/big/img_19677 +2002/08/31/big/img_17214 +2002/08/30/big/img_18175 +2003/01/17/big/img_402 +2002/08/06/big/img_2396 +2002/08/18/big/img_448 +2002/08/21/big/img_165 +2002/08/31/big/img_17609 +2003/01/01/big/img_151 +2002/08/26/big/img_372 +2002/09/02/big/img_15994 +2002/07/26/big/img_660 +2002/09/02/big/img_15197 +2002/07/29/big/img_258 +2002/08/30/big/img_18525 +2003/01/13/big/img_368 +2002/07/29/big/img_1538 +2002/07/21/big/img_787 +2002/08/18/big/img_152 +2002/08/06/big/img_2379 +2003/01/17/big/img_864 +2002/08/27/big/img_19998 +2002/08/01/big/img_1634 +2002/07/25/big/img_414 +2002/08/22/big/img_627 +2002/08/07/big/img_1669 +2002/08/16/big/img_1052 +2002/08/31/big/img_17796 +2002/08/18/big/img_199 +2002/09/02/big/img_15147 +2002/08/09/big/img_460 +2002/08/14/big/img_581 +2002/08/30/big/img_18286 +2002/07/26/big/img_337 +2002/08/18/big/img_589 +2003/01/14/big/img_866 +2002/07/20/big/img_624 +2002/08/01/big/img_1801 +2002/07/24/big/img_683 +2002/08/09/big/img_725 +2003/01/14/big/img_34 +2002/07/30/big/img_144 +2002/07/30/big/img_706 +2002/08/08/big/img_394 +2002/08/19/big/img_619 +2002/08/06/big/img_2703 +2002/08/29/big/img_19034 +2002/07/24/big/img_67 +2002/08/27/big/img_19841 +2002/08/19/big/img_427 +2003/01/14/big/img_333 +2002/09/01/big/img_16406 +2002/07/19/big/img_882 +2002/08/17/big/img_238 +2003/01/14/big/img_739 +2002/07/22/big/img_151 +2002/08/21/big/img_743 +2002/07/25/big/img_1048 +2002/07/30/big/img_395 +2003/01/13/big/img_584 +2002/08/13/big/img_742 +2002/08/13/big/img_1168 +2003/01/14/big/img_147 +2002/07/26/big/img_803 +2002/08/05/big/img_3298 +2002/08/07/big/img_1451 +2002/08/16/big/img_424 +2002/07/29/big/img_1069 +2002/09/01/big/img_16735 +2002/07/21/big/img_637 +2003/01/14/big/img_585 +2002/08/02/big/img_358 +2003/01/13/big/img_358 +2002/08/14/big/img_198 +2002/08/17/big/img_935 +2002/08/04/big/img_42 +2002/08/30/big/img_18245 +2002/07/25/big/img_158 +2002/08/22/big/img_744 +2002/08/06/big/img_2291 +2002/08/05/big/img_3044 +2002/07/30/big/img_272 +2002/08/23/big/img_641 +2002/07/24/big/img_797 +2002/07/30/big/img_392 +2003/01/14/big/img_447 +2002/07/31/big/img_898 +2002/08/06/big/img_2812 
+2002/08/13/big/img_564 +2002/07/22/big/img_43 +2002/07/26/big/img_634 +2002/07/19/big/img_843 +2002/08/26/big/img_58 +2002/07/21/big/img_375 +2002/08/25/big/img_729 +2002/07/19/big/img_561 +2003/01/15/big/img_884 +2002/07/25/big/img_891 +2002/08/09/big/img_558 +2002/08/26/big/img_587 +2002/08/13/big/img_1146 +2002/09/02/big/img_15153 +2002/07/26/big/img_316 +2002/08/01/big/img_1940 +2002/08/26/big/img_90 +2003/01/13/big/img_347 +2002/07/25/big/img_520 +2002/08/29/big/img_18718 +2002/08/28/big/img_19219 +2002/08/13/big/img_375 +2002/07/20/big/img_719 +2002/08/31/big/img_17431 +2002/07/28/big/img_192 +2002/08/26/big/img_259 +2002/08/18/big/img_484 +2002/07/29/big/img_580 +2002/07/26/big/img_84 +2002/08/02/big/img_302 +2002/08/31/big/img_17007 +2003/01/15/big/img_543 +2002/09/01/big/img_16488 +2002/08/22/big/img_798 +2002/07/30/big/img_383 +2002/08/04/big/img_668 +2002/08/13/big/img_156 +2002/08/07/big/img_1353 +2002/07/25/big/img_281 +2003/01/14/big/img_587 +2003/01/15/big/img_524 +2002/08/19/big/img_726 +2002/08/21/big/img_709 +2002/08/26/big/img_465 +2002/07/31/big/img_658 +2002/08/28/big/img_19148 +2002/07/23/big/img_423 +2002/08/16/big/img_758 +2002/08/22/big/img_523 +2002/08/16/big/img_591 +2002/08/23/big/img_845 +2002/07/26/big/img_678 +2002/08/09/big/img_806 +2002/08/06/big/img_2369 +2002/07/29/big/img_457 +2002/07/19/big/img_278 +2002/08/30/big/img_18107 +2002/07/26/big/img_444 +2002/08/20/big/img_278 +2002/08/26/big/img_92 +2002/08/26/big/img_257 +2002/07/25/big/img_266 +2002/08/05/big/img_3829 +2002/07/26/big/img_757 +2002/07/29/big/img_1536 +2002/08/09/big/img_472 +2003/01/17/big/img_480 +2002/08/28/big/img_19355 +2002/07/26/big/img_97 +2002/08/06/big/img_2503 +2002/07/19/big/img_254 +2002/08/01/big/img_1470 +2002/08/21/big/img_42 +2002/08/20/big/img_217 +2002/08/06/big/img_2459 +2002/07/19/big/img_552 +2002/08/13/big/img_717 +2002/08/12/big/img_586 +2002/08/20/big/img_411 +2003/01/13/big/img_768 +2002/08/07/big/img_1747 +2002/08/15/big/img_385 +2002/08/01/big/img_1648 +2002/08/15/big/img_311 +2002/08/21/big/img_95 +2002/08/09/big/img_108 +2002/08/21/big/img_398 +2002/08/17/big/img_340 +2002/08/14/big/img_474 +2002/08/13/big/img_294 +2002/08/24/big/img_840 +2002/08/09/big/img_808 +2002/08/23/big/img_491 +2002/07/28/big/img_33 +2003/01/13/big/img_664 +2002/08/02/big/img_261 +2002/08/09/big/img_591 +2002/07/26/big/img_309 +2003/01/14/big/img_372 +2002/08/19/big/img_581 +2002/08/19/big/img_168 +2002/08/26/big/img_422 +2002/07/24/big/img_106 +2002/08/01/big/img_1936 +2002/08/05/big/img_3764 +2002/08/21/big/img_266 +2002/08/31/big/img_17968 +2002/08/01/big/img_1941 +2002/08/15/big/img_550 +2002/08/14/big/img_13 +2002/07/30/big/img_171 +2003/01/13/big/img_490 +2002/07/25/big/img_427 +2002/07/19/big/img_770 +2002/08/12/big/img_759 +2003/01/15/big/img_1360 +2002/08/05/big/img_3692 +2003/01/16/big/img_30 +2002/07/25/big/img_1026 +2002/07/22/big/img_288 +2002/08/29/big/img_18801 +2002/07/24/big/img_793 +2002/08/13/big/img_178 +2002/08/06/big/img_2322 +2003/01/14/big/img_560 +2002/08/18/big/img_408 +2003/01/16/big/img_915 +2003/01/16/big/img_679 +2002/08/07/big/img_1552 +2002/08/29/big/img_19050 +2002/08/01/big/img_2172 +2002/07/31/big/img_30 +2002/07/30/big/img_1019 +2002/07/30/big/img_587 +2003/01/13/big/img_773 +2002/07/30/big/img_410 +2002/07/28/big/img_65 +2002/08/05/big/img_3138 +2002/07/23/big/img_541 +2002/08/22/big/img_963 +2002/07/27/big/img_657 +2002/07/30/big/img_1051 +2003/01/16/big/img_150 +2002/07/31/big/img_519 +2002/08/01/big/img_1961 +2002/08/05/big/img_3752 
+2002/07/23/big/img_631 +2003/01/14/big/img_237 +2002/07/28/big/img_21 +2002/07/22/big/img_813 +2002/08/05/big/img_3563 +2003/01/17/big/img_620 +2002/07/19/big/img_523 +2002/07/30/big/img_904 +2002/08/29/big/img_18642 +2002/08/11/big/img_492 +2002/08/01/big/img_2130 +2002/07/25/big/img_618 +2002/08/17/big/img_305 +2003/01/16/big/img_520 +2002/07/26/big/img_495 +2002/08/17/big/img_164 +2002/08/03/big/img_440 +2002/07/24/big/img_441 +2002/08/06/big/img_2146 +2002/08/11/big/img_558 +2002/08/02/big/img_545 +2002/08/31/big/img_18090 +2003/01/01/big/img_136 +2002/07/25/big/img_1099 +2003/01/13/big/img_728 +2003/01/16/big/img_197 +2002/07/26/big/img_651 +2002/08/11/big/img_676 +2003/01/15/big/img_10 +2002/08/21/big/img_250 +2002/08/14/big/img_325 +2002/08/04/big/img_390 +2002/07/24/big/img_554 +2003/01/16/big/img_333 +2002/07/31/big/img_922 +2002/09/02/big/img_15586 +2003/01/16/big/img_184 +2002/07/22/big/img_766 +2002/07/21/big/img_608 +2002/08/07/big/img_1578 +2002/08/17/big/img_961 +2002/07/27/big/img_324 +2002/08/05/big/img_3765 +2002/08/23/big/img_462 +2003/01/16/big/img_382 +2002/08/27/big/img_19838 +2002/08/01/big/img_1505 +2002/08/21/big/img_662 +2002/08/14/big/img_605 +2002/08/19/big/img_816 +2002/07/29/big/img_136 +2002/08/20/big/img_719 +2002/08/06/big/img_2826 +2002/08/10/big/img_630 +2003/01/17/big/img_973 +2002/08/14/big/img_116 +2002/08/02/big/img_666 +2002/08/21/big/img_710 +2002/08/05/big/img_55 +2002/07/31/big/img_229 +2002/08/01/big/img_1549 +2002/07/23/big/img_432 +2002/07/21/big/img_430 +2002/08/21/big/img_549 +2002/08/08/big/img_985 +2002/07/20/big/img_610 +2002/07/23/big/img_978 +2002/08/23/big/img_219 +2002/07/25/big/img_175 +2003/01/15/big/img_230 +2002/08/23/big/img_385 +2002/07/31/big/img_879 +2002/08/12/big/img_495 +2002/08/22/big/img_499 +2002/08/30/big/img_18322 +2002/08/15/big/img_795 +2002/08/13/big/img_835 +2003/01/17/big/img_930 +2002/07/30/big/img_873 +2002/08/11/big/img_257 +2002/07/31/big/img_593 +2002/08/21/big/img_916 +2003/01/13/big/img_814 +2002/07/25/big/img_722 +2002/08/16/big/img_379 +2002/07/31/big/img_497 +2002/07/22/big/img_602 +2002/08/21/big/img_642 +2002/08/21/big/img_614 +2002/08/23/big/img_482 +2002/07/29/big/img_603 +2002/08/13/big/img_705 +2002/07/23/big/img_833 +2003/01/14/big/img_511 +2002/07/24/big/img_376 +2002/08/17/big/img_1030 +2002/08/05/big/img_3576 +2002/08/16/big/img_540 +2002/07/22/big/img_630 +2002/08/10/big/img_180 +2002/08/14/big/img_905 +2002/08/29/big/img_18777 +2002/08/22/big/img_693 +2003/01/16/big/img_933 +2002/08/20/big/img_555 +2002/08/15/big/img_549 +2003/01/14/big/img_830 +2003/01/16/big/img_64 +2002/08/27/big/img_19670 +2002/08/22/big/img_729 +2002/07/27/big/img_981 +2002/08/09/big/img_458 +2003/01/17/big/img_884 +2002/07/25/big/img_639 +2002/08/31/big/img_18008 +2002/08/22/big/img_249 +2002/08/17/big/img_971 +2002/08/04/big/img_308 +2002/07/28/big/img_362 +2002/08/12/big/img_142 +2002/08/26/big/img_61 +2002/08/14/big/img_422 +2002/07/19/big/img_607 +2003/01/15/big/img_717 +2002/08/01/big/img_1475 +2002/08/29/big/img_19061 +2003/01/01/big/img_346 +2002/07/20/big/img_315 +2003/01/15/big/img_756 +2002/08/15/big/img_879 +2002/08/08/big/img_615 +2003/01/13/big/img_431 +2002/08/05/big/img_3233 +2002/08/24/big/img_526 +2003/01/13/big/img_717 +2002/09/01/big/img_16408 +2002/07/22/big/img_217 +2002/07/31/big/img_960 +2002/08/21/big/img_610 +2002/08/05/big/img_3753 +2002/08/03/big/img_151 +2002/08/21/big/img_267 +2002/08/01/big/img_2175 +2002/08/04/big/img_556 +2002/08/21/big/img_527 +2002/09/02/big/img_15800 
+2002/07/27/big/img_156 +2002/07/20/big/img_590 +2002/08/15/big/img_700 +2002/08/08/big/img_444 +2002/07/25/big/img_94 +2002/07/24/big/img_778 +2002/08/14/big/img_694 +2002/07/20/big/img_666 +2002/08/02/big/img_200 +2002/08/02/big/img_578 +2003/01/17/big/img_332 +2002/09/01/big/img_16352 +2002/08/27/big/img_19668 +2002/07/23/big/img_823 +2002/08/13/big/img_431 +2003/01/16/big/img_463 +2002/08/27/big/img_19711 +2002/08/23/big/img_154 +2002/07/31/big/img_360 +2002/08/23/big/img_555 +2002/08/10/big/img_561 +2003/01/14/big/img_550 +2002/08/07/big/img_1370 +2002/07/30/big/img_1184 +2002/08/01/big/img_1445 +2002/08/23/big/img_22 +2002/07/30/big/img_606 +2003/01/17/big/img_271 +2002/08/31/big/img_17316 +2002/08/16/big/img_973 +2002/07/26/big/img_77 +2002/07/20/big/img_788 +2002/08/06/big/img_2426 +2002/08/07/big/img_1498 +2002/08/16/big/img_358 +2002/08/06/big/img_2851 +2002/08/12/big/img_359 +2002/08/01/big/img_1521 +2002/08/02/big/img_709 +2002/08/20/big/img_935 +2002/08/12/big/img_188 +2002/08/24/big/img_411 +2002/08/22/big/img_680 +2002/08/06/big/img_2480 +2002/07/20/big/img_627 +2002/07/30/big/img_214 +2002/07/25/big/img_354 +2002/08/02/big/img_636 +2003/01/15/big/img_661 +2002/08/07/big/img_1327 +2002/08/01/big/img_2108 +2002/08/31/big/img_17919 +2002/08/29/big/img_18768 +2002/08/05/big/img_3840 +2002/07/26/big/img_242 +2003/01/14/big/img_451 +2002/08/20/big/img_923 +2002/08/27/big/img_19908 +2002/08/16/big/img_282 +2002/08/19/big/img_440 +2003/01/01/big/img_230 +2002/08/08/big/img_212 +2002/07/20/big/img_443 +2002/08/25/big/img_635 +2003/01/13/big/img_1169 +2002/07/26/big/img_998 +2002/08/15/big/img_995 +2002/08/06/big/img_3002 +2002/07/29/big/img_460 +2003/01/14/big/img_925 +2002/07/23/big/img_539 +2002/08/16/big/img_694 +2003/01/13/big/img_459 +2002/07/23/big/img_249 +2002/08/20/big/img_539 +2002/08/04/big/img_186 +2002/08/26/big/img_264 +2002/07/22/big/img_704 +2002/08/25/big/img_277 +2002/08/22/big/img_988 +2002/07/29/big/img_504 +2002/08/05/big/img_3600 +2002/08/30/big/img_18380 +2003/01/14/big/img_937 +2002/08/21/big/img_254 +2002/08/10/big/img_130 +2002/08/20/big/img_339 +2003/01/14/big/img_428 +2002/08/20/big/img_889 +2002/08/31/big/img_17637 +2002/07/26/big/img_644 +2002/09/01/big/img_16776 +2002/08/06/big/img_2239 +2002/08/06/big/img_2646 +2003/01/13/big/img_491 +2002/08/10/big/img_579 +2002/08/21/big/img_713 +2002/08/22/big/img_482 +2002/07/22/big/img_167 +2002/07/24/big/img_539 +2002/08/14/big/img_721 +2002/07/25/big/img_389 +2002/09/01/big/img_16591 +2002/08/13/big/img_543 +2003/01/14/big/img_432 +2002/08/09/big/img_287 +2002/07/26/big/img_126 +2002/08/23/big/img_412 +2002/08/15/big/img_1034 +2002/08/28/big/img_19485 +2002/07/31/big/img_236 +2002/07/30/big/img_523 +2002/07/19/big/img_141 +2003/01/17/big/img_957 +2002/08/04/big/img_81 +2002/07/25/big/img_206 +2002/08/15/big/img_716 +2002/08/13/big/img_403 +2002/08/15/big/img_685 +2002/07/26/big/img_884 +2002/07/19/big/img_499 +2002/07/23/big/img_772 +2002/07/27/big/img_752 +2003/01/14/big/img_493 +2002/08/25/big/img_664 +2002/07/31/big/img_334 +2002/08/26/big/img_678 +2002/09/01/big/img_16541 +2003/01/14/big/img_347 +2002/07/23/big/img_187 +2002/07/30/big/img_1163 +2002/08/05/big/img_35 +2002/08/22/big/img_944 +2002/08/07/big/img_1239 +2002/07/29/big/img_1215 +2002/08/03/big/img_312 +2002/08/05/big/img_3523 +2002/07/29/big/img_218 +2002/08/13/big/img_672 +2002/08/16/big/img_205 +2002/08/17/big/img_594 +2002/07/29/big/img_1411 +2002/07/30/big/img_942 +2003/01/16/big/img_312 +2002/08/08/big/img_312 +2002/07/25/big/img_15 
+2002/08/09/big/img_839 +2002/08/01/big/img_2069 +2002/08/31/big/img_17512 +2002/08/01/big/img_3 +2002/07/31/big/img_320 +2003/01/15/big/img_1265 +2002/08/14/big/img_563 +2002/07/31/big/img_167 +2002/08/20/big/img_374 +2002/08/13/big/img_406 +2002/08/08/big/img_625 +2002/08/02/big/img_314 +2002/08/27/big/img_19964 +2002/09/01/big/img_16670 +2002/07/31/big/img_599 +2002/08/29/big/img_18906 +2002/07/24/big/img_373 +2002/07/26/big/img_513 +2002/09/02/big/img_15497 +2002/08/19/big/img_117 +2003/01/01/big/img_158 +2002/08/24/big/img_178 +2003/01/13/big/img_935 +2002/08/13/big/img_609 +2002/08/30/big/img_18341 +2002/08/25/big/img_674 +2003/01/13/big/img_209 +2002/08/13/big/img_258 +2002/08/05/big/img_3543 +2002/08/07/big/img_1970 +2002/08/06/big/img_3004 +2003/01/17/big/img_487 +2002/08/24/big/img_873 +2002/08/29/big/img_18730 +2002/08/09/big/img_375 +2003/01/16/big/img_751 +2002/08/02/big/img_603 +2002/08/19/big/img_325 +2002/09/01/big/img_16420 +2002/08/05/big/img_3633 +2002/08/21/big/img_516 +2002/07/19/big/img_501 +2002/07/26/big/img_688 +2002/07/24/big/img_256 +2002/07/25/big/img_438 +2002/07/31/big/img_1017 +2002/08/22/big/img_512 +2002/07/21/big/img_543 +2002/08/08/big/img_223 +2002/08/19/big/img_189 +2002/08/12/big/img_630 +2002/07/30/big/img_958 +2002/07/28/big/img_208 +2002/08/31/big/img_17691 +2002/07/22/big/img_542 +2002/07/19/big/img_741 +2002/07/19/big/img_158 +2002/08/15/big/img_399 +2002/08/01/big/img_2159 +2002/08/14/big/img_455 +2002/08/17/big/img_1011 +2002/08/26/big/img_744 +2002/08/12/big/img_624 +2003/01/17/big/img_821 +2002/08/16/big/img_980 +2002/07/28/big/img_281 +2002/07/25/big/img_171 +2002/08/03/big/img_116 +2002/07/22/big/img_467 +2002/07/31/big/img_750 +2002/07/26/big/img_435 +2002/07/19/big/img_822 +2002/08/13/big/img_626 +2002/08/11/big/img_344 +2002/08/02/big/img_473 +2002/09/01/big/img_16817 +2002/08/01/big/img_1275 +2002/08/28/big/img_19270 +2002/07/23/big/img_607 +2002/08/09/big/img_316 +2002/07/29/big/img_626 +2002/07/24/big/img_824 +2002/07/22/big/img_342 +2002/08/08/big/img_794 +2002/08/07/big/img_1209 +2002/07/19/big/img_18 +2002/08/25/big/img_634 +2002/07/24/big/img_730 +2003/01/17/big/img_356 +2002/07/23/big/img_305 +2002/07/30/big/img_453 +2003/01/13/big/img_972 +2002/08/06/big/img_2610 +2002/08/29/big/img_18920 +2002/07/31/big/img_123 +2002/07/26/big/img_979 +2002/08/24/big/img_635 +2002/08/05/big/img_3704 +2002/08/07/big/img_1358 +2002/07/22/big/img_306 +2002/08/13/big/img_619 +2002/08/02/big/img_366 diff --git a/data/__init__.py b/data/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ea50ebaf88d64e75f4960bc99b14f138a343e575 --- /dev/null +++ b/data/__init__.py @@ -0,0 +1,3 @@ +from .wider_face import WiderFaceDetection, detection_collate +from .data_augment import * +from .config import * diff --git a/data/__pycache__/__init__.cpython-39.pyc b/data/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b43c0981bd37e13c4bcee38582737c6a82a1744e Binary files /dev/null and b/data/__pycache__/__init__.cpython-39.pyc differ diff --git a/data/__pycache__/config.cpython-39.pyc b/data/__pycache__/config.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d24df830217345b8805f26cb9b0e3db155aed739 Binary files /dev/null and b/data/__pycache__/config.cpython-39.pyc differ diff --git a/data/__pycache__/data_augment.cpython-39.pyc b/data/__pycache__/data_augment.cpython-39.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..e0d705477d363ee934ac888eec766e8406ec90bf Binary files /dev/null and b/data/__pycache__/data_augment.cpython-39.pyc differ diff --git a/data/__pycache__/wider_face.cpython-39.pyc b/data/__pycache__/wider_face.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4dfec18cf6f09449138130e1c61778d48e208af7 Binary files /dev/null and b/data/__pycache__/wider_face.cpython-39.pyc differ diff --git a/data/config.py b/data/config.py new file mode 100644 index 0000000000000000000000000000000000000000..591f34911aa10af4015bad988c2c56c980fda885 --- /dev/null +++ b/data/config.py @@ -0,0 +1,42 @@ +# config.py + +cfg_mnet = { + 'name': 'mobilenet0.25', + 'min_sizes': [[16, 32], [64, 128], [256, 512]], + 'steps': [8, 16, 32], + 'variance': [0.1, 0.2], + 'clip': False, + 'loc_weight': 2.0, + 'gpu_train': True, + 'batch_size': 32, + 'ngpu': 1, + 'epoch': 250, + 'decay1': 190, + 'decay2': 220, + 'image_size': 640, + 'pretrain': True, + 'return_layers': {'stage1': 1, 'stage2': 2, 'stage3': 3}, + 'in_channel': 32, + 'out_channel': 64 +} + +cfg_re50 = { + 'name': 'Resnet50', + 'min_sizes': [[16, 32], [64, 128], [256, 512]], + 'steps': [8, 16, 32], + 'variance': [0.1, 0.2], + 'clip': False, + 'loc_weight': 2.0, + 'gpu_train': True, + 'batch_size': 24, + 'ngpu': 4, + 'epoch': 100, + 'decay1': 70, + 'decay2': 90, + 'image_size': 840, + 'pretrain': True, + 'return_layers': {'layer2': 1, 'layer3': 2, 'layer4': 3}, + 'in_channel': 256, + 'out_channel': 256 +} + diff --git a/data/data_augment.py b/data/data_augment.py new file mode 100644 index 0000000000000000000000000000000000000000..c1b52ae19bf8d9ac3fa256b68730ce1b556c6d6e --- /dev/null +++ b/data/data_augment.py @@ -0,0 +1,237 @@ +import cv2 +import numpy as np +import random +from utils.box_utils import matrix_iof + + +def _crop(image, boxes, labels, landm, img_dim): + height, width, _ = image.shape + pad_image_flag = True + + for _ in range(250): + """ + if random.uniform(0, 1) <= 0.2: + scale = 1.0 + else: + scale = random.uniform(0.3, 1.0) + """ + PRE_SCALES = [0.3, 0.45, 0.6, 0.8, 1.0] + scale = random.choice(PRE_SCALES) + short_side = min(width, height) + w = int(scale * short_side) + h = w + + if width == w: + l = 0 + else: + l = random.randrange(width - w) + if height == h: + t = 0 + else: + t = random.randrange(height - h) + roi = np.array((l, t, l + w, t + h)) + + value = matrix_iof(boxes, roi[np.newaxis]) + flag = (value >= 1) + if not flag.any(): + continue + + centers = (boxes[:, :2] + boxes[:, 2:]) / 2 + mask_a = np.logical_and(roi[:2] < centers, centers < roi[2:]).all(axis=1) + boxes_t = boxes[mask_a].copy() + labels_t = labels[mask_a].copy() + landms_t = landm[mask_a].copy() + landms_t = landms_t.reshape([-1, 5, 2]) + + if boxes_t.shape[0] == 0: + continue + + image_t = image[roi[1]:roi[3], roi[0]:roi[2]] + + boxes_t[:, :2] = np.maximum(boxes_t[:, :2], roi[:2]) + boxes_t[:, :2] -= roi[:2] + boxes_t[:, 2:] = np.minimum(boxes_t[:, 2:], roi[2:]) + boxes_t[:, 2:] -= roi[:2] + + # landm + landms_t[:, :, :2] = landms_t[:, :, :2] - roi[:2] + landms_t[:, :, :2] = np.maximum(landms_t[:, :, :2], np.array([0, 0])) + landms_t[:, :, :2] = np.minimum(landms_t[:, :, :2], roi[2:] - roi[:2]) + landms_t = landms_t.reshape([-1, 10]) + + + # make sure that the cropped image contains at least one face > 16 pixel at training image scale + b_w_t = (boxes_t[:, 2] - boxes_t[:, 0] + 1) / w * img_dim + b_h_t = (boxes_t[:, 3] - boxes_t[:, 1] + 1) / h * img_dim + mask_b = np.minimum(b_w_t, b_h_t) > 0.0 + 
boxes_t = boxes_t[mask_b] + labels_t = labels_t[mask_b] + landms_t = landms_t[mask_b] + + if boxes_t.shape[0] == 0: + continue + + pad_image_flag = False + + return image_t, boxes_t, labels_t, landms_t, pad_image_flag + return image, boxes, labels, landm, pad_image_flag + + +def _distort(image): + + def _convert(image, alpha=1, beta=0): + tmp = image.astype(float) * alpha + beta + tmp[tmp < 0] = 0 + tmp[tmp > 255] = 255 + image[:] = tmp + + image = image.copy() + + if random.randrange(2): + + #brightness distortion + if random.randrange(2): + _convert(image, beta=random.uniform(-32, 32)) + + #contrast distortion + if random.randrange(2): + _convert(image, alpha=random.uniform(0.5, 1.5)) + + image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV) + + #saturation distortion + if random.randrange(2): + _convert(image[:, :, 1], alpha=random.uniform(0.5, 1.5)) + + #hue distortion + if random.randrange(2): + tmp = image[:, :, 0].astype(int) + random.randint(-18, 18) + tmp %= 180 + image[:, :, 0] = tmp + + image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR) + + else: + + #brightness distortion + if random.randrange(2): + _convert(image, beta=random.uniform(-32, 32)) + + image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV) + + #saturation distortion + if random.randrange(2): + _convert(image[:, :, 1], alpha=random.uniform(0.5, 1.5)) + + #hue distortion + if random.randrange(2): + tmp = image[:, :, 0].astype(int) + random.randint(-18, 18) + tmp %= 180 + image[:, :, 0] = tmp + + image = cv2.cvtColor(image, cv2.COLOR_HSV2BGR) + + #contrast distortion + if random.randrange(2): + _convert(image, alpha=random.uniform(0.5, 1.5)) + + return image + + +def _expand(image, boxes, fill, p): + if random.randrange(2): + return image, boxes + + height, width, depth = image.shape + + scale = random.uniform(1, p) + w = int(scale * width) + h = int(scale * height) + + left = random.randint(0, w - width) + top = random.randint(0, h - height) + + boxes_t = boxes.copy() + boxes_t[:, :2] += (left, top) + boxes_t[:, 2:] += (left, top) + expand_image = np.empty( + (h, w, depth), + dtype=image.dtype) + expand_image[:, :] = fill + expand_image[top:top + height, left:left + width] = image + image = expand_image + + return image, boxes_t + + +def _mirror(image, boxes, landms): + _, width, _ = image.shape + if random.randrange(2): + image = image[:, ::-1] + boxes = boxes.copy() + boxes[:, 0::2] = width - boxes[:, 2::-2] + + # landm + landms = landms.copy() + landms = landms.reshape([-1, 5, 2]) + landms[:, :, 0] = width - landms[:, :, 0] + tmp = landms[:, 1, :].copy() + landms[:, 1, :] = landms[:, 0, :] + landms[:, 0, :] = tmp + tmp1 = landms[:, 4, :].copy() + landms[:, 4, :] = landms[:, 3, :] + landms[:, 3, :] = tmp1 + landms = landms.reshape([-1, 10]) + + return image, boxes, landms + + +def _pad_to_square(image, rgb_mean, pad_image_flag): + if not pad_image_flag: + return image + height, width, _ = image.shape + long_side = max(width, height) + image_t = np.empty((long_side, long_side, 3), dtype=image.dtype) + image_t[:, :] = rgb_mean + image_t[0:0 + height, 0:0 + width] = image + return image_t + + +def _resize_subtract_mean(image, insize, rgb_mean): + interp_methods = [cv2.INTER_LINEAR, cv2.INTER_CUBIC, cv2.INTER_AREA, cv2.INTER_NEAREST, cv2.INTER_LANCZOS4] + interp_method = interp_methods[random.randrange(5)] + image = cv2.resize(image, (insize, insize), interpolation=interp_method) + image = image.astype(np.float32) + image -= rgb_mean + return image.transpose(2, 0, 1) + + +class preproc(object): + + def __init__(self, img_dim, rgb_means): 
+ self.img_dim = img_dim + self.rgb_means = rgb_means + + def __call__(self, image, targets): + assert targets.shape[0] > 0, "this image does not have gt" + + boxes = targets[:, :4].copy() + labels = targets[:, -1].copy() + landm = targets[:, 4:-1].copy() + + image_t, boxes_t, labels_t, landm_t, pad_image_flag = _crop(image, boxes, labels, landm, self.img_dim) + image_t = _distort(image_t) + image_t = _pad_to_square(image_t,self.rgb_means, pad_image_flag) + image_t, boxes_t, landm_t = _mirror(image_t, boxes_t, landm_t) + height, width, _ = image_t.shape + image_t = _resize_subtract_mean(image_t, self.img_dim, self.rgb_means) + boxes_t[:, 0::2] /= width + boxes_t[:, 1::2] /= height + + landm_t[:, 0::2] /= width + landm_t[:, 1::2] /= height + + labels_t = np.expand_dims(labels_t, 1) + targets_t = np.hstack((boxes_t, landm_t, labels_t)) + + return image_t, targets_t diff --git a/data/wider_face.py b/data/wider_face.py new file mode 100644 index 0000000000000000000000000000000000000000..22f56efdc221bd4162d22884669ba44a3d4de5cd --- /dev/null +++ b/data/wider_face.py @@ -0,0 +1,101 @@ +import os +import os.path +import sys +import torch +import torch.utils.data as data +import cv2 +import numpy as np + +class WiderFaceDetection(data.Dataset): + def __init__(self, txt_path, preproc=None): + self.preproc = preproc + self.imgs_path = [] + self.words = [] + f = open(txt_path,'r') + lines = f.readlines() + isFirst = True + labels = [] + for line in lines: + line = line.rstrip() + if line.startswith('#'): + if isFirst is True: + isFirst = False + else: + labels_copy = labels.copy() + self.words.append(labels_copy) + labels.clear() + path = line[2:] + path = txt_path.replace('label.txt','images/') + path + self.imgs_path.append(path) + else: + line = line.split(' ') + label = [float(x) for x in line] + labels.append(label) + + self.words.append(labels) + + def __len__(self): + return len(self.imgs_path) + + def __getitem__(self, index): + img = cv2.imread(self.imgs_path[index]) + height, width, _ = img.shape + + labels = self.words[index] + annotations = np.zeros((0, 15)) + if len(labels) == 0: + return annotations + for idx, label in enumerate(labels): + annotation = np.zeros((1, 15)) + # bbox + annotation[0, 0] = label[0] # x1 + annotation[0, 1] = label[1] # y1 + annotation[0, 2] = label[0] + label[2] # x2 + annotation[0, 3] = label[1] + label[3] # y2 + + # landmarks + annotation[0, 4] = label[4] # l0_x + annotation[0, 5] = label[5] # l0_y + annotation[0, 6] = label[7] # l1_x + annotation[0, 7] = label[8] # l1_y + annotation[0, 8] = label[10] # l2_x + annotation[0, 9] = label[11] # l2_y + annotation[0, 10] = label[13] # l3_x + annotation[0, 11] = label[14] # l3_y + annotation[0, 12] = label[16] # l4_x + annotation[0, 13] = label[17] # l4_y + if (annotation[0, 4]<0): + annotation[0, 14] = -1 + else: + annotation[0, 14] = 1 + + annotations = np.append(annotations, annotation, axis=0) + target = np.array(annotations) + if self.preproc is not None: + img, target = self.preproc(img, target) + + return torch.from_numpy(img), target + +def detection_collate(batch): + """Custom collate fn for dealing with batches of images that have a different + number of associated object annotations (bounding boxes). 
+ + Arguments: + batch: (tuple) A tuple of tensor images and lists of annotations + + Return: + A tuple containing: + 1) (tensor) batch of images stacked on their 0 dim + 2) (list of tensors) annotations for a given image are stacked on 0 dim + """ + targets = [] + imgs = [] + for _, sample in enumerate(batch): + for _, tup in enumerate(sample): + if torch.is_tensor(tup): + imgs.append(tup) + elif isinstance(tup, type(np.empty(0))): + annos = torch.from_numpy(tup).float() + targets.append(annos) + + return (torch.stack(imgs, 0), targets) diff --git a/dataset/__init__.py b/dataset/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..27584c89c9379621bffb513aafafe2cc1bd41b8c --- /dev/null +++ b/dataset/__init__.py @@ -0,0 +1,17 @@ +from .abstract_dataset import AbstractDataset +from .faceforensics import FaceForensics +from .wild_deepfake import WildDeepfake +from .celeb_df import CelebDF +from .dfdc import DFDC + +LOADERS = { + "FaceForensics": FaceForensics, + "WildDeepfake": WildDeepfake, + "CelebDF": CelebDF, + "DFDC": DFDC, +} + + +def load_dataset(name="FaceForensics"): + print(f"Loading dataset: '{name}'...") + return LOADERS[name] diff --git a/dataset/abstract_dataset.py b/dataset/abstract_dataset.py new file mode 100644 index 0000000000000000000000000000000000000000..5b029f8c2b3d0a41c383abfd235ceb41e028e8fc --- /dev/null +++ b/dataset/abstract_dataset.py @@ -0,0 +1,41 @@ +import cv2 +import torch +import numpy as np +from torchvision.datasets import VisionDataset +import albumentations +from albumentations import Compose +from albumentations.pytorch.transforms import ToTensorV2 + + +class AbstractDataset(VisionDataset): + def __init__(self, cfg, seed=2022, transforms=None, transform=None, target_transform=None): + super(AbstractDataset, self).__init__(cfg['root'], transforms=transforms, + transform=transform, target_transform=target_transform) + # fix for re-production + np.random.seed(seed) + + self.images = list() + self.targets = list() + self.split = cfg['split'] + if self.transforms is None: + self.transforms = Compose( + [getattr(albumentations, _['name'])(**_['params']) for _ in cfg['transforms']] + + [ToTensorV2()] + ) + + def __len__(self): + return len(self.images) + + def __getitem__(self, index): + path = self.images[index] + tgt = self.targets[index] + return path, tgt + + def load_item(self, items): + images = list() + for item in items: + img = cv2.imread(item) + img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) + image = self.transforms(image=img)['image'] + images.append(image) + return torch.stack(images, dim=0) diff --git a/dataset/celeb_df.py b/dataset/celeb_df.py new file mode 100644 index 0000000000000000000000000000000000000000..f5bd36653918e0da454c7bf5988561a2b9d885a8 --- /dev/null +++ b/dataset/celeb_df.py @@ -0,0 +1,126 @@ +import numpy as np +from glob import glob +from os import listdir +from os.path import join +from dataset import AbstractDataset + +SPLITS = ["train", "test"] + + +class CelebDF(AbstractDataset): + """ + Celeb-DF v2 Dataset proposed in "Celeb-DF: A Large-scale Challenging Dataset for DeepFake Forensics". 
+ """ + + def __init__(self, cfg, seed=2022, transforms=None, transform=None, target_transform=None): + # pre-check + if cfg['split'] not in SPLITS: + raise ValueError(f"split should be one of {SPLITS}, but found {cfg['split']}.") + super(CelebDF, self).__init__(cfg, seed, transforms, transform, target_transform) + print(f"Loading data from 'Celeb-DF' of split '{cfg['split']}'" + f"\nPlease wait patiently...") + self.categories = ['original', 'fake'] + self.root = cfg['root'] + images_ids = self.__get_images_ids() + test_ids = self.__get_test_ids() + train_ids = [images_ids[0] - test_ids[0], + images_ids[1] - test_ids[1], + images_ids[2] - test_ids[2]] + self.images, self.targets = self.__get_images( + test_ids if cfg['split'] == "test" else train_ids, cfg['balance']) + assert len(self.images) == len(self.targets), "The number of images and targets not consistent." + print("Data from 'Celeb-DF' loaded.\n") + print(f"Dataset contains {len(self.images)} images.\n") + + def __get_images_ids(self): + youtube_real = listdir(join(self.root, 'YouTube-real', 'images')) + celeb_real = listdir(join(self.root, 'Celeb-real', 'images')) + celeb_fake = listdir(join(self.root, 'Celeb-synthesis', 'images')) + return set(youtube_real), set(celeb_real), set(celeb_fake) + + def __get_test_ids(self): + youtube_real = set() + celeb_real = set() + celeb_fake = set() + with open(join(self.root, "List_of_testing_videos.txt"), "r", encoding="utf-8") as f: + contents = f.readlines() + for line in contents: + name = line.split(" ")[-1] + number = name.split("/")[-1].split(".")[0] + if "YouTube-real" in name: + youtube_real.add(number) + elif "Celeb-real" in name: + celeb_real.add(number) + elif "Celeb-synthesis" in name: + celeb_fake.add(number) + else: + raise ValueError("'List_of_testing_videos.txt' file corrupted.") + return youtube_real, celeb_real, celeb_fake + + def __get_images(self, ids, balance=False): + real = list() + fake = list() + # YouTube-real + for _ in ids[0]: + real.extend(glob(join(self.root, 'YouTube-real', 'images', _, '*.png'))) + # Celeb-real + for _ in ids[1]: + real.extend(glob(join(self.root, 'Celeb-real', 'images', _, '*.png'))) + # Celeb-synthesis + for _ in ids[2]: + fake.extend(glob(join(self.root, 'Celeb-synthesis', 'images', _, '*.png'))) + print(f"Real: {len(real)}, Fake: {len(fake)}") + if balance: + fake = np.random.choice(fake, size=len(real), replace=False) + print(f"After Balance | Real: {len(real)}, Fake: {len(fake)}") + real_tgt = [0] * len(real) + fake_tgt = [1] * len(fake) + return [*real, *fake], [*real_tgt, *fake_tgt] + + +if __name__ == '__main__': + import yaml + + config_path = "../config/dataset/celeb_df.yml" + with open(config_path) as config_file: + config = yaml.load(config_file, Loader=yaml.FullLoader) + config = config["train_cfg"] + # config = config["test_cfg"] + + def run_dataset(): + dataset = CelebDF(config) + print(f"dataset: {len(dataset)}") + for i, _ in enumerate(dataset): + path, target = _ + print(f"path: {path}, target: {target}") + if i >= 9: + break + + + def run_dataloader(display_samples=False): + from torch.utils import data + import matplotlib.pyplot as plt + + dataset = CelebDF(config) + dataloader = data.DataLoader(dataset, batch_size=8, shuffle=True) + print(f"dataset: {len(dataset)}") + for i, _ in enumerate(dataloader): + path, targets = _ + image = dataloader.dataset.load_item(path) + print(f"image: {image.shape}, target: {targets}") + if display_samples: + plt.figure() + img = image[0].permute([1, 2, 0]).numpy() + plt.imshow(img) + # 
plt.savefig("./img_" + str(i) + ".png") + plt.show() + if i >= 9: + break + + + ########################### + # run the functions below # + ########################### + + # run_dataset() + run_dataloader(False) diff --git a/dataset/dfdc.py b/dataset/dfdc.py new file mode 100644 index 0000000000000000000000000000000000000000..098ede98fbe30ffb9afb66daed0416a4458dbd55 --- /dev/null +++ b/dataset/dfdc.py @@ -0,0 +1,124 @@ +import json +from glob import glob +from os.path import join +from dataset import AbstractDataset + +SPLIT = ["train", "val", "test"] +LABEL_MAP = {"REAL": 0, "FAKE": 1} + + +class DFDC(AbstractDataset): + """ + Deepfake Detection Challenge organized by Facebook + """ + + def __init__(self, cfg, seed=2022, transforms=None, transform=None, target_transform=None): + # pre-check + if cfg['split'] not in SPLIT: + raise ValueError(f"split should be one of {SPLIT}, but found {cfg['split']}.") + super(DFDC, self).__init__(cfg, seed, transforms, transform, target_transform) + print(f"Loading data from 'DFDC' of split '{cfg['split']}'" + f"\nPlease wait patiently...") + self.categories = ['original', 'fake'] + self.root = cfg['root'] + self.num_real = 0 + self.num_fake = 0 + if self.split == "test": + self.__load_test_data() + elif self.split == "train": + self.__load_train_data() + assert len(self.images) == len(self.targets), "Length of images and targets not the same!" + print(f"Data from 'DFDC' loaded.") + print(f"Real: {self.num_real}, Fake: {self.num_fake}.") + print(f"Dataset contains {len(self.images)} images\n") + + def __load_test_data(self): + label_path = join(self.root, "test", "labels.csv") + with open(label_path, encoding="utf-8") as file: + content = file.readlines() + for _ in content: + if ".mp4" in _: + key = _.split(".")[0] + label = _.split(",")[1].strip() + label = int(label) + imgs = glob(join(self.root, "test", "images", key, "*.png")) + num = len(imgs) + self.images.extend(imgs) + self.targets.extend([label] * num) + if label == 0: + self.num_real += num + elif label == 1: + self.num_fake += num + + def __load_train_data(self): + train_folds = glob(join(self.root, "dfdc_train_part_*")) + for fold in train_folds: + fold_imgs = list() + fold_tgts = list() + metadata_path = join(fold, "metadata.json") + try: + with open(metadata_path, "r", encoding="utf-8") as file: + metadata = json.loads(file.readline()) + for k, v in metadata.items(): + index = k.split(".")[0] + label = LABEL_MAP[v["label"]] + imgs = glob(join(fold, "images", index, "*.png")) + fold_imgs.extend(imgs) + fold_tgts.extend([label] * len(imgs)) + if label == 0: + self.num_real += len(imgs) + elif label == 1: + self.num_fake += len(imgs) + self.images.extend(fold_imgs) + self.targets.extend(fold_tgts) + except FileNotFoundError: + continue + + +if __name__ == '__main__': + import yaml + + config_path = "../config/dataset/dfdc.yml" + with open(config_path) as config_file: + config = yaml.load(config_file, Loader=yaml.FullLoader) + config = config["train_cfg"] + # config = config["test_cfg"] + + + def run_dataset(): + dataset = DFDC(config) + print(f"dataset: {len(dataset)}") + for i, _ in enumerate(dataset): + path, target = _ + print(f"path: {path}, target: {target}") + if i >= 9: + break + + + def run_dataloader(display_samples=False): + from torch.utils import data + import matplotlib.pyplot as plt + + dataset = DFDC(config) + dataloader = data.DataLoader(dataset, batch_size=8, shuffle=True) + print(f"dataset: {len(dataset)}") + for i, _ in enumerate(dataloader): + path, targets = _ + image = 
dataloader.dataset.load_item(path) + print(f"image: {image.shape}, target: {targets}") + if display_samples: + plt.figure() + img = image[0].permute([1, 2, 0]).numpy() + plt.imshow(img) + # plt.savefig("./img_" + str(i) + ".png") + plt.show() + if i >= 9: + break + + + ########################### + # run the functions below # + ########################### + + # run_dataset() + run_dataloader(False) diff --git a/dataset/faceforensics.py b/dataset/faceforensics.py new file mode 100644 index 0000000000000000000000000000000000000000..baf9fa43f250e6e585a6f1f771e17250373a5f0a --- /dev/null +++ b/dataset/faceforensics.py @@ -0,0 +1,107 @@ +import torch +import numpy as np +from os.path import join +from dataset import AbstractDataset + +METHOD = ['all', 'Deepfakes', 'Face2Face', 'FaceSwap', 'NeuralTextures'] +SPLIT = ['train', 'val', 'test'] +COMP2NAME = {'c0': 'raw', 'c23': 'c23', 'c40': 'c40'} +SOURCE_MAP = {'youtube': 2, 'Deepfakes': 3, 'Face2Face': 4, 'FaceSwap': 5, 'NeuralTextures': 6} + + +class FaceForensics(AbstractDataset): + """ + FaceForensics++ Dataset proposed in "FaceForensics++: Learning to Detect Manipulated Facial Images" + """ + + def __init__(self, cfg, seed=2022, transforms=None, transform=None, target_transform=None): + # pre-check + if cfg['split'] not in SPLIT: + raise ValueError(f"split should be one of {SPLIT}, " + f"but found {cfg['split']}.") + if cfg['method'] not in METHOD: + raise ValueError(f"method should be one of {METHOD}, " + f"but found {cfg['method']}.") + if cfg['compression'] not in COMP2NAME.keys(): + raise ValueError(f"compression should be one of {COMP2NAME.keys()}, " + f"but found {cfg['compression']}.") + super(FaceForensics, self).__init__( + cfg, seed, transforms, transform, target_transform) + print(f"Loading data from 'FF++ {cfg['method']}' of split '{cfg['split']}' " + f"and compression '{cfg['compression']}'\nPlease wait patiently...") + + self.categories = ['original', 'fake'] + # load the path of dataset images + indices = join(self.root, cfg['split'] + "_" + cfg['compression'] + ".pickle") + indices = torch.load(indices) + if cfg['method'] == "all": + # full dataset + self.images = [join(cfg['root'], _[0]) for _ in indices] + self.targets = [_[1] for _ in indices] + else: + # specific manipulated method + self.images = list() + self.targets = list() + nums = 0 + for _ in indices: + if cfg['method'] in _[0]: + self.images.append(join(cfg['root'], _[0])) + self.targets.append(_[1]) + nums = len(self.targets) + ori = list() + for _ in indices: + if "original_sequences" in _[0]: + ori.append(join(cfg['root'], _[0])) + choices = np.random.choice(ori, size=nums, replace=False) + self.images.extend(choices) + self.targets.extend([0] * nums) + print("Data from 'FF++' loaded.\n") + print(f"Dataset contains {len(self.images)} images.\n") + + +if __name__ == '__main__': + import yaml + + config_path = "../config/dataset/faceforensics.yml" + with open(config_path) as config_file: + config = yaml.load(config_file, Loader=yaml.FullLoader) + config = config["train_cfg"] + # config = config["test_cfg"] + + def run_dataset(): + dataset = FaceForensics(config) + print(f"dataset: {len(dataset)}") + for i, _ in enumerate(dataset): + path, target = _ + print(f"path: {path}, target: {target}") + if i >= 9: + break + + + def run_dataloader(display_samples=False): + from torch.utils import data + import matplotlib.pyplot as plt + + dataset = FaceForensics(config) + dataloader = data.DataLoader(dataset, batch_size=8, shuffle=True) + print(f"dataset: {len(dataset)}") + 
for i, _ in enumerate(dataloader): + path, targets = _ + image = dataloader.dataset.load_item(path) + print(f"image: {image.shape}, target: {targets}") + if display_samples: + plt.figure() + img = image[0].permute([1, 2, 0]).numpy() + plt.imshow(img) + # plt.savefig("./img_" + str(i) + ".png") + plt.show() + if i >= 9: + break + + + ########################### + # run the functions below # + ########################### + + # run_dataset() + run_dataloader(False) diff --git a/dataset/wild_deepfake.py b/dataset/wild_deepfake.py new file mode 100644 index 0000000000000000000000000000000000000000..5f39c1a81f04fadae39dd46d0aae9cefc023b6ab --- /dev/null +++ b/dataset/wild_deepfake.py @@ -0,0 +1,100 @@ +import torch +import numpy as np +from os.path import join +from dataset import AbstractDataset + +SPLITS = ["train", "test"] + + +class WildDeepfake(AbstractDataset): + """ + Wild Deepfake Dataset proposed in "WildDeepfake: A Challenging Real-World Dataset for Deepfake Detection" + """ + + def __init__(self, cfg, seed=2022, transforms=None, transform=None, target_transform=None): + # pre-check + if cfg['split'] not in SPLITS: + raise ValueError(f"split should be one of {SPLITS}, but found {cfg['split']}.") + super(WildDeepfake, self).__init__(cfg, seed, transforms, transform, target_transform) + print(f"Loading data from 'WildDeepfake' of split '{cfg['split']}'" + f"\nPlease wait patiently...") + self.categories = ['original', 'fake'] + self.root = cfg['root'] + self.num_train = cfg.get('num_image_train', None) + self.num_test = cfg.get('num_image_test', None) + self.images, self.targets = self.__get_images() + print(f"Data from 'WildDeepfake' loaded.") + print(f"Dataset contains {len(self.images)} images.\n") + + def __get_images(self): + if self.split == 'train': + num = self.num_train + elif self.split == 'test': + num = self.num_test + else: + num = None + real_images = torch.load(join(self.root, self.split, "real.pickle")) + if num is not None: + real_images = np.random.choice(real_images, num // 3, replace=False) + real_tgts = [torch.tensor(0)] * len(real_images) + print(f"real: {len(real_tgts)}") + fake_images = torch.load(join(self.root, self.split, "fake.pickle")) + if num is not None: + fake_images = np.random.choice(fake_images, num - num // 3, replace=False) + fake_tgts = [torch.tensor(1)] * len(fake_images) + print(f"fake: {len(fake_tgts)}") + return real_images + fake_images, real_tgts + fake_tgts + + def __getitem__(self, index): + path = join(self.root, self.split, self.images[index]) + tgt = self.targets[index] + return path, tgt + + +if __name__ == '__main__': + import yaml + + config_path = "../config/dataset/wilddeepfake.yml" + with open(config_path) as config_file: + config = yaml.load(config_file, Loader=yaml.FullLoader) + config = config["train_cfg"] + # config = config["test_cfg"] + + + def run_dataset(): + dataset = WildDeepfake(config) + print(f"dataset: {len(dataset)}") + for i, _ in enumerate(dataset): + path, target = _ + print(f"path: {path}, target: {target}") + if i >= 9: + break + + + def run_dataloader(display_samples=False): + from torch.utils import data + import matplotlib.pyplot as plt + + dataset = WildDeepfake(config) + dataloader = data.DataLoader(dataset, batch_size=8, shuffle=True) + print(f"dataset: {len(dataset)}") + for i, _ in enumerate(dataloader): + path, targets = _ + image = dataloader.dataset.load_item(path) + print(f"image: {image.shape}, target: {targets}") + if display_samples: + plt.figure() + img = image[0].permute([1, 2, 0]).numpy() + 
plt.imshow(img) + # plt.savefig("./img_" + str(i) + ".png") + plt.show() + if i >= 9: + break + + + ########################### + # run the functions below # + ########################### + + # run_dataset() + run_dataloader(False) diff --git a/extract_video.py b/extract_video.py new file mode 100644 index 0000000000000000000000000000000000000000..7aef73275e31b541c8c3728bd82775fd7aec196d --- /dev/null +++ b/extract_video.py @@ -0,0 +1,233 @@ +import os +from os.path import join +import argparse +import numpy as np +import cv2 +import torch +from tqdm import tqdm + +from data import cfg_mnet, cfg_re50 +from layers.functions.prior_box import PriorBox +from utils.nms.py_cpu_nms import py_cpu_nms +from models.retinaface import RetinaFace +from utils.box_utils import decode + +np.random.seed(0) + + +def check_keys(model, pretrained_state_dict): + ckpt_keys = set(pretrained_state_dict.keys()) + model_keys = set(model.state_dict().keys()) + used_pretrained_keys = model_keys & ckpt_keys + unused_pretrained_keys = ckpt_keys - model_keys + missing_keys = model_keys - ckpt_keys + print('Missing keys:{}'.format(len(missing_keys))) + print('Unused checkpoint keys:{}'.format(len(unused_pretrained_keys))) + print('Used keys:{}'.format(len(used_pretrained_keys))) + assert len(used_pretrained_keys) > 0, 'load NONE from pretrained checkpoint' + return True + + +def remove_prefix(state_dict, prefix): + ''' Old style model is stored with all names of parameters sharing common prefix 'module.' ''' + print('remove prefix \'{}\''.format(prefix)) + + def f(x): return x.split(prefix, 1)[-1] if x.startswith(prefix) else x + + return {f(key): value for key, value in state_dict.items()} + + +def load_model(model, pretrained_path, load_to_cpu): + print('Loading pretrained model from {}'.format(pretrained_path)) + if load_to_cpu: + pretrained_dict = torch.load( + pretrained_path, map_location=lambda storage, loc: storage) + else: + pretrained_dict = torch.load( + pretrained_path, map_location=lambda storage, loc: storage.cuda(device)) + if "state_dict" in pretrained_dict.keys(): + pretrained_dict = remove_prefix( + pretrained_dict['state_dict'], 'module.') + else: + pretrained_dict = remove_prefix(pretrained_dict, 'module.') + check_keys(model, pretrained_dict) + model.load_state_dict(pretrained_dict, strict=False) + model.to(device) + return model + + +def detect(img_list, output_path, resize=1): + os.makedirs(output_path, exist_ok=True) + im_height, im_width, _ = img_list[0].shape + scale = torch.Tensor([im_width, im_height, im_width, im_height]) + img_x = torch.stack(img_list, dim=0).permute([0, 3, 1, 2]) + scale = scale.to(device) + + # batch size + batch_size = args.bs + # forward times + f_times = img_x.shape[0] // batch_size + if img_x.shape[0] % batch_size != 0: + f_times += 1 + locs_list = list() + confs_list = list() + for _ in range(f_times): + if _ != f_times - 1: + batch_img_x = img_x[_ * batch_size:(_ + 1) * batch_size] + else: + batch_img_x = img_x[_ * batch_size:] # last batch + batch_img_x = batch_img_x.to(device).float() + l, c, _ = net(batch_img_x) + locs_list.append(l) + confs_list.append(c) + locs = torch.cat(locs_list, dim=0) + confs = torch.cat(confs_list, dim=0) + + priorbox = PriorBox(cfg, image_size=(im_height, im_width)) + priors = priorbox.forward() + priors = priors.to(device) + prior_data = priors.data + + img_cpu = img_x.permute([0, 2, 3, 1]).cpu().numpy() + i = 0 + for img, loc, conf in zip(img_cpu, locs, confs): + boxes = decode(loc.data, prior_data, cfg['variance']) + boxes = boxes * 
scale / resize + boxes = boxes.cpu().numpy() + scores = conf.data.cpu().numpy()[:, 1] + + # ignore low scores + inds = np.where(scores > args.confidence_threshold)[0] + boxes = boxes[inds] + scores = scores[inds] + + # keep top-K before NMS + order = scores.argsort()[::-1][:args.top_k] + boxes = boxes[order] + scores = scores[order] + + # do NMS + dets = np.hstack((boxes, scores[:, np.newaxis])).astype( + np.float32, copy=False) + keep = py_cpu_nms(dets, args.nms_threshold) + # keep = nms(dets, args.nms_threshold,force_cpu=args.cpu) + dets = dets[keep, :] + + # keep top-K faster NMS + dets = dets[:args.keep_top_k, :] + + if len(dets) == 0: + continue + det = list(map(int, dets[0])) + x, y, size_bb_x, size_bb_y = get_boundingbox(det, img.shape[1], img.shape[0]) + cropped_img = img[y:y + size_bb_y, x:x + size_bb_x, :] + (104, 117, 123) + cv2.imwrite(join(output_path, '{:04d}.png'.format(i)), cropped_img) + i += 1 + pass + + +def extract_frames(data_path, interval=1): + """Method to extract frames""" + if data_path.split('.')[-1] == "mp4": + reader = cv2.VideoCapture(data_path) + frame_num = 0 + frames = list() + + while reader.isOpened(): + success, image = reader.read() + if not success: + break + cv2.cvtColor(image, cv2.COLOR_BGR2RGB) + image = torch.tensor(image) - torch.tensor([104, 117, 123]) + if frame_num % interval == 0: + frames.append(image) + frame_num += 1 + if len(frames) > args.max_frames: + break + reader.release() + if len(frames) > args.max_frames: + samples = np.random.choice( + np.arange(0, len(frames)), size=args.max_frames, replace=False) + return [frames[_] for _ in samples] + return frames + else: + image = cv2.imread(data_path) + cv2.cvtColor(image, cv2.COLOR_BGR2RGB) + image = torch.tensor(image) - torch.tensor([104, 117, 123]) + return [image] + + +def get_boundingbox(bbox, width, height, scale=1.8, minsize=None): + x1 = bbox[0] + y1 = bbox[1] + x2 = bbox[2] + y2 = bbox[3] + size_bb_x = int((x2 - x1) * scale) + size_bb_y = int((y2 - y1) * scale) + if minsize: + if size_bb_x < minsize: + size_bb_x = minsize + if size_bb_y < minsize: + size_bb_y = minsize + center_x, center_y = (x1 + x2) // 2, (y1 + y2) // 2 + + # Check for out of bounds, x-y top left corner + x1 = max(int(center_x - size_bb_x // 2), 0) + y1 = max(int(center_y - size_bb_y // 2), 0) + # Check for too big bb size for given x, y + size_bb_x = min(width - x1, size_bb_x) + size_bb_y = min(height - y1, size_bb_y) + return x1, y1, size_bb_x, size_bb_y + + +def extract_method_videos(data_path, interval): + video = data_path.split('/')[-1] + result_path = '/'.join(data_path.split('/')[:-1]) + images_path = join(result_path, 'images') + + image_folder = video.split('.')[0] + try: + print(data_path) + image_list = extract_frames(data_path, interval) + detect(image_list, join(images_path, image_folder)) + except Exception as ex: + f = open("failure.txt", "a", encoding="utf-8") + f.writelines(image_folder + + f" Exception for {image_folder}: {ex}\n") + f.close() + + +if __name__ == '__main__': + p = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + p.add_argument('--data_path', '-p', type=str, help='path to the data') + p.add_argument('--confidence_threshold', default=0.05, + type=float, help='confidence threshold') + p.add_argument('--top_k', default=5, type=int, help='top_k') + p.add_argument('--nms_threshold', default=0.4, + type=float, help='nms threshold') + p.add_argument('--keep_top_k', default=1, type=int, help='keep_top_k') + p.add_argument('--bs', default=32, 
type=int, help='batch size') + p.add_argument('--frame_interval', '-fi', default=1, type=int, help='frame interval') + p.add_argument('--device', "-d", default="cuda:0", type=str, help='device') + p.add_argument('--max_frames', default=100, type=int, help='maximum frames per video') + + args = p.parse_args() + + torch.set_grad_enabled(False) + # use resnet-50 + cfg = cfg_re50 + pretrained_weights = './weights/Resnet50_Final.pth' + + torch.backends.cudnn.benchmark = True + device = torch.device(args.device) + print(device) + + # net and model + net = RetinaFace(cfg=cfg, phase='test') + net = load_model(net, pretrained_weights, args.device) + net.eval() + print('Finished loading model!') + + extract_method_videos(args.data_path, args.frame_interval) \ No newline at end of file diff --git a/inference.py b/inference.py new file mode 100644 index 0000000000000000000000000000000000000000..3e07450738e55d46cae9342f05600b850b41e4f4 --- /dev/null +++ b/inference.py @@ -0,0 +1,142 @@ +import cv2 +import torch +import random +import argparse +from glob import glob +from os.path import join +from model.network import Recce +from model.common import freeze_weights +from albumentations import Compose, Normalize, Resize +from albumentations.pytorch.transforms import ToTensorV2 + +import os + +os.environ['KMP_DUPLICATE_LIB_OK']='True' + +# fix random seed +seed = 0 +random.seed(seed) +torch.manual_seed(seed) +torch.cuda.manual_seed(seed) +torch.cuda.manual_seed_all(seed) + +parser = argparse.ArgumentParser(description="This code helps you use a trained model to " + "do inference.") +parser.add_argument("--weight", "-w", + type=str, + default=None, + help="Specify the path to the model weight (the state dict file). " + "Do not use this argument when '--bin' is set.") +parser.add_argument("--bin", "-b", + type=str, + default=None, + help="Specify the path to the model bin which ends up with '.bin' " + "(which is generated by the trainer of this project). " + "Do not use this argument when '--weight' is set.") +parser.add_argument("--image", "-i", + type=str, + default=None, + help="Specify the path to the input image. " + "Do not use this argument when '--image_folder' is set.") +parser.add_argument("--image_folder", "-f", + type=str, + default=None, + help="Specify the directory to evaluate all the images. " + "Do not use this argument when '--image' is set.") + +parser.add_argument('--device', '-d', type=str, + default="cpu", + help="Specify the device to load the model. Default: 'cpu'.") +parser.add_argument('--image_size', '-s', type=int, + default=299, + help="Specify the spatial size of the input image(s). 
Default: 299.") +parser.add_argument('--visualize', '-v', action="store_true", + default=False, help='Visualize images.') + + +def preprocess(file_path): + img = cv2.imread(file_path) + img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) + compose = Compose([Resize(height=args.image_size, width=args.image_size), + Normalize(mean=[0.5] * 3, std=[0.5] * 3), + ToTensorV2()]) + img = compose(image=img)['image'].unsqueeze(0) + return img + + +def prepare_data(): + paths = list() + images = list() + # check the console arguments + if args.image and args.image_folder: + raise ValueError("Only one of '--image' or '--image_folder' can be set.") + elif args.image: + images.append(preprocess(args.image)) + paths.append(args.image) + elif args.image_folder: + image_folder = '.'.join(args.image_folder.split('.')[:-1]) + image_paths = glob(image_folder + "/*.jpg") + image_paths.extend(glob(image_folder + "/*.png")) + for _ in image_paths: + images.append(preprocess(_)) + paths.append(_) + else: + raise ValueError("Neither of '--image' nor '--image_folder' is set. Please specify either " + "one of these two arguments to load input image(s) properly.") + return paths, images + + +def inference(model, images, paths, device): + mean_pred = 0 + for img, pt in zip(images, paths): + img = img.to(device) + prediction = model(img) + prediction = torch.sigmoid(prediction).cpu() + fake = True if prediction >= 0.5 else False + + mean_pred += prediction.item() + + print(f"path: {pt} \t\t| fake probability: {prediction.item():.4f} \t| " + f"prediction: {'fake' if fake else 'real'}") + if args.visualize: + cvimg = cv2.imread(pt) + cvimg = cv2.putText(cvimg, f'p: {prediction.item():.2f}, ' + f"{'fake' if fake else 'real'}", + (5, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.5, + (0, 0, 255) if fake else (255, 0, 0), 2) + cv2.imshow("image", cvimg) + cv2.waitKey(0) + cv2.destroyWindow("image") + mean_pred = mean_pred / len(images) + return mean_pred + + +def main(): + print("Arguments:\n", args, end="\n\n") + # set device + device = torch.device(args.device) + # load model + model = eval("Recce")(num_classes=1) + # check the console arguments + if args.weight and args.bin: + raise ValueError("Only one of '--weight' or '--bin' can be set.") + elif args.weight: + weights = torch.load(args.weight, map_location="cpu") + elif args.bin: + weights = torch.load(args.bin, map_location="cpu")["model"] + else: + raise ValueError("Neither of '--weight' nor '--bin' is set. 
Please specify either " + "one of these two arguments to load model's weight properly.") + model.load_state_dict(weights) + model = model.to(device) + freeze_weights(model) + model.eval() + + paths, images = prepare_data() + print("Inference:") + mean_pred = inference(model, images=images, paths=paths, device=device) + print("Mean prediction:", mean_pred) + + +if __name__ == '__main__': + args = parser.parse_args() + main() diff --git a/layers/__init__.py b/layers/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..53a3f4b5160995d93bc7911e808b3045d74362c9 --- /dev/null +++ b/layers/__init__.py @@ -0,0 +1,2 @@ +from .functions import * +from .modules import * diff --git a/layers/__pycache__/__init__.cpython-39.pyc b/layers/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7c5b66356541488fcb95f7a1d2a706d5df392844 Binary files /dev/null and b/layers/__pycache__/__init__.cpython-39.pyc differ diff --git a/layers/functions/__pycache__/prior_box.cpython-39.pyc b/layers/functions/__pycache__/prior_box.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..267c1bd33d7cc88b913b3af15b5c0b349257923c Binary files /dev/null and b/layers/functions/__pycache__/prior_box.cpython-39.pyc differ diff --git a/layers/functions/prior_box.py b/layers/functions/prior_box.py new file mode 100644 index 0000000000000000000000000000000000000000..80c7f858371ed71f39ed609eb44b423d8693bf61 --- /dev/null +++ b/layers/functions/prior_box.py @@ -0,0 +1,34 @@ +import torch +from itertools import product as product +import numpy as np +from math import ceil + + +class PriorBox(object): + def __init__(self, cfg, image_size=None, phase='train'): + super(PriorBox, self).__init__() + self.min_sizes = cfg['min_sizes'] + self.steps = cfg['steps'] + self.clip = cfg['clip'] + self.image_size = image_size + self.feature_maps = [[ceil(self.image_size[0]/step), ceil(self.image_size[1]/step)] for step in self.steps] + self.name = "s" + + def forward(self): + anchors = [] + for k, f in enumerate(self.feature_maps): + min_sizes = self.min_sizes[k] + for i, j in product(range(f[0]), range(f[1])): + for min_size in min_sizes: + s_kx = min_size / self.image_size[1] + s_ky = min_size / self.image_size[0] + dense_cx = [x * self.steps[k] / self.image_size[1] for x in [j + 0.5]] + dense_cy = [y * self.steps[k] / self.image_size[0] for y in [i + 0.5]] + for cy, cx in product(dense_cy, dense_cx): + anchors += [cx, cy, s_kx, s_ky] + + # back to torch land + output = torch.Tensor(anchors).view(-1, 4) + if self.clip: + output.clamp_(max=1, min=0) + return output diff --git a/layers/modules/__init__.py b/layers/modules/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..cf24bddbf283f233d0b93fc074a2bac2f5c044a9 --- /dev/null +++ b/layers/modules/__init__.py @@ -0,0 +1,3 @@ +from .multibox_loss import MultiBoxLoss + +__all__ = ['MultiBoxLoss'] diff --git a/layers/modules/__pycache__/__init__.cpython-39.pyc b/layers/modules/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f43c634cb0c4049aed14534e84053c1e8e5f5fd4 Binary files /dev/null and b/layers/modules/__pycache__/__init__.cpython-39.pyc differ diff --git a/layers/modules/__pycache__/multibox_loss.cpython-39.pyc b/layers/modules/__pycache__/multibox_loss.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0b772caf2bc91d2b7b235c422211519416c78a80 Binary files /dev/null and 
b/layers/modules/__pycache__/multibox_loss.cpython-39.pyc differ diff --git a/layers/modules/multibox_loss.py b/layers/modules/multibox_loss.py new file mode 100644 index 0000000000000000000000000000000000000000..096620480eba59e9d893c1940899f7e3d6736cae --- /dev/null +++ b/layers/modules/multibox_loss.py @@ -0,0 +1,125 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +from torch.autograd import Variable +from utils.box_utils import match, log_sum_exp +from data import cfg_mnet +GPU = cfg_mnet['gpu_train'] + +class MultiBoxLoss(nn.Module): + """SSD Weighted Loss Function + Compute Targets: + 1) Produce Confidence Target Indices by matching ground truth boxes + with (default) 'priorboxes' that have jaccard index > threshold parameter + (default threshold: 0.5). + 2) Produce localization target by 'encoding' variance into offsets of ground + truth boxes and their matched 'priorboxes'. + 3) Hard negative mining to filter the excessive number of negative examples + that comes with using a large number of default bounding boxes. + (default negative:positive ratio 3:1) + Objective Loss: + L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N + Where, Lconf is the CrossEntropy Loss and Lloc is the SmoothL1 Loss + weighted by α which is set to 1 by cross val. + Args: + c: class confidences, + l: predicted boxes, + g: ground truth boxes + N: number of matched default boxes + See: https://arxiv.org/pdf/1512.02325.pdf for more details. + """ + + def __init__(self, num_classes, overlap_thresh, prior_for_matching, bkg_label, neg_mining, neg_pos, neg_overlap, encode_target): + super(MultiBoxLoss, self).__init__() + self.num_classes = num_classes + self.threshold = overlap_thresh + self.background_label = bkg_label + self.encode_target = encode_target + self.use_prior_for_matching = prior_for_matching + self.do_neg_mining = neg_mining + self.negpos_ratio = neg_pos + self.neg_overlap = neg_overlap + self.variance = [0.1, 0.2] + + def forward(self, predictions, priors, targets): + """Multibox Loss + Args: + predictions (tuple): A tuple containing loc preds, conf preds, + and prior boxes from SSD net. + conf shape: torch.size(batch_size,num_priors,num_classes) + loc shape: torch.size(batch_size,num_priors,4) + priors shape: torch.size(num_priors,4) + + ground_truth (tensor): Ground truth boxes and labels for a batch, + shape: [batch_size,num_objs,5] (last idx is the label). 
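            Note: in this RetinaFace variant each target row actually holds 15 values
            (4 box coordinates, 10 landmark coordinates, 1 label), as sliced in the
            body below, and the method returns the tuple (loss_l, loss_c, loss_landm).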
+ """ + + loc_data, conf_data, landm_data = predictions + priors = priors + num = loc_data.size(0) + num_priors = (priors.size(0)) + + # match priors (default boxes) and ground truth boxes + loc_t = torch.Tensor(num, num_priors, 4) + landm_t = torch.Tensor(num, num_priors, 10) + conf_t = torch.LongTensor(num, num_priors) + for idx in range(num): + truths = targets[idx][:, :4].data + labels = targets[idx][:, -1].data + landms = targets[idx][:, 4:14].data + defaults = priors.data + match(self.threshold, truths, defaults, self.variance, labels, landms, loc_t, conf_t, landm_t, idx) + if GPU: + loc_t = loc_t.cuda() + conf_t = conf_t.cuda() + landm_t = landm_t.cuda() + + zeros = torch.tensor(0).cuda() + # landm Loss (Smooth L1) + # Shape: [batch,num_priors,10] + pos1 = conf_t > zeros + num_pos_landm = pos1.long().sum(1, keepdim=True) + N1 = max(num_pos_landm.data.sum().float(), 1) + pos_idx1 = pos1.unsqueeze(pos1.dim()).expand_as(landm_data) + landm_p = landm_data[pos_idx1].view(-1, 10) + landm_t = landm_t[pos_idx1].view(-1, 10) + loss_landm = F.smooth_l1_loss(landm_p, landm_t, reduction='sum') + + + pos = conf_t != zeros + conf_t[pos] = 1 + + # Localization Loss (Smooth L1) + # Shape: [batch,num_priors,4] + pos_idx = pos.unsqueeze(pos.dim()).expand_as(loc_data) + loc_p = loc_data[pos_idx].view(-1, 4) + loc_t = loc_t[pos_idx].view(-1, 4) + loss_l = F.smooth_l1_loss(loc_p, loc_t, reduction='sum') + + # Compute max conf across batch for hard negative mining + batch_conf = conf_data.view(-1, self.num_classes) + loss_c = log_sum_exp(batch_conf) - batch_conf.gather(1, conf_t.view(-1, 1)) + + # Hard Negative Mining + loss_c[pos.view(-1, 1)] = 0 # filter out pos boxes for now + loss_c = loss_c.view(num, -1) + _, loss_idx = loss_c.sort(1, descending=True) + _, idx_rank = loss_idx.sort(1) + num_pos = pos.long().sum(1, keepdim=True) + num_neg = torch.clamp(self.negpos_ratio*num_pos, max=pos.size(1)-1) + neg = idx_rank < num_neg.expand_as(idx_rank) + + # Confidence Loss Including Positive and Negative Examples + pos_idx = pos.unsqueeze(2).expand_as(conf_data) + neg_idx = neg.unsqueeze(2).expand_as(conf_data) + conf_p = conf_data[(pos_idx+neg_idx).gt(0)].view(-1,self.num_classes) + targets_weighted = conf_t[(pos+neg).gt(0)] + loss_c = F.cross_entropy(conf_p, targets_weighted, reduction='sum') + + # Sum of losses: L(x,c,l,g) = (Lconf(x, c) + αLloc(x,l,g)) / N + N = max(num_pos.data.sum().float(), 1) + loss_l /= N + loss_c /= N + loss_landm /= N1 + + return loss_l, loss_c, loss_landm diff --git a/loss/__init__.py b/loss/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..fb8d2007cb4a3f852c7bde93fe84b0e162408d71 --- /dev/null +++ b/loss/__init__.py @@ -0,0 +1,12 @@ +import torch.nn as nn + + +def get_loss(name="cross_entropy", device="cuda:0"): + print(f"Using loss: '{LOSSES[name]}'") + return LOSSES[name].to(device) + + +LOSSES = { + "binary_ce": nn.BCEWithLogitsLoss(), + "cross_entropy": nn.CrossEntropyLoss() +} diff --git a/model/__init__.py b/model/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..c8028d946b3ef30ba6b9ade62b503c3455c49ed6 --- /dev/null +++ b/model/__init__.py @@ -0,0 +1,12 @@ +from .network import * +from .common import * + +MODELS = { + "Recce": Recce +} + + +def load_model(name="Recce"): + assert name in MODELS.keys(), f"Model name can only be one of {MODELS.keys()}." 
+ print(f"Using model: '{name}'") + return MODELS[name] diff --git a/model/__pycache__/__init__.cpython-39.pyc b/model/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..970960d4f678887fdae6d1188ff71a996ee470a9 Binary files /dev/null and b/model/__pycache__/__init__.cpython-39.pyc differ diff --git a/model/__pycache__/common.cpython-39.pyc b/model/__pycache__/common.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4202286a3984904f401717e55b7114939256adaf Binary files /dev/null and b/model/__pycache__/common.cpython-39.pyc differ diff --git a/model/common.py b/model/common.py new file mode 100644 index 0000000000000000000000000000000000000000..1c8a1b962ac277a2164d4387d60083632ce9096b --- /dev/null +++ b/model/common.py @@ -0,0 +1,200 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + + +def freeze_weights(module): + for param in module.parameters(): + param.requires_grad = False + + +def l1_regularize(module): + reg_loss = 0. + for key, param in module.reg_params.items(): + if "weight" in key and param.requires_grad: + reg_loss += torch.sum(torch.abs(param)) + return reg_loss + + +class SeparableConv2d(nn.Module): + def __init__(self, in_channels, out_channels, kernel_size=1, stride=1, padding=0, dilation=1, bias=False): + super(SeparableConv2d, self).__init__() + + self.conv1 = nn.Conv2d(in_channels, in_channels, kernel_size, stride, padding, dilation, + groups=in_channels, bias=bias) + self.pointwise = nn.Conv2d(in_channels, out_channels, 1, 1, 0, 1, 1, bias=bias) + + def forward(self, x): + x = self.conv1(x) + x = self.pointwise(x) + return x + + +class Block(nn.Module): + def __init__(self, in_channels, out_channels, reps, strides=1, + start_with_relu=True, grow_first=True, with_bn=True): + super(Block, self).__init__() + + self.with_bn = with_bn + + if out_channels != in_channels or strides != 1: + self.skip = nn.Conv2d(in_channels, out_channels, 1, stride=strides, bias=False) + if with_bn: + self.skipbn = nn.BatchNorm2d(out_channels) + else: + self.skip = None + + rep = [] + for i in range(reps): + if grow_first: + inc = in_channels if i == 0 else out_channels + outc = out_channels + else: + inc = in_channels + outc = in_channels if i < (reps - 1) else out_channels + rep.append(nn.ReLU(inplace=True)) + rep.append(SeparableConv2d(inc, outc, 3, stride=1, padding=1)) + if with_bn: + rep.append(nn.BatchNorm2d(outc)) + + if not start_with_relu: + rep = rep[1:] + else: + rep[0] = nn.ReLU(inplace=False) + + if strides != 1: + rep.append(nn.MaxPool2d(3, strides, 1)) + self.rep = nn.Sequential(*rep) + + def forward(self, inp): + x = self.rep(inp) + + if self.skip is not None: + skip = self.skip(inp) + if self.with_bn: + skip = self.skipbn(skip) + else: + skip = inp + + x += skip + return x + + +class GraphReasoning(nn.Module): + """ Graph Reasoning Module for information aggregation. 
""" + + def __init__(self, va_in, va_out, vb_in, vb_out, vc_in, vc_out, spatial_ratio, drop_rate): + super(GraphReasoning, self).__init__() + self.ratio = spatial_ratio + self.va_embedding = nn.Sequential( + nn.Conv2d(va_in, va_out, 1, bias=False), + nn.ReLU(True), + nn.Conv2d(va_out, va_out, 1, bias=False), + ) + self.va_gated_b = nn.Sequential( + nn.Conv2d(va_in, va_out, 1, bias=False), + nn.Sigmoid() + ) + self.va_gated_c = nn.Sequential( + nn.Conv2d(va_in, va_out, 1, bias=False), + nn.Sigmoid() + ) + self.vb_embedding = nn.Sequential( + nn.Linear(vb_in, vb_out, bias=False), + nn.ReLU(True), + nn.Linear(vb_out, vb_out, bias=False), + ) + self.vc_embedding = nn.Sequential( + nn.Linear(vc_in, vc_out, bias=False), + nn.ReLU(True), + nn.Linear(vc_out, vc_out, bias=False), + ) + self.unfold_b = nn.Unfold(kernel_size=spatial_ratio[0], stride=spatial_ratio[0]) + self.unfold_c = nn.Unfold(kernel_size=spatial_ratio[1], stride=spatial_ratio[1]) + self.reweight_ab = nn.Sequential( + nn.Linear(va_out + vb_out, 1, bias=False), + nn.ReLU(True), + nn.Softmax(dim=1) + ) + self.reweight_ac = nn.Sequential( + nn.Linear(va_out + vc_out, 1, bias=False), + nn.ReLU(True), + nn.Softmax(dim=1) + ) + self.reproject = nn.Sequential( + nn.Conv2d(va_out + vb_out + vc_out, va_in, kernel_size=1, bias=False), + nn.ReLU(True), + nn.Conv2d(va_in, va_in, kernel_size=1, bias=False), + nn.Dropout(drop_rate) if drop_rate is not None else nn.Identity(), + ) + + def forward(self, vert_a, vert_b, vert_c): + emb_vert_a = self.va_embedding(vert_a) + emb_vert_a = emb_vert_a.reshape([emb_vert_a.shape[0], emb_vert_a.shape[1], -1]) + + gate_vert_b = 1 - self.va_gated_b(vert_a) + gate_vert_b = gate_vert_b.reshape(*emb_vert_a.shape) + gate_vert_c = 1 - self.va_gated_c(vert_a) + gate_vert_c = gate_vert_c.reshape(*emb_vert_a.shape) + + vert_b = self.unfold_b(vert_b).reshape( + [vert_b.shape[0], vert_b.shape[1], self.ratio[0] * self.ratio[0], -1]) + vert_b = vert_b.permute([0, 2, 3, 1]) + emb_vert_b = self.vb_embedding(vert_b) + + vert_c = self.unfold_c(vert_c).reshape( + [vert_c.shape[0], vert_c.shape[1], self.ratio[1] * self.ratio[1], -1]) + vert_c = vert_c.permute([0, 2, 3, 1]) + emb_vert_c = self.vc_embedding(vert_c) + + agg_vb = list() + agg_vc = list() + for j in range(emb_vert_a.shape[-1]): + # ab propagating + emb_v_a = torch.stack([emb_vert_a[:, :, j]] * (self.ratio[0] ** 2), dim=1) + emb_v_b = emb_vert_b[:, :, j, :] + emb_v_ab = torch.cat([emb_v_a, emb_v_b], dim=-1) + w = self.reweight_ab(emb_v_ab) + agg_vb.append(torch.bmm(emb_v_b.transpose(1, 2), w).squeeze() * gate_vert_b[:, :, j]) + + # ac propagating + emb_v_a = torch.stack([emb_vert_a[:, :, j]] * (self.ratio[1] ** 2), dim=1) + emb_v_c = emb_vert_c[:, :, j, :] + emb_v_ac = torch.cat([emb_v_a, emb_v_c], dim=-1) + w = self.reweight_ac(emb_v_ac) + agg_vc.append(torch.bmm(emb_v_c.transpose(1, 2), w).squeeze() * gate_vert_c[:, :, j]) + + agg_vert_b = torch.stack(agg_vb, dim=-1) + agg_vert_c = torch.stack(agg_vc, dim=-1) + agg_vert_bc = torch.cat([agg_vert_b, agg_vert_c], dim=1) + agg_vert_abc = torch.cat([agg_vert_bc, emb_vert_a], dim=1) + agg_vert_abc = torch.sigmoid(agg_vert_abc) + agg_vert_abc = agg_vert_abc.reshape(vert_a.shape[0], -1, vert_a.shape[2], vert_a.shape[3]) + return self.reproject(agg_vert_abc) + + +class GuidedAttention(nn.Module): + """ Reconstruction Guided Attention. 
""" + + def __init__(self, depth=728, drop_rate=0.2): + super(GuidedAttention, self).__init__() + self.depth = depth + self.gated = nn.Sequential( + nn.Conv2d(3, 3, kernel_size=3, stride=1, padding=1, bias=False), + nn.ReLU(True), + nn.Conv2d(3, 1, 1, bias=False), + nn.Sigmoid() + ) + self.h = nn.Sequential( + nn.Conv2d(depth, depth, 1, 1, bias=False), + nn.BatchNorm2d(depth), + nn.ReLU(True), + ) + self.dropout = nn.Dropout(drop_rate) + + def forward(self, x, pred_x, embedding): + residual_full = torch.abs(x - pred_x) + residual_x = F.interpolate(residual_full, size=embedding.shape[-2:], + mode='bilinear', align_corners=True) + res_map = self.gated(residual_x) + return res_map * self.h(embedding) + self.dropout(embedding) diff --git a/model/network/Recce.py b/model/network/Recce.py new file mode 100644 index 0000000000000000000000000000000000000000..eb647cfc245542108bf4de3450ee3db05c55fb6d --- /dev/null +++ b/model/network/Recce.py @@ -0,0 +1,133 @@ +from functools import partial +from timm.models import xception +from model.common import SeparableConv2d, Block +from model.common import GuidedAttention, GraphReasoning + +import torch +import torch.nn as nn +import torch.nn.functional as F + +encoder_params = { + "xception": { + "features": 2048, + "init_op": partial(xception, pretrained=True) + } +} + + +class Recce(nn.Module): + """ End-to-End Reconstruction-Classification Learning for Face Forgery Detection """ + + def __init__(self, num_classes, drop_rate=0.2): + super(Recce, self).__init__() + self.name = "xception" + self.loss_inputs = dict() + self.encoder = encoder_params[self.name]["init_op"]() + self.global_pool = nn.AdaptiveAvgPool2d((1, 1)) + self.dropout = nn.Dropout(drop_rate) + self.fc = nn.Linear(encoder_params[self.name]["features"], num_classes) + + self.attention = GuidedAttention(depth=728, drop_rate=drop_rate) + self.reasoning = GraphReasoning(728, 256, 256, 256, 128, 256, [2, 4], drop_rate) + + self.decoder1 = nn.Sequential( + nn.UpsamplingNearest2d(scale_factor=2), + SeparableConv2d(728, 256, 3, 1, 1, bias=False), + nn.BatchNorm2d(256), + nn.ReLU(inplace=True) + ) + self.decoder2 = Block(256, 256, 3, 1) + self.decoder3 = nn.Sequential( + nn.UpsamplingNearest2d(scale_factor=2), + SeparableConv2d(256, 128, 3, 1, 1, bias=False), + nn.BatchNorm2d(128), + nn.ReLU(inplace=True) + ) + self.decoder4 = Block(128, 128, 3, 1) + self.decoder5 = nn.Sequential( + nn.UpsamplingNearest2d(scale_factor=2), + SeparableConv2d(128, 64, 3, 1, 1, bias=False), + nn.BatchNorm2d(64), + nn.ReLU(inplace=True) + ) + self.decoder6 = nn.Sequential( + nn.Conv2d(64, 3, 1, 1, bias=False), + nn.Tanh() + ) + + def norm_n_corr(self, x): + norm_embed = F.normalize(self.global_pool(x), p=2, dim=1) + corr = (torch.matmul(norm_embed.squeeze(), norm_embed.squeeze().T) + 1.) / 2. + return norm_embed, corr + + @staticmethod + def add_white_noise(tensor, mean=0., std=1e-6): + rand = torch.rand([tensor.shape[0], 1, 1, 1]) + rand = torch.where(rand > 0.5, 1., 0.).to(tensor.device) + white_noise = torch.normal(mean, std, size=tensor.shape, device=tensor.device) + noise_t = tensor + white_noise * rand + noise_t = torch.clip(noise_t, -1., 1.) 
+ return noise_t + + def forward(self, x): + # clear the loss inputs + self.loss_inputs = dict(recons=[], contra=[]) + noise_x = self.add_white_noise(x) if self.training else x + out = self.encoder.conv1(noise_x) + out = self.encoder.bn1(out) + out = self.encoder.act1(out) + out = self.encoder.conv2(out) + out = self.encoder.bn2(out) + out = self.encoder.act2(out) + out = self.encoder.block1(out) + out = self.encoder.block2(out) + out = self.encoder.block3(out) + embedding = self.encoder.block4(out) + + norm_embed, corr = self.norm_n_corr(embedding) + self.loss_inputs['contra'].append(corr) + + out = self.dropout(embedding) + out = self.decoder1(out) + out_d2 = self.decoder2(out) + + norm_embed, corr = self.norm_n_corr(out_d2) + self.loss_inputs['contra'].append(corr) + + out = self.decoder3(out_d2) + out_d4 = self.decoder4(out) + + norm_embed, corr = self.norm_n_corr(out_d4) + self.loss_inputs['contra'].append(corr) + + out = self.decoder5(out_d4) + pred = self.decoder6(out) + + recons_x = F.interpolate(pred, size=x.shape[-2:], mode='bilinear', align_corners=True) + self.loss_inputs['recons'].append(recons_x) + + embedding = self.encoder.block5(embedding) + embedding = self.encoder.block6(embedding) + embedding = self.encoder.block7(embedding) + + fusion = self.reasoning(embedding, out_d2, out_d4) + embedding + + embedding = self.encoder.block8(fusion) + img_att = self.attention(x, recons_x, embedding) + + embedding = self.encoder.block9(img_att) + embedding = self.encoder.block10(embedding) + embedding = self.encoder.block11(embedding) + embedding = self.encoder.block12(embedding) + + embedding = self.encoder.conv3(embedding) + embedding = self.encoder.bn3(embedding) + embedding = self.encoder.act3(embedding) + embedding = self.encoder.conv4(embedding) + embedding = self.encoder.bn4(embedding) + embedding = self.encoder.act4(embedding) + + embedding = self.global_pool(embedding).squeeze() + + out = self.dropout(embedding) + return self.fc(out) diff --git a/model/network/__init__.py b/model/network/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e552eb84c758d2b1ac6542725d7faed4d1fca3bd --- /dev/null +++ b/model/network/__init__.py @@ -0,0 +1 @@ +from .Recce import Recce diff --git a/model/network/__pycache__/Recce.cpython-39.pyc b/model/network/__pycache__/Recce.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7cb507ddbfcf27e0927c91771115f94ce3e9ed3e Binary files /dev/null and b/model/network/__pycache__/Recce.cpython-39.pyc differ diff --git a/model/network/__pycache__/__init__.cpython-39.pyc b/model/network/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ae6a546ed8337565048012e21a75baf57f542e64 Binary files /dev/null and b/model/network/__pycache__/__init__.cpython-39.pyc differ diff --git a/models/__init__.py b/models/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/models/__pycache__/__init__.cpython-39.pyc b/models/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..666634bbf9512c0d1de559e4015c236aaa730a9a Binary files /dev/null and b/models/__pycache__/__init__.cpython-39.pyc differ diff --git a/models/__pycache__/net.cpython-39.pyc b/models/__pycache__/net.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0bd11a22566718d4a2c414a4a3b38fe44c0fa5cf Binary files /dev/null and 
b/models/__pycache__/net.cpython-39.pyc differ diff --git a/models/__pycache__/retinaface.cpython-39.pyc b/models/__pycache__/retinaface.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..533ce168cb9f7ce83315420bdbaa868910344789 Binary files /dev/null and b/models/__pycache__/retinaface.cpython-39.pyc differ diff --git a/models/net.py b/models/net.py new file mode 100644 index 0000000000000000000000000000000000000000..beb6040b24258f8b96020c1c9fc2610819718017 --- /dev/null +++ b/models/net.py @@ -0,0 +1,137 @@ +import time +import torch +import torch.nn as nn +import torchvision.models._utils as _utils +import torchvision.models as models +import torch.nn.functional as F +from torch.autograd import Variable + +def conv_bn(inp, oup, stride = 1, leaky = 0): + return nn.Sequential( + nn.Conv2d(inp, oup, 3, stride, 1, bias=False), + nn.BatchNorm2d(oup), + nn.LeakyReLU(negative_slope=leaky, inplace=True) + ) + +def conv_bn_no_relu(inp, oup, stride): + return nn.Sequential( + nn.Conv2d(inp, oup, 3, stride, 1, bias=False), + nn.BatchNorm2d(oup), + ) + +def conv_bn1X1(inp, oup, stride, leaky=0): + return nn.Sequential( + nn.Conv2d(inp, oup, 1, stride, padding=0, bias=False), + nn.BatchNorm2d(oup), + nn.LeakyReLU(negative_slope=leaky, inplace=True) + ) + +def conv_dw(inp, oup, stride, leaky=0.1): + return nn.Sequential( + nn.Conv2d(inp, inp, 3, stride, 1, groups=inp, bias=False), + nn.BatchNorm2d(inp), + nn.LeakyReLU(negative_slope= leaky,inplace=True), + + nn.Conv2d(inp, oup, 1, 1, 0, bias=False), + nn.BatchNorm2d(oup), + nn.LeakyReLU(negative_slope= leaky,inplace=True), + ) + +class SSH(nn.Module): + def __init__(self, in_channel, out_channel): + super(SSH, self).__init__() + assert out_channel % 4 == 0 + leaky = 0 + if (out_channel <= 64): + leaky = 0.1 + self.conv3X3 = conv_bn_no_relu(in_channel, out_channel//2, stride=1) + + self.conv5X5_1 = conv_bn(in_channel, out_channel//4, stride=1, leaky = leaky) + self.conv5X5_2 = conv_bn_no_relu(out_channel//4, out_channel//4, stride=1) + + self.conv7X7_2 = conv_bn(out_channel//4, out_channel//4, stride=1, leaky = leaky) + self.conv7x7_3 = conv_bn_no_relu(out_channel//4, out_channel//4, stride=1) + + def forward(self, input): + conv3X3 = self.conv3X3(input) + + conv5X5_1 = self.conv5X5_1(input) + conv5X5 = self.conv5X5_2(conv5X5_1) + + conv7X7_2 = self.conv7X7_2(conv5X5_1) + conv7X7 = self.conv7x7_3(conv7X7_2) + + out = torch.cat([conv3X3, conv5X5, conv7X7], dim=1) + out = F.relu(out) + return out + +class FPN(nn.Module): + def __init__(self,in_channels_list,out_channels): + super(FPN,self).__init__() + leaky = 0 + if (out_channels <= 64): + leaky = 0.1 + self.output1 = conv_bn1X1(in_channels_list[0], out_channels, stride = 1, leaky = leaky) + self.output2 = conv_bn1X1(in_channels_list[1], out_channels, stride = 1, leaky = leaky) + self.output3 = conv_bn1X1(in_channels_list[2], out_channels, stride = 1, leaky = leaky) + + self.merge1 = conv_bn(out_channels, out_channels, leaky = leaky) + self.merge2 = conv_bn(out_channels, out_channels, leaky = leaky) + + def forward(self, input): + # names = list(input.keys()) + input = list(input.values()) + + output1 = self.output1(input[0]) + output2 = self.output2(input[1]) + output3 = self.output3(input[2]) + + up3 = F.interpolate(output3, size=[output2.size(2), output2.size(3)], mode="nearest") + output2 = output2 + up3 + output2 = self.merge2(output2) + + up2 = F.interpolate(output2, size=[output1.size(2), output1.size(3)], mode="nearest") + output1 = output1 + up2 + output1 = 
self.merge1(output1) + + out = [output1, output2, output3] + return out + + + +class MobileNetV1(nn.Module): + def __init__(self): + super(MobileNetV1, self).__init__() + self.stage1 = nn.Sequential( + conv_bn(3, 8, 2, leaky = 0.1), # 3 + conv_dw(8, 16, 1), # 7 + conv_dw(16, 32, 2), # 11 + conv_dw(32, 32, 1), # 19 + conv_dw(32, 64, 2), # 27 + conv_dw(64, 64, 1), # 43 + ) + self.stage2 = nn.Sequential( + conv_dw(64, 128, 2), # 43 + 16 = 59 + conv_dw(128, 128, 1), # 59 + 32 = 91 + conv_dw(128, 128, 1), # 91 + 32 = 123 + conv_dw(128, 128, 1), # 123 + 32 = 155 + conv_dw(128, 128, 1), # 155 + 32 = 187 + conv_dw(128, 128, 1), # 187 + 32 = 219 + ) + self.stage3 = nn.Sequential( + conv_dw(128, 256, 2), # 219 +3 2 = 241 + conv_dw(256, 256, 1), # 241 + 64 = 301 + ) + self.avg = nn.AdaptiveAvgPool2d((1,1)) + self.fc = nn.Linear(256, 1000) + + def forward(self, x): + x = self.stage1(x) + x = self.stage2(x) + x = self.stage3(x) + x = self.avg(x) + # x = self.model(x) + x = x.view(-1, 256) + x = self.fc(x) + return x + diff --git a/models/retinaface.py b/models/retinaface.py new file mode 100644 index 0000000000000000000000000000000000000000..d530bd8395bba795938ca4038476f77a98254f96 --- /dev/null +++ b/models/retinaface.py @@ -0,0 +1,127 @@ +import torch +import torch.nn as nn +import torchvision.models.detection.backbone_utils as backbone_utils +import torchvision.models._utils as _utils +import torch.nn.functional as F +from collections import OrderedDict + +from models.net import MobileNetV1 as MobileNetV1 +from models.net import FPN as FPN +from models.net import SSH as SSH + + + +class ClassHead(nn.Module): + def __init__(self,inchannels=512,num_anchors=3): + super(ClassHead,self).__init__() + self.num_anchors = num_anchors + self.conv1x1 = nn.Conv2d(inchannels,self.num_anchors*2,kernel_size=(1,1),stride=1,padding=0) + + def forward(self,x): + out = self.conv1x1(x) + out = out.permute(0,2,3,1).contiguous() + + return out.view(out.shape[0], -1, 2) + +class BboxHead(nn.Module): + def __init__(self,inchannels=512,num_anchors=3): + super(BboxHead,self).__init__() + self.conv1x1 = nn.Conv2d(inchannels,num_anchors*4,kernel_size=(1,1),stride=1,padding=0) + + def forward(self,x): + out = self.conv1x1(x) + out = out.permute(0,2,3,1).contiguous() + + return out.view(out.shape[0], -1, 4) + +class LandmarkHead(nn.Module): + def __init__(self,inchannels=512,num_anchors=3): + super(LandmarkHead,self).__init__() + self.conv1x1 = nn.Conv2d(inchannels,num_anchors*10,kernel_size=(1,1),stride=1,padding=0) + + def forward(self,x): + out = self.conv1x1(x) + out = out.permute(0,2,3,1).contiguous() + + return out.view(out.shape[0], -1, 10) + +class RetinaFace(nn.Module): + def __init__(self, cfg = None, phase = 'train'): + """ + :param cfg: Network related settings. + :param phase: train or test. + """ + super(RetinaFace,self).__init__() + self.phase = phase + backbone = None + if cfg['name'] == 'mobilenet0.25': + backbone = MobileNetV1() + if cfg['pretrain']: + checkpoint = torch.load("./weights/mobilenetV1X0.25_pretrain.tar", map_location=torch.device('cpu')) + from collections import OrderedDict + new_state_dict = OrderedDict() + for k, v in checkpoint['state_dict'].items(): + name = k[7:] # remove module. 
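                    # Keys in the pretrained checkpoint were saved from a
                    # (Data)Parallel-wrapped model, so every name starts with
                    # "module."; slicing off those 7 characters restores the plain
                    # parameter names expected by load_state_dict.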
+ new_state_dict[name] = v + # load params + backbone.load_state_dict(new_state_dict) + elif cfg['name'] == 'Resnet50': + import torchvision.models as models + backbone = models.resnet50(pretrained=cfg['pretrain']) + + self.body = _utils.IntermediateLayerGetter(backbone, cfg['return_layers']) + in_channels_stage2 = cfg['in_channel'] + in_channels_list = [ + in_channels_stage2 * 2, + in_channels_stage2 * 4, + in_channels_stage2 * 8, + ] + out_channels = cfg['out_channel'] + self.fpn = FPN(in_channels_list,out_channels) + self.ssh1 = SSH(out_channels, out_channels) + self.ssh2 = SSH(out_channels, out_channels) + self.ssh3 = SSH(out_channels, out_channels) + + self.ClassHead = self._make_class_head(fpn_num=3, inchannels=cfg['out_channel']) + self.BboxHead = self._make_bbox_head(fpn_num=3, inchannels=cfg['out_channel']) + self.LandmarkHead = self._make_landmark_head(fpn_num=3, inchannels=cfg['out_channel']) + + def _make_class_head(self,fpn_num=3,inchannels=64,anchor_num=2): + classhead = nn.ModuleList() + for i in range(fpn_num): + classhead.append(ClassHead(inchannels,anchor_num)) + return classhead + + def _make_bbox_head(self,fpn_num=3,inchannels=64,anchor_num=2): + bboxhead = nn.ModuleList() + for i in range(fpn_num): + bboxhead.append(BboxHead(inchannels,anchor_num)) + return bboxhead + + def _make_landmark_head(self,fpn_num=3,inchannels=64,anchor_num=2): + landmarkhead = nn.ModuleList() + for i in range(fpn_num): + landmarkhead.append(LandmarkHead(inchannels,anchor_num)) + return landmarkhead + + def forward(self,inputs): + out = self.body(inputs) + + # FPN + fpn = self.fpn(out) + + # SSH + feature1 = self.ssh1(fpn[0]) + feature2 = self.ssh2(fpn[1]) + feature3 = self.ssh3(fpn[2]) + features = [feature1, feature2, feature3] + + bbox_regressions = torch.cat([self.BboxHead[i](feature) for i, feature in enumerate(features)], dim=1) + classifications = torch.cat([self.ClassHead[i](feature) for i, feature in enumerate(features)],dim=1) + ldm_regressions = torch.cat([self.LandmarkHead[i](feature) for i, feature in enumerate(features)], dim=1) + + if self.phase == 'train': + output = (bbox_regressions, classifications, ldm_regressions) + else: + output = (bbox_regressions, F.softmax(classifications, dim=-1), ldm_regressions) + return output \ No newline at end of file diff --git a/optimizer/__init__.py b/optimizer/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..74db9744a1f35e9f082e3a61997025ad4c22a8e7 --- /dev/null +++ b/optimizer/__init__.py @@ -0,0 +1,30 @@ +from torch.optim import SGD +from torch.optim import Adam +from torch.optim import ASGD +from torch.optim import Adamax +from torch.optim import Adadelta +from torch.optim import Adagrad +from torch.optim import RMSprop + +key2opt = { + 'sgd': SGD, + 'adam': Adam, + 'asgd': ASGD, + 'adamax': Adamax, + 'adadelta': Adadelta, + 'adagrad': Adagrad, + 'rmsprop': RMSprop, +} + + +def get_optimizer(optimizer_name=None): + if optimizer_name is None: + print("Using default 'SGD' optimizer") + return SGD + + else: + if optimizer_name not in key2opt: + raise NotImplementedError(f"Optimizer '{optimizer_name}' not implemented") + + print(f"Using optimizer: '{optimizer_name}'") + return key2opt[optimizer_name] diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000000000000000000000000000000000000..2fb4f79272d8a3efb56074e7df3946d5e2335fb5 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,24 @@ +albumentations==1.3.1 +bbox==0.9.4 +Cython==0.29.35 +ipython==8.14.0 +matplotlib==3.4.3 
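# NOTE: several packages below are pinned more than once with conflicting
# versions (numpy, Pillow, PyYAML, torch, torchvision); pip refuses duplicate
# pins of the same package in a requirements file, so keep a single version of
# each before installing.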
+numpy==1.25.0 +numpy==1.21.1 +opencv_python==4.5.5.62 +opencv_python_headless==4.7.0.72 +Pillow==9.3.0 +Pillow==9.4.0 +Pillow==9.5.0 +PyYAML==6.0 +PyYAML==6.0 +scikit_learn==1.2.2 +scipy==1.8.0 +streamlit==1.24.0 +tensorboardX==2.6.1 +timm==0.4.12 +torch==1.10.0 +torch==2.0.1+cu117 +torchvision==0.11.1 +torchvision==0.15.2+cu117 +tqdm==4.65.0 diff --git a/scheduler/__init__.py b/scheduler/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d02f3edf959d8e10b66c08e276ced619ecfd15dd --- /dev/null +++ b/scheduler/__init__.py @@ -0,0 +1,36 @@ +from torch.optim.lr_scheduler import _LRScheduler +from torch.optim.lr_scheduler import StepLR +from torch.optim.lr_scheduler import MultiStepLR +from torch.optim.lr_scheduler import ExponentialLR +from torch.optim.lr_scheduler import CosineAnnealingLR +from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts +from torch.optim.lr_scheduler import ReduceLROnPlateau + + +class ConstantLR(_LRScheduler): + def __init__(self, optimizer, last_epoch=-1): + super(ConstantLR, self).__init__(optimizer, last_epoch) + + def get_lr(self): + return [base_lr for base_lr in self.base_lrs] + + +SCHEDULERS = { + 'ConstantLR': ConstantLR, + "StepLR": StepLR, + "MultiStepLR": MultiStepLR, + "CosineAnnealingLR": CosineAnnealingLR, + "CosineAnnealingWarmRestarts": CosineAnnealingWarmRestarts, + "ExponentialLR": ExponentialLR, + "ReduceLROnPlateau": ReduceLROnPlateau +} + + +def get_scheduler(optimizer, kwargs): + if kwargs is None: + print("No lr scheduler is used.") + return ConstantLR(optimizer) + name = kwargs["name"] + kwargs.pop("name") + print("Using scheduler: '%s' with params: %s" % (name, kwargs)) + return SCHEDULERS[name](optimizer, **kwargs) diff --git a/trainer/__init__.py b/trainer/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..d7f4da6b97a20cf6c6dae6761dffea145a336cea --- /dev/null +++ b/trainer/__init__.py @@ -0,0 +1,5 @@ +from .abstract_trainer import AbstractTrainer, LEGAL_METRIC +from .exp_mgpu_trainer import ExpMultiGpuTrainer +from .exp_tester import ExpTester +from .utils import center_print, reduce_tensor +from .utils import exp_recons_loss diff --git a/trainer/abstract_trainer.py b/trainer/abstract_trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..4e1354a94ec55db2fcad0d951ecaca2f7b804fbe --- /dev/null +++ b/trainer/abstract_trainer.py @@ -0,0 +1,100 @@ +import os +import torch +import random +from collections import OrderedDict +from torchvision.utils import make_grid + +LEGAL_METRIC = ['Acc', 'AUC', 'LogLoss'] + + +class AbstractTrainer(object): + def __init__(self, config, stage="Train"): + feasible_stage = ["Train", "Test"] + if stage not in feasible_stage: + raise ValueError(f"stage should be in {feasible_stage}, but found '{stage}'") + + self.config = config + model_cfg = config.get("model", None) + data_cfg = config.get("data", None) + config_cfg = config.get("config", None) + + self.model_name = model_cfg.pop("name") + + self.gpu = None + self.dir = None + self.debug = None + self.device = None + self.resume = None + self.local_rank = None + self.num_classes = None + + self.best_metric = 0.0 + self.best_step = 1 + self.start_step = 1 + + self._initiated_settings(model_cfg, data_cfg, config_cfg) + + if stage == 'Train': + self._train_settings(model_cfg, data_cfg, config_cfg) + if stage == 'Test': + self._test_settings(model_cfg, data_cfg, config_cfg) + + def _initiated_settings(self, model_cfg, data_cfg, config_cfg): + raise NotImplementedError("Not 
implemented in abstract class.") + + def _train_settings(self, model_cfg, data_cfg, config_cfg): + raise NotImplementedError("Not implemented in abstract class.") + + def _test_settings(self, model_cfg, data_cfg, config_cfg): + raise NotImplementedError("Not implemented in abstract class.") + + def _save_ckpt(self, step, best=False): + raise NotImplementedError("Not implemented in abstract class.") + + def _load_ckpt(self, best=False, train=False): + raise NotImplementedError("Not implemented in abstract class.") + + def to_device(self, items): + return [obj.to(self.device) for obj in items] + + @staticmethod + def fixed_randomness(): + random.seed(0) + torch.manual_seed(0) + torch.cuda.manual_seed(0) + torch.cuda.manual_seed_all(0) + + def train(self): + raise NotImplementedError("Not implemented in abstract class.") + + def validate(self, epoch, step, timer, writer): + raise NotImplementedError("Not implemented in abstract class.") + + def test(self): + raise NotImplementedError("Not implemented in abstract class.") + + def plot_figure(self, images, pred, gt, nrow, categories=None, show=True): + import matplotlib.pyplot as plt + plot = make_grid( + images, nrow, padding=4, normalize=True, scale_each=True, pad_value=1) + if self.num_classes == 1: + pred = (pred >= 0.5).cpu().numpy() + else: + pred = pred.argmax(1).cpu().numpy() + gt = gt.cpu().numpy() + if categories is not None: + pred = [categories[i] for i in pred] + gt = [categories[i] for i in gt] + plot = plot.permute([1, 2, 0]) + plot = plot.cpu().numpy() + ret = plt.figure() + plt.imshow(plot) + plt.title("pred: %s\ngt: %s" % (pred, gt)) + plt.axis("off") + if show: + plt.savefig(os.path.join(self.dir, "test_image.png"), dpi=300) + plt.show() + plt.close() + else: + plt.close() + return ret diff --git a/trainer/exp_mgpu_trainer.py b/trainer/exp_mgpu_trainer.py new file mode 100644 index 0000000000000000000000000000000000000000..f9e2d43c4e040b4390870b7ca5af07809f5e30c3 --- /dev/null +++ b/trainer/exp_mgpu_trainer.py @@ -0,0 +1,370 @@ +import os +import sys +import time +import math +import yaml +import torch +import random +import numpy as np + +from tqdm import tqdm +from pprint import pprint +from torch.utils import data +import torch.distributed as dist +from torch.cuda.amp import autocast, GradScaler +from tensorboardX import SummaryWriter + +from dataset import load_dataset +from loss import get_loss +from model import load_model +from optimizer import get_optimizer +from scheduler import get_scheduler +from trainer import AbstractTrainer, LEGAL_METRIC +from trainer.utils import exp_recons_loss, MLLoss, reduce_tensor, center_print +from trainer.utils import MODELS_PATH, AccMeter, AUCMeter, AverageMeter, Logger, Timer + + +class ExpMultiGpuTrainer(AbstractTrainer): + def __init__(self, config, stage="Train"): + super(ExpMultiGpuTrainer, self).__init__(config, stage) + np.random.seed(2021) + + def _mprint(self, content=""): + if self.local_rank == 0: + print(content) + + def _initiated_settings(self, model_cfg=None, data_cfg=None, config_cfg=None): + self.local_rank = config_cfg["local_rank"] + + def _train_settings(self, model_cfg, data_cfg, config_cfg): + # debug mode: no log dir, no train_val operation. 
+ self.debug = config_cfg["debug"] + self._mprint(f"Using debug mode: {self.debug}.") + self._mprint("*" * 20) + + self.eval_metric = config_cfg["metric"] + if self.eval_metric not in LEGAL_METRIC: + raise ValueError(f"Evaluation metric must be in {LEGAL_METRIC}, but found " + f"{self.eval_metric}.") + if self.eval_metric == LEGAL_METRIC[-1]: + self.best_metric = 1.0e8 + + # distribution + dist.init_process_group(config_cfg["distribute"]["backend"]) + + # load training dataset + train_dataset = data_cfg["file"] + branch = data_cfg["train_branch"] + name = data_cfg["name"] + with open(train_dataset, "r") as f: + options = yaml.load(f, Loader=yaml.FullLoader) + train_options = options[branch] + self.train_set = load_dataset(name)(train_options) + # define training sampler + self.train_sampler = data.distributed.DistributedSampler(self.train_set) + # wrapped with data loader + self.train_loader = data.DataLoader(self.train_set, shuffle=False, + sampler=self.train_sampler, + num_workers=data_cfg.get("num_workers", 4), + batch_size=data_cfg["train_batch_size"]) + + if self.local_rank == 0: + # load validation dataset + val_options = options[data_cfg["val_branch"]] + self.val_set = load_dataset(name)(val_options) + # wrapped with data loader + self.val_loader = data.DataLoader(self.val_set, shuffle=True, + num_workers=data_cfg.get("num_workers", 4), + batch_size=data_cfg["val_batch_size"]) + + self.resume = config_cfg.get("resume", False) + + if not self.debug: + time_format = "%Y-%m-%d...%H.%M.%S" + run_id = time.strftime(time_format, time.localtime(time.time())) + self.run_id = config_cfg.get("id", run_id) + self.dir = os.path.join("runs", self.model_name, self.run_id) + + if self.local_rank == 0: + if not self.resume: + if os.path.exists(self.dir): + raise ValueError("Error: given id '%s' already exists." 
% self.run_id) + os.makedirs(self.dir, exist_ok=True) + print(f"Writing config file to file directory: {self.dir}.") + yaml.dump({"config": self.config, + "train_data": train_options, + "val_data": val_options}, + open(os.path.join(self.dir, 'train_config.yml'), 'w')) + # copy the script for the training model + model_file = MODELS_PATH[self.model_name] + os.system("cp " + model_file + " " + self.dir) + else: + print(f"Resuming the history in file directory: {self.dir}.") + + print(f"Logging directory: {self.dir}.") + + # redirect the std out stream + sys.stdout = Logger(os.path.join(self.dir, 'records.txt')) + center_print('Train configurations begins.') + pprint(self.config) + pprint(train_options) + pprint(val_options) + center_print('Train configurations ends.') + + # load model + self.num_classes = model_cfg["num_classes"] + self.device = "cuda:" + str(self.local_rank) + self.model = load_model(self.model_name)(**model_cfg) + self.model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(self.model).to(self.device) + self._mprint(f"Using SyncBatchNorm.") + self.model = torch.nn.parallel.DistributedDataParallel( + self.model, device_ids=[self.local_rank], find_unused_parameters=True) + + # load optimizer + optim_cfg = config_cfg.get("optimizer", None) + optim_name = optim_cfg.pop("name") + self.optimizer = get_optimizer(optim_name)(self.model.parameters(), **optim_cfg) + # load scheduler + self.scheduler = get_scheduler(self.optimizer, config_cfg.get("scheduler", None)) + # load loss + self.loss_criterion = get_loss(config_cfg.get("loss", None), device=self.device) + + # total number of steps (or epoch) to train + self.num_steps = train_options["num_steps"] + self.num_epoch = math.ceil(self.num_steps / len(self.train_loader)) + + # the number of steps to write down a log + self.log_steps = train_options["log_steps"] + # the number of steps to validate on val dataset once + self.val_steps = train_options["val_steps"] + + # balance coefficients + self.lambda_1 = config_cfg["lambda_1"] + self.lambda_2 = config_cfg["lambda_2"] + self.warmup_step = config_cfg.get('warmup_step', 0) + + self.contra_loss = MLLoss() + self.acc_meter = AccMeter() + self.loss_meter = AverageMeter() + self.recons_loss_meter = AverageMeter() + self.contra_loss_meter = AverageMeter() + + if self.resume and self.local_rank == 0: + self._load_ckpt(best=config_cfg.get("resume_best", False), train=True) + + def _test_settings(self, model_cfg, data_cfg, config_cfg): + # Not used. + raise NotImplementedError("The function is not intended to be used here.") + + def _load_ckpt(self, best=False, train=False): + # Not used. 
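        # Note: _train_settings still calls self._load_ckpt(...) when resume=True on
        # rank 0, so resuming a run currently ends in this NotImplementedError.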
+ raise NotImplementedError("The function is not intended to be used here.") + + def _save_ckpt(self, step, best=False): + save_dir = os.path.join(self.dir, f"best_model_{step}.bin" if best else "latest_model.bin") + torch.save({ + "step": step, + "best_step": self.best_step, + "best_metric": self.best_metric, + "eval_metric": self.eval_metric, + "model": self.model.module.state_dict(), + "optimizer": self.optimizer.state_dict(), + "scheduler": self.scheduler.state_dict(), + }, save_dir) + + def train(self): + try: + timer = Timer() + grad_scalar = GradScaler(2 ** 10) + if self.local_rank == 0: + writer = None if self.debug else SummaryWriter(log_dir=self.dir) + center_print("Training begins......") + else: + writer = None + start_epoch = self.start_step // len(self.train_loader) + 1 + for epoch_idx in range(start_epoch, self.num_epoch + 1): + # set sampler + self.train_sampler.set_epoch(epoch_idx) + + # reset meter + self.acc_meter.reset() + self.loss_meter.reset() + self.recons_loss_meter.reset() + self.contra_loss_meter.reset() + self.optimizer.step() + + train_generator = enumerate(self.train_loader, 1) + # wrap train generator with tqdm for process 0 + if self.local_rank == 0: + train_generator = tqdm(train_generator, position=0, leave=True) + + for batch_idx, train_data in train_generator: + global_step = (epoch_idx - 1) * len(self.train_loader) + batch_idx + self.model.train() + I, Y = train_data + I = self.train_loader.dataset.load_item(I) + in_I, Y = self.to_device((I, Y)) + + # warm-up lr + if self.warmup_step != 0 and global_step <= self.warmup_step: + lr = self.config['config']['optimizer']['lr'] * float(global_step) / self.warmup_step + for param_group in self.optimizer.param_groups: + param_group['lr'] = lr + + self.optimizer.zero_grad() + with autocast(): + Y_pre = self.model(in_I) + + # for BCE Setting: + if self.num_classes == 1: + Y_pre = Y_pre.squeeze() + loss = self.loss_criterion(Y_pre, Y.float()) + Y_pre = torch.sigmoid(Y_pre) + else: + loss = self.loss_criterion(Y_pre, Y) + + # flood + loss = (loss - 0.04).abs() + 0.04 + recons_loss = exp_recons_loss(self.model.module.loss_inputs['recons'], (in_I, Y)) + contra_loss = self.contra_loss(self.model.module.loss_inputs['contra'], Y) + loss += self.lambda_1 * recons_loss + self.lambda_2 * contra_loss + + grad_scalar.scale(loss).backward() + grad_scalar.step(self.optimizer) + grad_scalar.update() + if self.warmup_step == 0 or global_step > self.warmup_step: + self.scheduler.step() + + self.acc_meter.update(Y_pre, Y, self.num_classes == 1) + self.loss_meter.update(reduce_tensor(loss).item()) + self.recons_loss_meter.update(reduce_tensor(recons_loss).item()) + self.contra_loss_meter.update(reduce_tensor(contra_loss).item()) + iter_acc = reduce_tensor(self.acc_meter.mean_acc()).item() + + if self.local_rank == 0: + if global_step % self.log_steps == 0 and writer is not None: + writer.add_scalar("train/Acc", iter_acc, global_step) + writer.add_scalar("train/Loss", self.loss_meter.avg, global_step) + writer.add_scalar("train/Recons_Loss", + self.recons_loss_meter.avg if self.lambda_1 != 0 else 0., + global_step) + writer.add_scalar("train/Contra_Loss", self.contra_loss_meter.avg, global_step) + writer.add_scalar("train/LR", self.scheduler.get_last_lr()[0], global_step) + + # log training step + train_generator.set_description( + "Train Epoch %d (%d/%d), Global Step %d, Loss %.4f, Recons %.4f, con %.4f, " + "ACC %.4f, LR %.6f" % ( + epoch_idx, batch_idx, len(self.train_loader), global_step, + self.loss_meter.avg, 
self.recons_loss_meter.avg, self.contra_loss_meter.avg, + iter_acc, self.scheduler.get_last_lr()[0]) + ) + + # validating process + if global_step % self.val_steps == 0 and not self.debug: + print() + self.validate(epoch_idx, global_step, timer, writer) + + # when num_steps has been set and the training process will + # be stopped earlier than the specified num_epochs, then stop. + if self.num_steps is not None and global_step == self.num_steps: + if writer is not None: + writer.close() + if self.local_rank == 0: + print() + center_print("Training process ends.") + dist.destroy_process_group() + return + # close the tqdm bar when one epoch ends + if self.local_rank == 0: + train_generator.close() + print() + # training ends with integer epochs + if self.local_rank == 0: + if writer is not None: + writer.close() + center_print("Training process ends.") + dist.destroy_process_group() + except Exception as e: + dist.destroy_process_group() + raise e + + def validate(self, epoch, step, timer, writer): + v_idx = random.randint(1, len(self.val_loader) + 1) + categories = self.val_loader.dataset.categories + self.model.eval() + with torch.no_grad(): + acc = AccMeter() + auc = AUCMeter() + loss_meter = AverageMeter() + cur_acc = 0.0 # Higher is better + cur_auc = 0.0 # Higher is better + cur_loss = 1e8 # Lower is better + val_generator = tqdm(enumerate(self.val_loader, 1), position=0, leave=True) + for val_idx, val_data in val_generator: + I, Y = val_data + I = self.val_loader.dataset.load_item(I) + in_I, Y = self.to_device((I, Y)) + Y_pre = self.model(in_I) + + # for BCE Setting: + if self.num_classes == 1: + Y_pre = Y_pre.squeeze() + loss = self.loss_criterion(Y_pre, Y.float()) + Y_pre = torch.sigmoid(Y_pre) + else: + loss = self.loss_criterion(Y_pre, Y) + + acc.update(Y_pre, Y, self.num_classes == 1) + auc.update(Y_pre, Y, self.num_classes == 1) + loss_meter.update(loss.item()) + + cur_acc = acc.mean_acc() + cur_loss = loss_meter.avg + + val_generator.set_description( + "Eval Epoch %d (%d/%d), Global Step %d, Loss %.4f, ACC %.4f" % ( + epoch, val_idx, len(self.val_loader), step, + cur_loss, cur_acc) + ) + + if val_idx == v_idx or val_idx == 1: + sample_recons = list() + for _ in self.model.module.loss_inputs['recons']: + sample_recons.append(_[:4].to("cpu")) + # show images + images = I[:4] + images = torch.cat([images, *sample_recons], dim=0) + pred = Y_pre[:4] + gt = Y[:4] + figure = self.plot_figure(images, pred, gt, 4, categories, show=False) + + cur_auc = auc.mean_auc() + print("Eval Epoch %d, Loss %.4f, ACC %.4f, AUC %.4f" % (epoch, cur_loss, cur_acc, cur_auc)) + if writer is not None: + writer.add_scalar("val/Loss", cur_loss, step) + writer.add_scalar("val/Acc", cur_acc, step) + writer.add_scalar("val/AUC", cur_auc, step) + writer.add_figure("val/Figures", figure, step) + # record the best acc and the corresponding step + if self.eval_metric == 'Acc' and cur_acc >= self.best_metric: + self.best_metric = cur_acc + self.best_step = step + self._save_ckpt(step, best=True) + elif self.eval_metric == 'AUC' and cur_auc >= self.best_metric: + self.best_metric = cur_auc + self.best_step = step + self._save_ckpt(step, best=True) + elif self.eval_metric == 'LogLoss' and cur_loss <= self.best_metric: + self.best_metric = cur_loss + self.best_step = step + self._save_ckpt(step, best=True) + print("Best Step %d, Best %s %.4f, Running Time: %s, Estimated Time: %s" % ( + self.best_step, self.eval_metric, self.best_metric, + timer.measure(), timer.measure(step / self.num_steps) + )) + 
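            # Whether or not a new best checkpoint was written above, refresh the
            # rolling "latest_model.bin" snapshot for this validation step.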
self._save_ckpt(step, best=False) + + def test(self): + # Not used. + raise NotImplementedError("The function is not intended to be used here.") diff --git a/trainer/exp_tester.py b/trainer/exp_tester.py new file mode 100644 index 0000000000000000000000000000000000000000..ee4f219f0ba60c6ded44e3b7c95eb7b21fc0be46 --- /dev/null +++ b/trainer/exp_tester.py @@ -0,0 +1,144 @@ +import os +import sys +import yaml +import torch +import random + +from tqdm import tqdm +from pprint import pprint +from torch.utils import data + +from dataset import load_dataset +from loss import get_loss +from model import load_model +from model.common import freeze_weights +from trainer import AbstractTrainer +from trainer.utils import AccMeter, AUCMeter, AverageMeter, Logger, center_print + + +class ExpTester(AbstractTrainer): + def __init__(self, config, stage="Test"): + super(ExpTester, self).__init__(config, stage) + + if torch.cuda.is_available() and self.device is not None: + print(f"Using cuda device: {self.device}.") + self.gpu = True + self.model = self.model.to(self.device) + else: + print("Using cpu device.") + self.device = torch.device("cpu") + + def _initiated_settings(self, model_cfg=None, data_cfg=None, config_cfg=None): + self.gpu = False + self.device = config_cfg.get("device", None) + + def _train_settings(self, model_cfg=None, data_cfg=None, config_cfg=None): + # Not used. + raise NotImplementedError("The function is not intended to be used here.") + + def _test_settings(self, model_cfg=None, data_cfg=None, config_cfg=None): + # load test dataset + test_dataset = data_cfg["file"] + branch = data_cfg["test_branch"] + name = data_cfg["name"] + with open(test_dataset, "r") as f: + options = yaml.load(f, Loader=yaml.FullLoader) + test_options = options[branch] + self.test_set = load_dataset(name)(test_options) + # wrapped with data loader + self.test_batch_size = data_cfg["test_batch_size"] + self.test_loader = data.DataLoader(self.test_set, shuffle=False, + batch_size=self.test_batch_size) + self.run_id = config_cfg["id"] + self.ckpt_fold = config_cfg.get("ckpt_fold", "runs") + self.dir = os.path.join(self.ckpt_fold, self.model_name, self.run_id) + + # load model + self.num_classes = model_cfg["num_classes"] + self.model = load_model(self.model_name)(**model_cfg) + + # load loss + self.loss_criterion = get_loss(config_cfg.get("loss", None)) + + # redirect the std out stream + sys.stdout = Logger(os.path.join(self.dir, "test_result.txt")) + print('Run dir: {}'.format(self.dir)) + + center_print('Test configurations begins') + pprint(self.config) + pprint(test_options) + center_print('Test configurations ends') + + self.ckpt = config_cfg.get("ckpt", "best_model") + self._load_ckpt(best=True, train=False) + + def _save_ckpt(self, step, best=False): + # Not used. 
+ raise NotImplementedError("The function is not intended to be used here.") + + def _load_ckpt(self, best=False, train=False): + load_dir = os.path.join(self.dir, self.ckpt + ".bin" if best else "latest_model.bin") + load_dict = torch.load(load_dir, map_location=self.device) + self.start_step = load_dict["step"] + self.best_step = load_dict["best_step"] + self.best_metric = load_dict.get("best_metric", None) + if self.best_metric is None: + self.best_metric = load_dict.get("best_acc") + self.eval_metric = load_dict.get("eval_metric", None) + if self.eval_metric is None: + self.eval_metric = load_dict.get("Acc") + self.model.load_state_dict(load_dict["model"]) + print(f"Loading checkpoint from {load_dir}, best step: {self.best_step}, " + f"best {self.eval_metric}: {round(self.best_metric.item(), 4)}.") + + def train(self): + # Not used. + raise NotImplementedError("The function is not intended to be used here.") + + def validate(self, epoch, step, timer, writer): + # Not used. + raise NotImplementedError("The function is not intended to be used here.") + + def test(self, display_images=False): + freeze_weights(self.model) + t_idx = random.randint(1, len(self.test_loader) + 1) + self.fixed_randomness() # for reproduction + + acc = AccMeter() + auc = AUCMeter() + logloss = AverageMeter() + test_generator = tqdm(enumerate(self.test_loader, 1)) + categories = self.test_loader.dataset.categories + for idx, test_data in test_generator: + self.model.eval() + I, Y = test_data + I = self.test_loader.dataset.load_item(I) + if self.gpu: + in_I, Y = self.to_device((I, Y)) + else: + in_I, Y = (I, Y) + Y_pre = self.model(in_I) + + # for BCE Setting: + if self.num_classes == 1: + Y_pre = Y_pre.squeeze() + loss = self.loss_criterion(Y_pre, Y.float()) + Y_pre = torch.sigmoid(Y_pre) + else: + loss = self.loss_criterion(Y_pre, Y) + + acc.update(Y_pre, Y, use_bce=self.num_classes == 1) + auc.update(Y_pre, Y, use_bce=self.num_classes == 1) + logloss.update(loss.item()) + + test_generator.set_description("Test %d/%d" % (idx, len(self.test_loader))) + if display_images and idx == t_idx: + # show images + images = I[:4] + pred = Y_pre[:4] + gt = Y[:4] + self.plot_figure(images, pred, gt, 2, categories) + + print("Test, FINAL LOSS %.4f, FINAL ACC %.4f, FINAL AUC %.4f" % + (logloss.avg, acc.mean_acc(), auc.mean_auc())) + auc.curve(self.dir) diff --git a/trainer/utils.py b/trainer/utils.py new file mode 100644 index 0000000000000000000000000000000000000000..fcf1c4a864d34a18133667293c9dc76215af1011 --- /dev/null +++ b/trainer/utils.py @@ -0,0 +1,183 @@ +import os +import sys +import time +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.distributed as dist +from collections import OrderedDict + +import numpy as np +from sklearn.metrics import roc_auc_score, roc_curve +from scipy.optimize import brentq +from scipy.interpolate import interp1d + +# Tracking the path to the definition of the model. 
+MODELS_PATH = { + "Recce": "model/network/Recce.py" +} + + +def exp_recons_loss(recons, x): + x, y = x + loss = torch.tensor(0., device=y.device) + real_index = torch.where(1 - y)[0] + for r in recons: + if real_index.numel() > 0: + real_x = torch.index_select(x, dim=0, index=real_index) + real_rec = torch.index_select(r, dim=0, index=real_index) + real_rec = F.interpolate(real_rec, size=x.shape[-2:], mode='bilinear', align_corners=True) + loss += torch.mean(torch.abs(real_rec - real_x)) + return loss + + +def center_print(content, around='*', repeat_around=10): + num = repeat_around + s = around + print(num * s + ' %s ' % content + num * s) + + +def reduce_tensor(t): + rt = t.clone() + dist.all_reduce(rt) + rt /= float(dist.get_world_size()) + return rt + + +def tensor2image(tensor): + image = tensor.permute([1, 2, 0]).cpu().detach().numpy() + return (image - np.min(image)) / (np.max(image) - np.min(image)) + + +def state_dict(state_dict): + """ Remove 'module' keyword in state dictionary. """ + weights = OrderedDict() + for k, v in state_dict.items(): + weights.update({k.replace("module.", ""): v}) + return weights + + +class Logger(object): + def __init__(self, filename): + self.terminal = sys.stdout + self.log = open(filename, "a") + + def write(self, message): + self.terminal.write(message) + self.log.write(message) + self.log.flush() + + def flush(self): + pass + + +class Timer(object): + """The class for timer.""" + + def __init__(self): + self.o = time.time() + + def measure(self, p=1): + x = (time.time() - self.o) / p + x = int(x) + if x >= 3600: + return '{:.1f}h'.format(x / 3600) + if x >= 60: + return '{}m'.format(round(x / 60)) + return '{}s'.format(x) + + +class MLLoss(nn.Module): + def __init__(self): + super(MLLoss, self).__init__() + + def forward(self, input, target, eps=1e-6): + # 0 - real; 1 - fake. + loss = torch.tensor(0., device=target.device) + batch_size = target.shape[0] + mat_1 = torch.hstack([target.unsqueeze(-1)] * batch_size) + mat_2 = torch.vstack([target] * batch_size) + diff_mat = torch.logical_xor(mat_1, mat_2).float() + or_mat = torch.logical_or(mat_1, mat_2) + eye = torch.eye(batch_size, device=target.device) + or_mat = torch.logical_or(or_mat, eye).float() + sim_mat = 1. - or_mat + for _ in input: + diff = torch.sum(_ * diff_mat, dim=[0, 1]) / (torch.sum(diff_mat, dim=[0, 1]) + eps) + sim = torch.sum(_ * sim_mat, dim=[0, 1]) / (torch.sum(sim_mat, dim=[0, 1]) + eps) + partial_loss = 1. 
- sim + diff + loss += max(partial_loss, torch.zeros_like(partial_loss)) + return loss + + +class AccMeter(object): + def __init__(self): + self.nums = 0 + self.acc = 0 + + def reset(self): + self.nums = 0 + self.acc = 0 + + def update(self, pred, target, use_bce=False): + if use_bce: + pred = (pred >= 0.5).int() + else: + pred = pred.argmax(1) + self.nums += target.shape[0] + self.acc += torch.sum(pred == target) + + def mean_acc(self): + return self.acc / self.nums + + +class AUCMeter(object): + def __init__(self): + self.score = None + self.true = None + + def reset(self): + self.score = None + self.true = None + + def update(self, score, true, use_bce=False): + if use_bce: + score = score.detach().cpu().numpy() + else: + score = torch.softmax(score.detach(), dim=-1) + score = torch.select(score, 1, 1).cpu().numpy() + true = true.flatten().cpu().numpy() + self.score = score if self.score is None else np.concatenate([self.score, score]) + self.true = true if self.true is None else np.concatenate([self.true, true]) + + def mean_auc(self): + return roc_auc_score(self.true, self.score) + + def curve(self, prefix): + fpr, tpr, thresholds = roc_curve(self.true, self.score, pos_label=1) + eer = brentq(lambda x: 1. - x - interp1d(fpr, tpr)(x), 0., 1.) + thresh = interp1d(fpr, thresholds)(eer) + print(f"# EER: {eer:.4f}(thresh: {thresh:.4f})") + torch.save([fpr, tpr, thresholds], os.path.join(prefix, "roc_curve.pickle")) + + +class AverageMeter(object): + """Computes and stores the average and current value""" + + def __init__(self): + self.val = 0 + self.avg = 0 + self.sum = 0 + self.count = 0 + + def reset(self): + self.val = 0 + self.avg = 0 + self.sum = 0 + self.count = 0 + + def update(self, val, n=1): + self.val = val + self.sum += val * n + self.count += n + self.avg = self.sum / self.count diff --git a/utils/__init__.py b/utils/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/utils/__pycache__/__init__.cpython-39.pyc b/utils/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..89056f4c7c1083fe4de887aaa7c47db0dcae83b0 Binary files /dev/null and b/utils/__pycache__/__init__.cpython-39.pyc differ diff --git a/utils/__pycache__/box_utils.cpython-39.pyc b/utils/__pycache__/box_utils.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6c48f6166be6b833d39fa64927b2783c4d3396eb Binary files /dev/null and b/utils/__pycache__/box_utils.cpython-39.pyc differ diff --git a/utils/box_utils.py b/utils/box_utils.py new file mode 100644 index 0000000000000000000000000000000000000000..c1d12bc612ae3ba3ea9d138bfc5997a2b15d8dd9 --- /dev/null +++ b/utils/box_utils.py @@ -0,0 +1,330 @@ +import torch +import numpy as np + + +def point_form(boxes): + """ Convert prior_boxes to (xmin, ymin, xmax, ymax) + representation for comparison to point form ground truth data. + Args: + boxes: (tensor) center-size default boxes from priorbox layers. + Return: + boxes: (tensor) Converted xmin, ymin, xmax, ymax form of boxes. + """ + return torch.cat((boxes[:, :2] - boxes[:, 2:]/2, # xmin, ymin + boxes[:, :2] + boxes[:, 2:]/2), 1) # xmax, ymax + + +def center_size(boxes): + """ Convert prior_boxes to (cx, cy, w, h) + representation for comparison to center-size form ground truth data. + Args: + boxes: (tensor) point_form boxes + Return: + boxes: (tensor) Converted xmin, ymin, xmax, ymax form of boxes. 
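        Note: the torch.cat call below passes its pieces as separate positional
        arguments instead of a single tuple, so invoking this helper raises a
        TypeError; nothing in the code included in this patch appears to call it.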
+ """ + return torch.cat((boxes[:, 2:] + boxes[:, :2])/2, # cx, cy + boxes[:, 2:] - boxes[:, :2], 1) # w, h + + +def intersect(box_a, box_b): + """ We resize both tensors to [A,B,2] without new malloc: + [A,2] -> [A,1,2] -> [A,B,2] + [B,2] -> [1,B,2] -> [A,B,2] + Then we compute the area of intersect between box_a and box_b. + Args: + box_a: (tensor) bounding boxes, Shape: [A,4]. + box_b: (tensor) bounding boxes, Shape: [B,4]. + Return: + (tensor) intersection area, Shape: [A,B]. + """ + A = box_a.size(0) + B = box_b.size(0) + max_xy = torch.min(box_a[:, 2:].unsqueeze(1).expand(A, B, 2), + box_b[:, 2:].unsqueeze(0).expand(A, B, 2)) + min_xy = torch.max(box_a[:, :2].unsqueeze(1).expand(A, B, 2), + box_b[:, :2].unsqueeze(0).expand(A, B, 2)) + inter = torch.clamp((max_xy - min_xy), min=0) + return inter[:, :, 0] * inter[:, :, 1] + + +def jaccard(box_a, box_b): + """Compute the jaccard overlap of two sets of boxes. The jaccard overlap + is simply the intersection over union of two boxes. Here we operate on + ground truth boxes and default boxes. + E.g.: + A ∩ B / A ∪ B = A ∩ B / (area(A) + area(B) - A ∩ B) + Args: + box_a: (tensor) Ground truth bounding boxes, Shape: [num_objects,4] + box_b: (tensor) Prior boxes from priorbox layers, Shape: [num_priors,4] + Return: + jaccard overlap: (tensor) Shape: [box_a.size(0), box_b.size(0)] + """ + inter = intersect(box_a, box_b) + area_a = ((box_a[:, 2]-box_a[:, 0]) * + (box_a[:, 3]-box_a[:, 1])).unsqueeze(1).expand_as(inter) # [A,B] + area_b = ((box_b[:, 2]-box_b[:, 0]) * + (box_b[:, 3]-box_b[:, 1])).unsqueeze(0).expand_as(inter) # [A,B] + union = area_a + area_b - inter + return inter / union # [A,B] + + +def matrix_iou(a, b): + """ + return iou of a and b, numpy version for data augenmentation + """ + lt = np.maximum(a[:, np.newaxis, :2], b[:, :2]) + rb = np.minimum(a[:, np.newaxis, 2:], b[:, 2:]) + + area_i = np.prod(rb - lt, axis=2) * (lt < rb).all(axis=2) + area_a = np.prod(a[:, 2:] - a[:, :2], axis=1) + area_b = np.prod(b[:, 2:] - b[:, :2], axis=1) + return area_i / (area_a[:, np.newaxis] + area_b - area_i) + + +def matrix_iof(a, b): + """ + return iof of a and b, numpy version for data augenmentation + """ + lt = np.maximum(a[:, np.newaxis, :2], b[:, :2]) + rb = np.minimum(a[:, np.newaxis, 2:], b[:, 2:]) + + area_i = np.prod(rb - lt, axis=2) * (lt < rb).all(axis=2) + area_a = np.prod(a[:, 2:] - a[:, :2], axis=1) + return area_i / np.maximum(area_a[:, np.newaxis], 1) + + +def match(threshold, truths, priors, variances, labels, landms, loc_t, conf_t, landm_t, idx): + """Match each prior box with the ground truth box of the highest jaccard + overlap, encode the bounding boxes, then return the matched indices + corresponding to both confidence and location preds. + Args: + threshold: (float) The overlap threshold used when mathing boxes. + truths: (tensor) Ground truth boxes, Shape: [num_obj, 4]. + priors: (tensor) Prior boxes from priorbox layers, Shape: [n_priors,4]. + variances: (tensor) Variances corresponding to each prior coord, + Shape: [num_priors, 4]. + labels: (tensor) All the class labels for the image, Shape: [num_obj]. + landms: (tensor) Ground truth landms, Shape [num_obj, 10]. + loc_t: (tensor) Tensor to be filled w/ endcoded location targets. + conf_t: (tensor) Tensor to be filled w/ matched indices for conf preds. + landm_t: (tensor) Tensor to be filled w/ endcoded landm targets. + idx: (int) current batch index + Return: + The matched indices corresponding to 1)location 2)confidence 3)landm preds. 
+ """ + # jaccard index + overlaps = jaccard( + truths, + point_form(priors) + ) + # (Bipartite Matching) + # [1,num_objects] best prior for each ground truth + best_prior_overlap, best_prior_idx = overlaps.max(1, keepdim=True) + + # ignore hard gt + valid_gt_idx = best_prior_overlap[:, 0] >= 0.2 + best_prior_idx_filter = best_prior_idx[valid_gt_idx, :] + if best_prior_idx_filter.shape[0] <= 0: + loc_t[idx] = 0 + conf_t[idx] = 0 + return + + # [1,num_priors] best ground truth for each prior + best_truth_overlap, best_truth_idx = overlaps.max(0, keepdim=True) + best_truth_idx.squeeze_(0) + best_truth_overlap.squeeze_(0) + best_prior_idx.squeeze_(1) + best_prior_idx_filter.squeeze_(1) + best_prior_overlap.squeeze_(1) + best_truth_overlap.index_fill_(0, best_prior_idx_filter, 2) # ensure best prior + # TODO refactor: index best_prior_idx with long tensor + # ensure every gt matches with its prior of max overlap + for j in range(best_prior_idx.size(0)): # 判别此anchor是预测哪一个boxes + best_truth_idx[best_prior_idx[j]] = j + matches = truths[best_truth_idx] # Shape: [num_priors,4] 此处为每一个anchor对应的bbox取出来 + conf = labels[best_truth_idx] # Shape: [num_priors] 此处为每一个anchor对应的label取出来 + conf[best_truth_overlap < threshold] = 0 # label as background overlap<0.35的全部作为负样本 + loc = encode(matches, priors, variances) + + matches_landm = landms[best_truth_idx] + landm = encode_landm(matches_landm, priors, variances) + loc_t[idx] = loc # [num_priors,4] encoded offsets to learn + conf_t[idx] = conf # [num_priors] top class label for each prior + landm_t[idx] = landm + + +def encode(matched, priors, variances): + """Encode the variances from the priorbox layers into the ground truth boxes + we have matched (based on jaccard overlap) with the prior boxes. + Args: + matched: (tensor) Coords of ground truth for each prior in point-form + Shape: [num_priors, 4]. + priors: (tensor) Prior boxes in center-offset form + Shape: [num_priors,4]. + variances: (list[float]) Variances of priorboxes + Return: + encoded boxes (tensor), Shape: [num_priors, 4] + """ + + # dist b/t match center and prior's center + g_cxcy = (matched[:, :2] + matched[:, 2:])/2 - priors[:, :2] + # encode variance + g_cxcy /= (variances[0] * priors[:, 2:]) + # match wh / prior wh + g_wh = (matched[:, 2:] - matched[:, :2]) / priors[:, 2:] + g_wh = torch.log(g_wh) / variances[1] + # return target for smooth_l1_loss + return torch.cat([g_cxcy, g_wh], 1) # [num_priors,4] + +def encode_landm(matched, priors, variances): + """Encode the variances from the priorbox layers into the ground truth boxes + we have matched (based on jaccard overlap) with the prior boxes. + Args: + matched: (tensor) Coords of ground truth for each prior in point-form + Shape: [num_priors, 10]. + priors: (tensor) Prior boxes in center-offset form + Shape: [num_priors,4]. 
+ variances: (list[float]) Variances of priorboxes + Return: + encoded landm (tensor), Shape: [num_priors, 10] + """ + + # dist b/t match center and prior's center + matched = torch.reshape(matched, (matched.size(0), 5, 2)) + priors_cx = priors[:, 0].unsqueeze(1).expand(matched.size(0), 5).unsqueeze(2) + priors_cy = priors[:, 1].unsqueeze(1).expand(matched.size(0), 5).unsqueeze(2) + priors_w = priors[:, 2].unsqueeze(1).expand(matched.size(0), 5).unsqueeze(2) + priors_h = priors[:, 3].unsqueeze(1).expand(matched.size(0), 5).unsqueeze(2) + priors = torch.cat([priors_cx, priors_cy, priors_w, priors_h], dim=2) + g_cxcy = matched[:, :, :2] - priors[:, :, :2] + # encode variance + g_cxcy /= (variances[0] * priors[:, :, 2:]) + # g_cxcy /= priors[:, :, 2:] + g_cxcy = g_cxcy.reshape(g_cxcy.size(0), -1) + # return target for smooth_l1_loss + return g_cxcy + + +# Adapted from https://github.com/Hakuyume/chainer-ssd +def decode(loc, priors, variances): + """Decode locations from predictions using priors to undo + the encoding we did for offset regression at train time. + Args: + loc (tensor): location predictions for loc layers, + Shape: [num_priors,4] + priors (tensor): Prior boxes in center-offset form. + Shape: [num_priors,4]. + variances: (list[float]) Variances of priorboxes + Return: + decoded bounding box predictions + """ + + boxes = torch.cat(( + priors[:, :2] + loc[:, :2] * variances[0] * priors[:, 2:], + priors[:, 2:] * torch.exp(loc[:, 2:] * variances[1])), 1) + boxes[:, :2] -= boxes[:, 2:] / 2 + boxes[:, 2:] += boxes[:, :2] + return boxes + +def decode_landm(pre, priors, variances): + """Decode landm from predictions using priors to undo + the encoding we did for offset regression at train time. + Args: + pre (tensor): landm predictions for loc layers, + Shape: [num_priors,10] + priors (tensor): Prior boxes in center-offset form. + Shape: [num_priors,4]. + variances: (list[float]) Variances of priorboxes + Return: + decoded landm predictions + """ + landms = torch.cat((priors[:, :2] + pre[:, :2] * variances[0] * priors[:, 2:], + priors[:, :2] + pre[:, 2:4] * variances[0] * priors[:, 2:], + priors[:, :2] + pre[:, 4:6] * variances[0] * priors[:, 2:], + priors[:, :2] + pre[:, 6:8] * variances[0] * priors[:, 2:], + priors[:, :2] + pre[:, 8:10] * variances[0] * priors[:, 2:], + ), dim=1) + return landms + + +def log_sum_exp(x): + """Utility function for computing log_sum_exp while determining + This will be used to determine unaveraged confidence loss across + all examples in a batch. + Args: + x (Variable(tensor)): conf_preds from conf layers + """ + x_max = x.data.max() + return torch.log(torch.sum(torch.exp(x-x_max), 1, keepdim=True)) + x_max + + +# Original author: Francisco Massa: +# https://github.com/fmassa/object-detection.torch +# Ported to PyTorch by Max deGroot (02/01/2017) +def nms(boxes, scores, overlap=0.5, top_k=200): + """Apply non-maximum suppression at test time to avoid detecting too many + overlapping bounding boxes for a given object. + Args: + boxes: (tensor) The location preds for the img, Shape: [num_priors,4]. + scores: (tensor) The class predscores for the img, Shape:[num_priors]. + overlap: (float) The overlap thresh for suppressing unnecessary boxes. + top_k: (int) The Maximum number of box preds to consider. + Return: + The indices of the kept boxes with respect to num_priors. 
+ """ + + keep = torch.Tensor(scores.size(0)).fill_(0).long() + if boxes.numel() == 0: + return keep + x1 = boxes[:, 0] + y1 = boxes[:, 1] + x2 = boxes[:, 2] + y2 = boxes[:, 3] + area = torch.mul(x2 - x1, y2 - y1) + v, idx = scores.sort(0) # sort in ascending order + # I = I[v >= 0.01] + idx = idx[-top_k:] # indices of the top-k largest vals + xx1 = boxes.new() + yy1 = boxes.new() + xx2 = boxes.new() + yy2 = boxes.new() + w = boxes.new() + h = boxes.new() + + # keep = torch.Tensor() + count = 0 + while idx.numel() > 0: + i = idx[-1] # index of current largest val + # keep.append(i) + keep[count] = i + count += 1 + if idx.size(0) == 1: + break + idx = idx[:-1] # remove kept element from view + # load bboxes of next highest vals + torch.index_select(x1, 0, idx, out=xx1) + torch.index_select(y1, 0, idx, out=yy1) + torch.index_select(x2, 0, idx, out=xx2) + torch.index_select(y2, 0, idx, out=yy2) + # store element-wise max with next highest score + xx1 = torch.clamp(xx1, min=x1[i]) + yy1 = torch.clamp(yy1, min=y1[i]) + xx2 = torch.clamp(xx2, max=x2[i]) + yy2 = torch.clamp(yy2, max=y2[i]) + w.resize_as_(xx2) + h.resize_as_(yy2) + w = xx2 - xx1 + h = yy2 - yy1 + # check sizes of xx1 and xx2.. after each iteration + w = torch.clamp(w, min=0.0) + h = torch.clamp(h, min=0.0) + inter = w*h + # IoU = i / (area(a) + area(b) - i) + rem_areas = torch.index_select(area, 0, idx) # load remaining areas) + union = (rem_areas - inter) + area[i] + IoU = inter/union # store result in iou + # keep only elements with an IoU <= overlap + idx = idx[IoU.le(overlap)] + return keep, count + + diff --git a/utils/nms/__init__.py b/utils/nms/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/utils/nms/__pycache__/__init__.cpython-39.pyc b/utils/nms/__pycache__/__init__.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6c1162ba5eeb05a7b5807a4a89552855a84c124a Binary files /dev/null and b/utils/nms/__pycache__/__init__.cpython-39.pyc differ diff --git a/utils/nms/__pycache__/py_cpu_nms.cpython-39.pyc b/utils/nms/__pycache__/py_cpu_nms.cpython-39.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2e692f9eb5a7632021787f5f3771faa3fb14e320 Binary files /dev/null and b/utils/nms/__pycache__/py_cpu_nms.cpython-39.pyc differ diff --git a/utils/nms/py_cpu_nms.py b/utils/nms/py_cpu_nms.py new file mode 100644 index 0000000000000000000000000000000000000000..54e7b25fef72b518df6dcf8d6fb78b986796c6e3 --- /dev/null +++ b/utils/nms/py_cpu_nms.py @@ -0,0 +1,38 @@ +# -------------------------------------------------------- +# Fast R-CNN +# Copyright (c) 2015 Microsoft +# Licensed under The MIT License [see LICENSE for details] +# Written by Ross Girshick +# -------------------------------------------------------- + +import numpy as np + +def py_cpu_nms(dets, thresh): + """Pure Python NMS baseline.""" + x1 = dets[:, 0] + y1 = dets[:, 1] + x2 = dets[:, 2] + y2 = dets[:, 3] + scores = dets[:, 4] + + areas = (x2 - x1 + 1) * (y2 - y1 + 1) + order = scores.argsort()[::-1] + + keep = [] + while order.size > 0: + i = order[0] + keep.append(i) + xx1 = np.maximum(x1[i], x1[order[1:]]) + yy1 = np.maximum(y1[i], y1[order[1:]]) + xx2 = np.minimum(x2[i], x2[order[1:]]) + yy2 = np.minimum(y2[i], y2[order[1:]]) + + w = np.maximum(0.0, xx2 - xx1 + 1) + h = np.maximum(0.0, yy2 - yy1 + 1) + inter = w * h + ovr = inter / (areas[i] + areas[order[1:]] - inter) + + inds = np.where(ovr <= thresh)[0] + order = order[inds + 1] 
+
+    return keep
diff --git a/utils/timer.py b/utils/timer.py
new file mode 100644
index 0000000000000000000000000000000000000000..e4b3b8098a5ad41f8d18d42b6b2fedb694aa5508
--- /dev/null
+++ b/utils/timer.py
@@ -0,0 +1,40 @@
+# --------------------------------------------------------
+# Fast R-CNN
+# Copyright (c) 2015 Microsoft
+# Licensed under The MIT License [see LICENSE for details]
+# Written by Ross Girshick
+# --------------------------------------------------------
+
+import time
+
+
+class Timer(object):
+    """A simple timer."""
+    def __init__(self):
+        self.total_time = 0.
+        self.calls = 0
+        self.start_time = 0.
+        self.diff = 0.
+        self.average_time = 0.
+
+    def tic(self):
+        # using time.time instead of time.clock because time.clock
+        # does not normalize for multithreading
+        self.start_time = time.time()
+
+    def toc(self, average=True):
+        self.diff = time.time() - self.start_time
+        self.total_time += self.diff
+        self.calls += 1
+        self.average_time = self.total_time / self.calls
+        if average:
+            return self.average_time
+        else:
+            return self.diff
+
+    def clear(self):
+        self.total_time = 0.
+        self.calls = 0
+        self.start_time = 0.
+        self.diff = 0.
+        self.average_time = 0.
diff --git a/weights/Resnet50_Final.pth b/weights/Resnet50_Final.pth
new file mode 100644
index 0000000000000000000000000000000000000000..16546738ce0a00a9fd47585e0fc52744d31cc117
--- /dev/null
+++ b/weights/Resnet50_Final.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6d1de9c2944f2ccddca5f5e010ea5ae64a39845a86311af6fdf30841b0a5a16d
+size 109497761
diff --git a/weights/model_params_ffpp_c23.pickle b/weights/model_params_ffpp_c23.pickle
new file mode 100644
index 0000000000000000000000000000000000000000..940153d2af3c6d3eff68763fb6ebdecd9a51063b
--- /dev/null
+++ b/weights/model_params_ffpp_c23.pickle
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ef932660d7ac239c496712565f4653da8d7c2e0d4a0f024ec7beafc41776018b
+size 103686889
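Note: a minimal sketch of how the box utilities introduced above are typically chained for RetinaFace-style post-processing (decode regressions against the priors, threshold by score, then suppress overlaps with NMS). The random tensors, the [0.1, 0.2] variances, and the 0.5 / 0.4 thresholds are illustrative assumptions only, not values taken from this diff; the real pipeline would build priors with a PriorBox layer and feed genuine network outputs.

import numpy as np
import torch

from utils.box_utils import decode, decode_landm
from utils.nms.py_cpu_nms import py_cpu_nms

# Stand-in network outputs for one image with N priors (shapes only; values are random).
N = 8
priors = torch.rand(N, 4)                         # (cx, cy, w, h) prior boxes
loc = torch.randn(N, 4)                           # box regression output
conf = torch.softmax(torch.randn(N, 2), dim=-1)   # [background, face] scores
landms = torch.randn(N, 10)                       # 5 facial landmark offsets
variances = [0.1, 0.2]                            # assumed; must match the training-time encoding

boxes = decode(loc, priors, variances)            # -> (xmin, ymin, xmax, ymax)
landmarks = decode_landm(landms, priors, variances)
scores = conf[:, 1]

mask = scores > 0.5                               # assumed confidence threshold
dets = torch.cat([boxes[mask], scores[mask, None]], dim=1).numpy()
keep = py_cpu_nms(dets, thresh=0.4)               # assumed NMS IoU threshold
print(dets[keep])                                 # surviving detections: [xmin, ymin, xmax, ymax, score]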