import streamlit as st
st.set_page_config(page_title="Advanced File Downloader", layout="wide")

# Core imports
import os
import subprocess
from playwright.async_api import async_playwright, TimeoutError as PlaywrightTimeoutError
import asyncio
import logging
from urllib.parse import urlparse, urljoin, quote_plus
import re
from pathlib import Path
from io import BytesIO
import random
from bs4 import BeautifulSoup
from PyPDF2 import PdfReader
import zipfile
import tempfile
import mimetypes
import requests
import datetime
import spacy
import spacy.cli
from spacy.language import Language
import google_auth_oauthlib.flow
import googleapiclient.discovery
import googleapiclient.http
import google.auth.transport.requests
from async_timeout import timeout as async_timeout
import pandas as pd
from sentence_transformers import SentenceTransformer
from transformers import pipeline
import schedule
import threading
import time
import hashlib
from reportlab.lib.pagesizes import letter
from reportlab.pdfgen import canvas
from sklearn.cluster import KMeans
import numpy as np
import base64
import shutil
from PIL import Image  # requires Pillow (pip install Pillow)

# -------------------- Logging Setup --------------------
logging.basicConfig(
    filename='advanced_download_log.txt',
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

# OAuth client credentials are read from environment variables so that secrets
# are never committed to source control.
GOOGLE_OAUTH_CONFIG = {
    "web": {
        "client_id": os.environ.get("GOOGLE_CLIENT_ID", ""),
        "project_id": "huggingface-449214",
        "auth_uri": "https://accounts.google.com/o/oauth2/auth",
        "token_uri": "https://oauth2.googleapis.com/token",
        "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
        "client_secret": os.environ.get("GOOGLE_CLIENT_SECRET", ""),
        "redirect_uris": ["https://euler314-craw-web.hf.space/"]
    }
}

# Playwright Setup
@st.cache_resource
def install_playwright_dependencies():
    """Install the system libraries and Chromium build that Playwright needs.

    Cached so it runs once per server session instead of on every Streamlit rerun.
    The apt-get step needs root; if it fails we log and continue, since the
    browser install may still succeed on images with the libraries preinstalled.
    """
    os.environ['PLAYWRIGHT_BROWSERS_PATH'] = os.path.expanduser("~/.cache/ms-playwright")
    try:
        subprocess.run(['apt-get', 'update', '-y'], check=True)
        packages = [
            'libnss3', 'libnss3-tools', 'libnspr4', 'libatk1.0-0',
            'libatk-bridge2.0-0', 'libatspi2.0-0', 'libcups2', 'libxcomposite1',
            'libxdamage1', 'libdrm2', 'libgbm1', 'libpango-1.0-0'
        ]
        subprocess.run(['apt-get', 'install', '-y', '--no-install-recommends'] + packages, check=True)
    except (subprocess.CalledProcessError, FileNotFoundError) as e:
        logger.warning(f"System package installation failed (may require root): {e}")
    subprocess.run(['python3', '-m', 'playwright', 'install', 'chromium'], check=True)

install_playwright_dependencies()

# Model Loading
@st.cache_resource
def load_models():
    try:
        # Load spaCy model
        try:
            nlp = spacy.load("en_core_web_sm")
        except OSError:
            st.info("Downloading spaCy model...")
            spacy.cli.download("en_core_web_sm")
            nlp = spacy.load("en_core_web_sm")

        # Load SentenceTransformer
        try:
            semantic_model = SentenceTransformer('deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B')
        except Exception as e:
            st.error(f"Error loading SentenceTransformer: {e}")
            semantic_model = None

        # Load Transformers pipeline
        try:
            summarizer = pipeline("summarization", model="facebook/bart-large-cnn")
        except Exception as e:
            st.error(f"Error loading Transformers: {e}")
            summarizer = None

        return nlp, semantic_model, summarizer
    except Exception as e:
        st.error(f"Error loading models: {e}")
        return None, None, None

nlp_model, semantic_model, summarizer = load_models()

# Utility Functions
def get_random_user_agent():
    USER_AGENTS = [
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 12_6_3) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.4 Safari/605.1.15',
        'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36',
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:115.0) Gecko/20100101 Firefox/115.0',
    ]
    return random.choice(USER_AGENTS)

def sizeof_fmt(num, suffix='B'):
    for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
        if abs(num) < 1024.0:
            return f"{num:3.1f}{unit}{suffix}"
        num /= 1024.0
    return f"{num:.1f}Y{suffix}"

def create_zip_file(file_paths, output_dir):
    timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
    zip_path = os.path.join(output_dir, f"downloads_{timestamp}.zip")
    with zipfile.ZipFile(zip_path, 'w') as zipf:
        for file_path in file_paths:
            zipf.write(file_path, os.path.basename(file_path))
    return zip_path

# Google Drive Functions
def get_google_auth_url():
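    """Build an OAuth2 flow for the Drive file scope and return the Google consent URL."""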
    client_config = GOOGLE_OAUTH_CONFIG["web"]
    flow = google_auth_oauthlib.flow.Flow.from_client_config(
        {"web": client_config},
        scopes=["https://www.googleapis.com/auth/drive.file"]
    )
    flow.redirect_uri = client_config["redirect_uris"][0]
    authorization_url, _ = flow.authorization_url(
        access_type="offline",
        include_granted_scopes="true",
        prompt="consent"
    )
    return authorization_url

def exchange_code_for_credentials(auth_code):
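    """Exchange an OAuth2 authorization code for Drive credentials; returns (creds, message)."""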
    if not auth_code.strip():
        return None, "No code provided."
    try:
        client_config = GOOGLE_OAUTH_CONFIG["web"]
        flow = google_auth_oauthlib.flow.Flow.from_client_config(
            {"web": client_config},
            scopes=["https://www.googleapis.com/auth/drive.file"]
        )
        flow.redirect_uri = client_config["redirect_uris"][0]
        flow.fetch_token(code=auth_code.strip())
        creds = flow.credentials
        if not creds or not creds.valid:
            return None, "Could not validate credentials. Check code and try again."
        return creds, "Google Sign-In successful!"
    except Exception as e:
        return None, f"Error during token exchange: {e}"

def google_drive_upload(file_path, credentials, folder_id=None):
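    """Upload a local file to Google Drive (optionally into folder_id) and return its file ID."""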
    try:
        drive_service = googleapiclient.discovery.build("drive", "v3", credentials=credentials)
        file_metadata = {'name': os.path.basename(file_path)}
        if folder_id:
            file_metadata['parents'] = [folder_id]
        media = googleapiclient.http.MediaFileUpload(file_path, resumable=True)
        created = drive_service.files().create(body=file_metadata, media_body=media, fields='id').execute()
        return created.get("id", "")
    except Exception as e:
        return f"Error uploading to Drive: {str(e)}"

def create_drive_folder(drive_service, name):
    folder_metadata = {'name': name, 'mimeType': 'application/vnd.google-apps.folder'}
    folder = drive_service.files().create(body=folder_metadata, fields='id').execute()
    return folder.get('id')

# DownloadManager Class
class DownloadManager:
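    """Drives a headless Chromium session (via Playwright) to search Bing, discover
    downloadable files on result pages, and fetch them, with dedicated handling for
    Google Drive links. Intended to be used as an async context manager."""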
    def __init__(self, use_proxy=False, proxy=None, query=None, num_results=5):
        self.use_proxy = use_proxy
        self.proxy = proxy
        self.query = query
        self.num_results = num_results
        self.playwright = None
        self.browser = None
        self.context = None
        self.page = None

    async def __aenter__(self):
        self.playwright = await async_playwright().start()
        opts = {
            "headless": True,
            "args": [
                '--no-sandbox',
                '--disable-setuid-sandbox',
                '--disable-dev-shm-usage',
                '--disable-gpu',
                '--no-zygote',
                '--single-process'
            ]
        }
        if self.use_proxy and self.proxy:
            opts["proxy"] = {"server": self.proxy}
        self.browser = await self.playwright.chromium.launch(**opts)
        self.context = await self.browser.new_context(user_agent=get_random_user_agent())
        self.page = await self.context.new_page()
        await self.page.set_extra_http_headers({
            'Accept-Language': 'en-US,en;q=0.9',
            'Accept-Encoding': 'gzip, deflate, br',
            'Referer': 'https://www.bing.com/'
        })
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        if self.browser:
            await self.browser.close()
        if self.playwright:
            await self.playwright.stop()

    async def search_bing(self):
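        """Run the configured query on Bing and return up to num_results result URLs."""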
        urls = []
        try:
            search_url = f"https://www.bing.com/search?q={quote_plus(self.query)}"
            await self.page.goto(search_url, timeout=30000)
            await self.page.wait_for_load_state('networkidle')
            links = await self.page.query_selector_all("li.b_algo h2 a")
            for link in links[:self.num_results]:
                href = await link.get_attribute('href')
                if href:
                    urls.append(href)
            return urls
        except Exception as e:
            logger.error(f"Error searching Bing: {e}")
            return []

    async def get_file_size(self, url):
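        """Issue a HEAD request and return a human-readable Content-Length, or 'Unknown Size'."""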
        try:
            page = await self.context.new_page()
            try:
                response = await page.request.head(url, timeout=15000)
                length = response.headers.get('Content-Length', None)
                if length:
                    return sizeof_fmt(int(length))
                else:
                    return "Unknown Size"
            finally:
                await page.close()
        except Exception:
            return "Unknown Size"

    async def get_pdf_metadata(self, url):
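        """Fetch a PDF and return its title, author, and page count ({} on failure)."""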
        try:
            page = await self.context.new_page()
            try:
                resp = await page.request.get(url, timeout=15000)
                if resp.ok:
                    content = await resp.body()
                    pdf = BytesIO(content)
                    reader = PdfReader(pdf)
                    return {
                        'Title': reader.metadata.get('/Title', 'N/A') if reader.metadata else 'N/A',
                        'Author': reader.metadata.get('/Author', 'N/A') if reader.metadata else 'N/A',
                        'Pages': len(reader.pages),
                    }
                else:
                    return {}
            finally:
                await page.close()
        except Exception:
            return {}

    async def extract_real_download_url(self, url):
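        """Follow a redirecting link and return the final (real) download URL."""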
        try:
            page = await self.context.new_page()
            try:
                response = await page.goto(url, wait_until='networkidle', timeout=30000)
                if response and response.headers.get('location'):
                    return response.headers['location']
                return page.url
            finally:
                await page.close()
        except Exception as e:
            logger.error(f"Error extracting real download URL: {e}")
            return url

    async def extract_downloadable_files(self, url, custom_ext_list):
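        """Scan a page for links matching default/custom file extensions, download
        endpoints, and Google Drive links; return de-duplicated descriptors with
        URL, filename, size, and metadata."""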
        found_files = []
        try:
            response = await self.page.goto(url, timeout=30000, wait_until='networkidle')
            if not response:
                return []

            final_url = self.page.url
            if '.php' in final_url or 'download' in final_url:
                real_url = await self.extract_real_download_url(final_url)
                if real_url != final_url:
                    found_files.append({
                        'url': real_url,
                        'filename': os.path.basename(urlparse(real_url).path) or 'downloaded_file',
                        'size': await self.get_file_size(real_url),
                        'metadata': {}
                    })
                    return found_files

            await self.page.wait_for_load_state('networkidle', timeout=30000)
            content = await self.page.content()
            soup = BeautifulSoup(content, 'html.parser')
            
            default_exts = ['.pdf', '.docx', '.doc', '.zip', '.rar', '.mp3', '.mp4', 
                            '.avi', '.mkv', '.png', '.jpg', '.jpeg', '.gif', '.xlsx', 
                            '.pptx', '.odt', '.txt']
            all_exts = set(default_exts + [ext.strip().lower() for ext in custom_ext_list if ext.strip()])
            
            for a in soup.find_all('a', href=True):
                href = a['href'].strip()
                
                if '.php' in href.lower() or 'download' in href.lower():
                    full_url = urljoin(final_url, href)  # resolves relative links against the page URL
                    real_url = await self.extract_real_download_url(full_url)
                    if real_url and real_url != full_url:
                        found_files.append({
                            'url': real_url,
                            'filename': os.path.basename(urlparse(real_url).path) or 'downloaded_file',
                            'size': await self.get_file_size(real_url),
                            'metadata': {}
                        })
                        continue

                if any(href.lower().endswith(ext) for ext in all_exts):
                    file_url = urljoin(final_url, href)
                    size_str = await self.get_file_size(file_url)
                    meta = {}
                    if file_url.lower().endswith('.pdf'):
                        meta = await self.get_pdf_metadata(file_url)
                    found_files.append({
                        'url': file_url,
                        'filename': os.path.basename(file_url.split('?')[0]),
                        'size': size_str,
                        'metadata': meta
                    })

                # Handle Google Drive links
                elif ("drive.google.com" in href) or ("docs.google.com" in href):
                    file_id = None
                    for pattern in [r'/file/d/([^/]+)', r'id=([^&]+)', r'open\?id=([^&]+)']:
                        match = re.search(pattern, href)
                        if match:
                            file_id = match.group(1)
                            break
                    if file_id:
                        # We'll detect file type during download, so just use the ID for filename initially
                        filename = f"gdrive_{file_id}"
                        try:
                            # Get file info to determine type and size
                            file_type, is_view_only = await self.get_google_drive_file_info(file_id)
                            if file_type:
                                filename = f"{filename}.{file_type}"
                            
                            found_files.append({
                                'url': href,  # Use original URL, as we'll process it specially
                                'filename': filename,
                                'size': "View-only" if is_view_only else await self.get_file_size(f"https://drive.google.com/uc?export=download&id={file_id}"),
                                'metadata': {'view_only': is_view_only, 'file_type': file_type, 'file_id': file_id}
                            })
                        except Exception as e:
                            logger.error(f"Error processing Google Drive link: {e}")
                            # Fallback if we can't get info
                            found_files.append({
                                'url': href,
                                'filename': filename,
                                'size': "Unknown Size",
                                'metadata': {'file_id': file_id}
                            })
            
            seen_urls = set()
            unique_files = []
            for f in found_files:
                if f['url'] not in seen_urls:
                    seen_urls.add(f['url'])
                    unique_files.append(f)
            return unique_files
        except Exception as e:
            logger.error(f"Error extracting files from {url}: {e}")
            return []

    async def download_file(self, file_info, save_dir, referer):
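        """Download one discovered file into save_dir, de-duplicating filenames;
        Google Drive URLs are routed to the specialized Drive downloader."""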
        file_url = file_info['url']
        fname = file_info['filename']
        path = os.path.join(save_dir, fname)
        base, ext = os.path.splitext(fname)
        counter = 1
        while os.path.exists(path):
            path = os.path.join(save_dir, f"{base}_{counter}{ext}")
            counter += 1
        os.makedirs(save_dir, exist_ok=True)
        try:
            # Special handling for Google Drive files
            if "drive.google.com" in file_url or "docs.google.com" in file_url:
                # Use enhanced Google Drive downloader
                success = await self.download_from_google_drive(file_url, path)
                return path if success else None
                
            # Original code for non-Google Drive downloads
            page = await self.context.new_page()
            try:
                headers = {
                    'Accept': '*/*',
                    'Accept-Encoding': 'gzip, deflate, br',
                    'Referer': referer
                }
                response = await page.request.get(file_url, headers=headers, timeout=30000)
                if response.status == 200:
                    content = await response.body()
                    with open(path, 'wb') as f:
                        f.write(content)
                    return path
                else:
                    logger.error(f"Download failed with status {response.status}: {file_url}")
                    return None
            finally:
                await page.close()
        except Exception as e:
            logger.error(f"Error downloading {file_url}: {e}")
            return None

    async def download_from_google_drive(self, url, save_path):
        """Enhanced method to download from Google Drive with multiple fallback approaches"""
        # Extract the file ID from different URL formats
        file_id = None
        url_patterns = [
            r'drive\.google\.com/file/d/([^/]+)',
            r'drive\.google\.com/open\?id=([^&]+)',
            r'docs\.google\.com/\w+/d/([^/]+)',
            r'id=([^&]+)',
            r'drive\.google\.com/uc\?id=([^&]+)',
        ]
        
        for pattern in url_patterns:
            match = re.search(pattern, url)
            if match:
                file_id = match.group(1)
                break
        
        if not file_id:
            logger.error(f"Could not extract file ID from URL: {url}")
            return False
        
        # Determine file type first (important for handling different file types)
        file_type, is_view_only = await self.get_google_drive_file_info(file_id)
        logger.info(f"Google Drive file type: {file_type}, View-only: {is_view_only}")
        
        base, ext = os.path.splitext(save_path)
        if not ext and file_type:
            # Add the correct extension if missing
            save_path = f"{base}.{file_type}"
        
        # For view-only files, use specialized approaches
        if is_view_only:
            # Approach 1: For PDFs, use the JS method
            if file_type == 'pdf':
                success = await self.download_viewonly_pdf_with_js(file_id, save_path)
                if success:
                    return True
                    
            # Approach 2: For Google Docs, Sheets, etc., use export API
            if file_type in ['doc', 'docx', 'sheet', 'ppt', 'xlsx', 'pptx']:
                success = await self.export_google_doc(file_id, file_type, save_path)
                if success:
                    return True
                    
            # Approach 3: Try the direct screenshot method for any view-only file
            success = await self.download_viewonly_with_screenshots(file_id, save_path, file_type)
            if success:
                return True
        
        # Try standard approaches for non-view-only files
        try:
            # Try with gdown first
            import gdown
            output = gdown.download(f"https://drive.google.com/uc?id={file_id}", save_path, quiet=False, fuzzy=True)
            if output and os.path.exists(save_path) and os.path.getsize(save_path) > 0:
                with open(save_path, 'rb') as f:
                    content = f.read(100)  # Read first 100 bytes
                    if b'<!DOCTYPE html>' not in content:  # Check not HTML error page
                        logger.info(f"Successfully downloaded with gdown: {url}")
                        return True
        except Exception as e:
            logger.warning(f"gdown download failed: {e}")
        
        # Try with requests and session cookies
        try:
            session = requests.Session()
            session.headers.update({'User-Agent': get_random_user_agent()})
            
            # Visit the page first to get cookies
            session.get(f"https://drive.google.com/file/d/{file_id}/view", timeout=30)
            
            # Try download
            url = f"https://drive.google.com/uc?id={file_id}&export=download"
            response = session.get(url, stream=True, timeout=30)
            
            # Check for confirmation token
            confirmation_token = None
            for k, v in response.cookies.items():
                if k.startswith('download_warning'):
                    confirmation_token = v
                    break
            
            # Use confirmation token if found
            if confirmation_token:
                url = f"{url}&confirm={confirmation_token}"
                response = session.get(url, stream=True, timeout=60)
            
            # Check if we're getting HTML instead of the file
            content_type = response.headers.get('Content-Type', '')
            if 'text/html' in content_type:
                logger.warning("Received HTML instead of file - likely download restriction")
            else:
                with open(save_path, 'wb') as f:
                    for chunk in response.iter_content(chunk_size=1024*1024):
                        if chunk:
                            f.write(chunk)
                
                if os.path.exists(save_path) and os.path.getsize(save_path) > 0:
                    with open(save_path, 'rb') as f:
                        content = f.read(100)
                        if b'<!DOCTYPE html>' not in content:
                            logger.info("Successfully downloaded with requests session")
                            return True
        except Exception as e:
            logger.warning(f"Requests session download failed: {e}")
        
        # If all methods failed for view-only file, try one last approach
        if is_view_only:
            try:
                # Try a direct headless browser download
                page = await self.context.new_page()
                try:
                    await page.goto(f"https://drive.google.com/file/d/{file_id}/view", wait_until='networkidle', timeout=60000)
                    
                    # Try to capture the content directly from viewer
                    file_content = await page.evaluate("""
                        () => {
                            // Try to find the actual viewer content
                            const viewerContent = document.querySelector('.drive-viewer-paginated-content');
                            if (viewerContent) {
                                return viewerContent.innerHTML;
                            }
                            return document.documentElement.innerHTML;
                        }
                    """)
                    
                    if file_content:
                        # Save as HTML and then we can convert it if needed
                        html_path = f"{base}.html"
                        with open(html_path, 'w', encoding='utf-8') as f:
                            f.write(f"""
                            <!DOCTYPE html>
                            <html>
                            <head><title>Google Drive Extracted Content</title></head>
                            <body>
                            {file_content}
                            </body>
                            </html>
                            """)
                        
                        # If requested a PDF, convert HTML to PDF
                        if file_type == 'pdf' or ext.lower() == '.pdf':
                            try:
                                import pdfkit
                                pdfkit.from_file(html_path, save_path)
                                os.remove(html_path)  # Clean up HTML file
                                return True
                            except Exception as pdf_err:
                                logger.warning(f"Error converting HTML to PDF: {pdf_err}")
                                # Keep the HTML file as fallback
                                shutil.copy(html_path, save_path)
                                return True
                        else:
                            # Just use the HTML file
                            shutil.copy(html_path, save_path)
                            return True
                finally:
                    await page.close()
            except Exception as e:
                logger.warning(f"Final direct browser capture failed: {e}")
        
        # All methods failed
        logger.error(f"All download approaches failed for Google Drive file: {file_id}")
        return False

    async def get_google_drive_file_info(self, file_id):
        """Get file type and view-only status from Google Drive"""
        file_type = None
        is_view_only = False
        
        try:
            page = await self.context.new_page()
            try:
                await page.goto(f"https://drive.google.com/file/d/{file_id}/view", timeout=30000)
                
                # Check if view-only
                view_only_text = await page.query_selector('text="the owner has not granted you permission to download this file"')
                is_view_only = view_only_text is not None
                
                # Check for Google Docs viewer
                gdocs_viewer = await page.query_selector('iframe[src*="docs.google.com/document"]')
                gsheets_viewer = await page.query_selector('iframe[src*="docs.google.com/spreadsheets"]')
                gslides_viewer = await page.query_selector('iframe[src*="docs.google.com/presentation"]')
                
                if gdocs_viewer:
                    file_type = 'docx'
                elif gsheets_viewer:
                    file_type = 'xlsx'
                elif gslides_viewer:
                    file_type = 'pptx'
                else:
                    # Check for PDF viewer
                    pdf_viewer = await page.query_selector('embed[type="application/pdf"]')
                    if pdf_viewer:
                        file_type = 'pdf'
                    else:
                        # Check for image viewer
                        img_viewer = await page.query_selector('img[src*="googleusercontent.com"]')
                        if img_viewer:
                            # Get image type from src
                            img_src = await img_viewer.get_attribute('src')
                            if 'jpg' in img_src or 'jpeg' in img_src:
                                file_type = 'jpg'
                            elif 'png' in img_src:
                                file_type = 'png'
                            else:
                                file_type = 'jpg'  # Default to jpg
                        else:
                            # Generic file type fallback
                            file_type = 'pdf'  # Default to PDF
                
                # If still no type, check filename
                if not file_type:
                    title_element = await page.query_selector('div[role="heading"]')
                    if title_element:
                        title = await title_element.text_content()
                        if title:
                            ext_match = re.search(r'\.([a-zA-Z0-9]+)$', title)
                            if ext_match:
                                file_type = ext_match.group(1).lower()
            finally:
                await page.close()

        except Exception as e:
            logger.error(f"Error getting Google Drive file info: {e}")
            file_type = 'pdf'  # Default to PDF if we can't determine
        
        return file_type, is_view_only

    async def download_viewonly_pdf_with_js(self, file_id, save_path):
        """Download view-only PDF using JavaScript approach - improved version"""
        try:
            page = await self.context.new_page()
            try:
                # Set viewport size to ensure we capture full pages
                await page.set_viewport_size({"width": 1200, "height": 1600})
                
                # Visit the file
                view_url = f"https://drive.google.com/file/d/{file_id}/view"
                await page.goto(view_url, wait_until='networkidle', timeout=60000)
                
                # Wait for rendering
                await page.wait_for_timeout(2000)
                
                # Inject required libraries - use CDN for jsPDF.
                # page.evaluate() expects a single expression or function, so the
                # loader is written as an arrow function returning a Promise.
                await page.evaluate("""
                    () => new Promise((resolve) => {
                        // Add jsPDF and resolve once the script has loaded
                        const jspdfScript = document.createElement('script');
                        jspdfScript.src = 'https://cdnjs.cloudflare.com/ajax/libs/jspdf/2.5.1/jspdf.umd.min.js';
                        jspdfScript.onload = () => resolve(true);
                        document.head.appendChild(jspdfScript);
                    })
                """)
                
                # Wait for libraries to load
                await page.wait_for_timeout(2000)
                
                # Scroll through document to load all pages
                await page.evaluate("""
                    async () => {
                        const delay = ms => new Promise(resolve => setTimeout(resolve, ms));
                        const container = document.querySelector('.drive-viewer-paginated-scrollable');
                        if (!container) return false;
                        
                        const scrollHeight = container.scrollHeight;
                        const viewportHeight = container.clientHeight;
                        const scrollStep = viewportHeight / 2;
                        
                        for (let scrollPos = 0; scrollPos < scrollHeight; scrollPos += scrollStep) {
                            container.scrollTo(0, scrollPos);
                            await delay(500);
                        }
                        
                        // One final scroll to bottom to ensure everything is loaded
                        container.scrollTo(0, scrollHeight);
                        await delay(1000);
                        
                        // Scroll back to top for PDF creation
                        container.scrollTo(0, 0);
                        await delay(500);
                        
                        return true;
                    }
                """)
                
                # Wait after scrolling
                await page.wait_for_timeout(2000)
                
                # Use the improved PDF creation script that captures all pages 
                pdf_base64 = await page.evaluate("""
                    async () => {
                        try {
                            // Make sure jsPDF is loaded
                            if (typeof window.jspdf === 'undefined') {
                                console.error('jsPDF not loaded');
                                return null;
                            }
                            
                            const { jsPDF } = window.jspdf;
                            const pdf = new jsPDF();
                            
                            // Get all page elements
                            const pages = document.querySelectorAll('.drive-viewer-paginated-page');
                            console.log('Found pages:', pages.length);
                            
                            if (pages.length === 0) {
                                // Alternative: try to find images directly
                                const images = Array.from(document.querySelectorAll('img')).filter(img => 
                                    img.src.startsWith('blob:') && img.width > 100 && img.height > 100
                                );
                                
                                console.log('Found images:', images.length);
                                
                                if (images.length === 0) {
                                    return null;
                                }
                                
                                // Process each image
                                for (let i = 0; i < images.length; i++) {
                                    const img = images[i];
                                    
                                    if (i > 0) {
                                        pdf.addPage();
                                    }
                                    
                                    // Create canvas and draw image
                                    const canvas = document.createElement('canvas');
                                    canvas.width = img.width;
                                    canvas.height = img.height;
                                    const ctx = canvas.getContext('2d');
                                    ctx.drawImage(img, 0, 0, img.width, img.height);
                                    
                                    // Add to PDF
                                    const imgData = canvas.toDataURL('image/jpeg', 0.95);
                                    
                                    // Calculate dimensions
                                    const pageWidth = pdf.internal.pageSize.getWidth();
                                    const pageHeight = pdf.internal.pageSize.getHeight();
                                    const imgRatio = img.height / img.width;
                                    
                                    let imgWidth = pageWidth - 10;
                                    let imgHeight = imgWidth * imgRatio;
                                    
                                    if (imgHeight > pageHeight - 10) {
                                        imgHeight = pageHeight - 10;
                                        imgWidth = imgHeight / imgRatio;
                                    }
                                    
                                    // Center on page
                                    const x = (pageWidth - imgWidth) / 2;
                                    const y = (pageHeight - imgHeight) / 2;
                                    
                                    pdf.addImage(imgData, 'JPEG', x, y, imgWidth, imgHeight);
                                }
                            } else {
                                // Process each page
                                const container = document.querySelector('.drive-viewer-paginated-scrollable');
                                const delay = ms => new Promise(resolve => setTimeout(resolve, ms));
                                
                                for (let i = 0; i < pages.length; i++) {
                                    // Add a new page for each page after the first
                                    if (i > 0) {
                                        pdf.addPage();
                                    }
                                    
                                    // Scroll to the page and wait for it to render
                                    pages[i].scrollIntoView();
                                    await delay(300);
                                    
                                    // Find the image element inside the page
                                    const pageImages = pages[i].querySelectorAll('img');
                                    let targetImage = null;
                                    
                                    for (const img of pageImages) {
                                        if (img.src.startsWith('blob:') && img.width > 50 && img.height > 50) {
                                            targetImage = img;
                                            break;
                                        }
                                    }
                                    
                                    if (!targetImage) {
                                        // If no image found, try taking a screenshot of the page instead
                                        const pageCanvas = document.createElement('canvas');
                                        pageCanvas.width = pages[i].clientWidth;
                                        pageCanvas.height = pages[i].clientHeight;
                                        const ctx = pageCanvas.getContext('2d');
                                        
                                        // Draw the page background
                                        ctx.fillStyle = 'white';
                                        ctx.fillRect(0, 0, pageCanvas.width, pageCanvas.height);
                                        
                                        // Use html2canvas approach
                                        try {
                                            await delay(100);
                                            // Just draw what we can see
                                            const allElements = pages[i].querySelectorAll('*');
                                            for (const el of allElements) {
                                                if (el.tagName === 'IMG' && el.complete && el.src) {
                                                    const rect = el.getBoundingClientRect();
                                                    try {
                                                        ctx.drawImage(el, rect.left, rect.top, rect.width, rect.height);
                                                    } catch (e) {
                                                        console.error('Draw error:', e);
                                                    }
                                                }
                                            }
                                        } catch (e) {
                                            console.error('Canvas error:', e);
                                        }
                                        
                                        // Add the canvas to the PDF
                                        const imgData = pageCanvas.toDataURL('image/jpeg', 0.95);
                                        
                                        // Calculate dimensions
                                        const pageWidth = pdf.internal.pageSize.getWidth();
                                        const pageHeight = pdf.internal.pageSize.getHeight();
                                        const imgRatio = pageCanvas.height / pageCanvas.width;
                                        
                                        let imgWidth = pageWidth - 10;
                                        let imgHeight = imgWidth * imgRatio;
                                        
                                        if (imgHeight > pageHeight - 10) {
                                            imgHeight = pageHeight - 10;
                                            imgWidth = imgHeight / imgRatio;
                                        }
                                        
                                        // Center on page
                                        const x = (pageWidth - imgWidth) / 2;
                                        const y = (pageHeight - imgHeight) / 2;
                                        
                                        pdf.addImage(imgData, 'JPEG', x, y, imgWidth, imgHeight);
                                    } else {
                                        // Use the found image
                                        const canvas = document.createElement('canvas');
                                        canvas.width = targetImage.naturalWidth || targetImage.width;
                                        canvas.height = targetImage.naturalHeight || targetImage.height;
                                        const ctx = canvas.getContext('2d');
                                        
                                        // Draw image to canvas
                                        try {
                                            ctx.drawImage(targetImage, 0, 0, canvas.width, canvas.height);
                                        } catch (e) {
                                            console.error('Error drawing image:', e);
                                            continue;
                                        }
                                        
                                        // Add to PDF
                                        const imgData = canvas.toDataURL('image/jpeg', 0.95);
                                        
                                        // Calculate dimensions
                                        const pageWidth = pdf.internal.pageSize.getWidth();
                                        const pageHeight = pdf.internal.pageSize.getHeight();
                                        const imgRatio = canvas.height / canvas.width;
                                        
                                        let imgWidth = pageWidth - 10;
                                        let imgHeight = imgWidth * imgRatio;
                                        
                                        if (imgHeight > pageHeight - 10) {
                                            imgHeight = pageHeight - 10;
                                            imgWidth = imgHeight / imgRatio;
                                        }
                                        
                                        // Center on page
                                        const x = (pageWidth - imgWidth) / 2;
                                        const y = (pageHeight - imgHeight) / 2;
                                        
                                        pdf.addImage(imgData, 'JPEG', x, y, imgWidth, imgHeight);
                                    }
                                }
                            }
                            
                            // Return as base64
                            return pdf.output('datauristring');
                        } catch (e) {
                            console.error('PDF creation error:', e);
                            return null;
                        }
                    }
                """)
                
                if not pdf_base64 or 'base64,' not in pdf_base64:
                    # If script method failed, try screenshot approach
                    logger.warning("PDF creation script failed, trying fallback method")
                    return await self.download_viewonly_with_screenshots(file_id, save_path, 'pdf')
                
                # Save the PDF from base64
                try:
                    base64_data = pdf_base64.split('base64,', 1)[1]  # jsPDF may prefix a filename segment in the data URI
                    pdf_bytes = base64.b64decode(base64_data)
                    
                    with open(save_path, 'wb') as f:
                        f.write(pdf_bytes)
                    
                    # Verify file is not empty
                    if os.path.exists(save_path) and os.path.getsize(save_path) > 1000:
                        logger.info(f"Successfully saved PDF to {save_path}")
                        return True
                    else:
                        logger.warning(f"Generated PDF is too small, using fallback method")
                        return await self.download_viewonly_with_screenshots(file_id, save_path, 'pdf')
                except Exception as e:
                    logger.error(f"Error saving PDF: {e}")
                    return await self.download_viewonly_with_screenshots(file_id, save_path, 'pdf')
            finally:
                await page.close()

        except Exception as e:
            logger.error(f"Error in view-only PDF download: {e}")
            # Try fallback method
            return await self.download_viewonly_with_screenshots(file_id, save_path, 'pdf')

    async def download_viewonly_with_screenshots(self, file_id, save_path, file_type):
        """Download any view-only file by taking screenshots"""
        try:
            page = await self.context.new_page()
            try:
                # Set high-resolution viewport
                await page.set_viewport_size({"width": 1600, "height": 1200})
                
                # Navigate to the file
                await page.goto(f"https://drive.google.com/file/d/{file_id}/view", wait_until='networkidle', timeout=60000)
                
                # Make sure the file is loaded
                await page.wait_for_load_state('networkidle')
                await page.wait_for_timeout(3000)  # Extra time for rendering
                
                # Create directory for screenshots if multiple pages
                base_dir = os.path.dirname(save_path)
                base_name = os.path.splitext(os.path.basename(save_path))[0]
                screenshots_dir = os.path.join(base_dir, f"{base_name}_screenshots")
                os.makedirs(screenshots_dir, exist_ok=True)
                
                # Check if it's a multi-page document
                is_multi_page = await page.evaluate("""
                    () => {
                        const pages = document.querySelectorAll('.drive-viewer-paginated-page');
                        return pages.length > 1;
                    }
                """)
                
                if is_multi_page and file_type == 'pdf':
                    # For multi-page PDFs, take screenshots of each page
                    page_count = await page.evaluate("""
                        async () => {
                            const delay = ms => new Promise(resolve => setTimeout(resolve, ms));
                            const pages = document.querySelectorAll('.drive-viewer-paginated-page');
                            const container = document.querySelector('.drive-viewer-paginated-scrollable');
                            
                            if (!container || pages.length === 0) return 0;
                            
                            // Scroll through to make sure all pages are loaded
                            const scrollHeight = container.scrollHeight;
                            const viewportHeight = container.clientHeight;
                            const scrollStep = viewportHeight;
                            
                            for (let scrollPos = 0; scrollPos < scrollHeight; scrollPos += scrollStep) {
                                container.scrollTo(0, scrollPos);
                                await delay(300);
                            }
                            
                            // Scroll back to top
                            container.scrollTo(0, 0);
                            await delay(300);
                            
                            return pages.length;
                        }
                    """)
                    
                    logger.info(f"Found {page_count} pages in document")
                    
                    # Take screenshots of each page
                    screenshots = []
                    for i in range(page_count):
                        # Scroll to page
                        await page.evaluate(f"""
                            async () => {{
                                const delay = ms => new Promise(resolve => setTimeout(resolve, ms));
                                const pages = document.querySelectorAll('.drive-viewer-paginated-page');
                                if (pages.length <= {i}) return false;
                                
                                pages[{i}].scrollIntoView();
                                await delay(500);
                                return true;
                            }}
                        """)
                        
                        # Take screenshot
                        screenshot_path = os.path.join(screenshots_dir, f"page_{i+1}.png")
                        await page.screenshot(path=screenshot_path, clip={
                            'x': 0,
                            'y': 0,
                            'width': 1600,
                            'height': 1200
                        })
                        screenshots.append(screenshot_path)
                    
                    # Combine screenshots into PDF
                    from PIL import Image
                    from reportlab.pdfgen import canvas
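                    # reportlab sizes pages in points; using pixel dimensions 1:1 preserves the aspect ratio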
                    
                    c = canvas.Canvas(save_path)
                    for screenshot in screenshots:
                        # Open just long enough to read dimensions, so the file can be deleted later
                        with Image.open(screenshot) as img:
                            width, height = img.size
                        
                        # Add page to PDF
                        c.setPageSize((width, height))
                        c.drawImage(screenshot, 0, 0, width, height)
                        c.showPage()
                    
                    c.save()
                    
                    # Clean up screenshots
                    for screenshot in screenshots:
                        os.remove(screenshot)
                    os.rmdir(screenshots_dir)
                    
                    return os.path.exists(save_path) and os.path.getsize(save_path) > 0
                else:
                    # For single-page or non-PDF files, just take one screenshot
                    screenshot_path = os.path.join(screenshots_dir, "screenshot.png")
                    await page.screenshot(path=screenshot_path, full_page=True)  # Python API uses snake_case
                    
                    # Convert to requested format if needed
                    if file_type == 'pdf':
                        from PIL import Image
                        from reportlab.pdfgen import canvas
                        
                        # Create PDF from screenshot
                        with Image.open(screenshot_path) as img:
                            width, height = img.size
                        
                        c = canvas.Canvas(save_path, pagesize=(width, height))
                        c.drawImage(screenshot_path, 0, 0, width, height)
                        c.save()
                    else:
                        # Just copy the screenshot to the destination with proper extension
                        shutil.copy(screenshot_path, save_path)
                    
                    # Clean up
                    os.remove(screenshot_path)
                    os.rmdir(screenshots_dir)
                    
                    return os.path.exists(save_path) and os.path.getsize(save_path) > 0
            finally:
                await page.close()
        except Exception as e:
            logger.error(f"Error taking screenshots: {e}")
            return False

    async def export_google_doc(self, file_id, file_type, save_path):
        """Export Google Docs/Sheets/Slides to downloadable formats"""
        try:
            # Map loose file-type names to the short format strings the export
            # endpoints expect (e.g. format=docx, not a MIME type)
            export_formats = {
                'doc': 'docx',
                'docx': 'docx',
                'sheet': 'xlsx',
                'xlsx': 'xlsx',
                'ppt': 'pptx',
                'pptx': 'pptx',
                'presentation': 'pptx',
                'pdf': 'pdf',
            }
            export_format = export_formats.get(file_type, 'pdf')
            
            # Default to the Docs endpoint; Sheets and Slides use their own export URLs
            export_url = f"https://docs.google.com/document/d/{file_id}/export?format={export_format}"
            if export_format == 'xlsx':
                export_url = f"https://docs.google.com/spreadsheets/d/{file_id}/export?format=xlsx"
            elif export_format == 'pptx':
                export_url = f"https://docs.google.com/presentation/d/{file_id}/export/pptx"
            
            page = await self.context.new_page()
            try:
                # Get cookies from the main view page first
                await page.goto(f"https://drive.google.com/file/d/{file_id}/view", wait_until='networkidle')
                
                # Now try the export
                response = await page.goto(export_url, wait_until='networkidle')
                
                if response and response.status == 200:
                    content = await response.body()
                    with open(save_path, 'wb') as f:
                        f.write(content)
                    return os.path.exists(save_path) and os.path.getsize(save_path) > 0
                else:
                    logger.warning(f"Export failed with status {response.status}")
                    return False
            finally:
                await page.close()
        except Exception as e:
            logger.error(f"Error exporting Google Doc: {e}")
            return False

    async def deep_search(self, url, custom_ext_list=None, sublink_limit=10000, timeout=60):
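        """Scan the main page and each sublink for files, with Streamlit progress UI; returns a deduplicated list."""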
        if not custom_ext_list:
            custom_ext_list = []
        progress_text = st.empty()
        progress_bar = st.progress(0)
        file_count_text = st.empty()
        try:
            progress_text.text("Analyzing main page...")
            main_files = await self.extract_downloadable_files(url, custom_ext_list)
            initial_count = len(main_files)
            file_count_text.text(f"Found {initial_count} files on main page")
            progress_text.text("Getting sublinks...")
            sublinks = await self.get_sublinks(url, sublink_limit)
            total_links = len(sublinks)
            progress_text.text(f"Found {total_links} sublinks to process")
            if not sublinks:
                progress_bar.progress(1.0)
                return main_files
            all_files = list(main_files)  # copy so the main-page list is not mutated
            for i, sublink in enumerate(sublinks, 1):
                progress = i / total_links
                progress_text.text(f"Processing sublink {i}/{total_links}: {sublink}")
                progress_bar.progress(progress)
                try:
                    # Bound each sublink scan by the caller-supplied timeout
                    sub_files = await asyncio.wait_for(
                        self.extract_downloadable_files(sublink, custom_ext_list),
                        timeout=timeout)
                    all_files.extend(sub_files)
                except asyncio.TimeoutError:
                    logger.warning(f"Timed out processing sublink: {sublink}")
                file_count_text.text(f"Found {len(all_files)} total files")
            seen_urls = set()
            unique_files = []
            for f in all_files:
                if f['url'] not in seen_urls:
                    seen_urls.add(f['url'])
                    unique_files.append(f)
            final_count = len(unique_files)
            progress_text.text(f"Deep search complete!")
            file_count_text.text(f"Found {final_count} unique files")
            progress_bar.progress(1.0)
            return unique_files
        except Exception as e:
            logger.error(f"Deep search error: {e}")
            progress_text.text(f"Error during deep search: {str(e)}")
            return []
        finally:
            await asyncio.sleep(2)
            if not st.session_state.get('keep_progress', False):
                progress_text.empty()
                progress_bar.empty()

    async def get_sublinks(self, url, limit=10000):
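        """Collect absolute and root-relative links from the page, capped at `limit`."""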
        try:
            await self.page.goto(url, timeout=30000)
            content = await self.page.content()
            soup = BeautifulSoup(content, 'html.parser')
            parsed_base = urlparse(url)
            base_url = f"{parsed_base.scheme}://{parsed_base.netloc}"
            links = set()
            for a in soup.find_all('a', href=True):
                href = a['href'].strip()
                if href.startswith('http'):
                    links.add(href)
                elif href.startswith('/'):
                    links.add(f"{base_url}{href}")
            return list(links)[:limit]
        except Exception as e:
            logger.error(f"Error getting sublinks: {e}")
            return []

# Utility Functions for New Features
def extract_keywords(text, n=5):
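    """Return up to n alphabetic, non-stopword tokens as rough keywords (requires the spaCy nlp_model)."""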
    doc = nlp_model(text)
    keywords = [token.text for token in doc if token.is_alpha and not token.is_stop][:n]
    return keywords

def analyze_sentiment(text):
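    """Return (label, score) from the sentiment pipeline; input is truncated to 512 characters."""
    # Note: the pipeline is rebuilt on every call; cache it if sentiment is computed repeatedly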
    sentiment_analyzer = pipeline("sentiment-analysis", model="distilbert-base-uncased-finetuned-sst-2-english")
    result = sentiment_analyzer(text[:512])[0]
    return result['label'], result['score']

def get_file_hash(file_path):
    """Return the file's MD5 hex digest, reading in chunks to bound memory use."""
    hasher = hashlib.md5()
    with open(file_path, 'rb') as f:
        for chunk in iter(lambda: f.read(8192), b''):
            hasher.update(chunk)
    return hasher.hexdigest()
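# Illustrative use of get_file_hash (hypothetical snippet, not wired into the app):
#     seen_hashes = set()
#     file_hash = get_file_hash(path)
#     if file_hash in seen_hashes:
#         os.remove(path)  # drop duplicate content
#     seen_hashes.add(file_hash)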

# Main Function
def main():
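    # One-time session-state setup; Streamlit reruns this script on every interaction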
    if 'initialized' not in st.session_state:
        st.session_state.initialized = True
        st.session_state.discovered_files = []
        st.session_state.current_url = None
        st.session_state.google_creds = None
        st.session_state.selected_files = []
        st.session_state.do_deep_search = False
        st.session_state.deep_search_url = None
        st.session_state.search_results = []

    st.title("Advanced File Downloader")

    with st.sidebar:
        mode = st.radio("Select Mode", ["Manual URL", "Bing Search", "PDF Summarizer"], key="mode_select")
        with st.expander("Advanced Options", expanded=True):
            custom_extensions = st.text_input("Custom File Extensions", placeholder=".csv, .txt, .epub", key="custom_ext_input", help="Enter extensions like .csv, .txt")
            max_sublinks = st.number_input("Maximum Sublinks to Process", min_value=1, max_value=100000, value=10000, step=50, key="max_sublinks_input", help="Max sublinks to scan from main page")
            sublink_timeout = st.number_input("Search Timeout (seconds per sublink)", min_value=1, max_value=3000, value=30, step=5, key="timeout_input", help="Timeout for each sublink")
            use_proxy = st.checkbox("Use Proxy", key="proxy_checkbox")
            proxy = st.text_input("Proxy URL", placeholder="http://proxy:port", key="proxy_input")
        with st.expander("Google Drive Integration", expanded=False):
            if st.button("Start Google Sign-In", key="google_signin_btn"):
                auth_url = get_google_auth_url()
                st.markdown(f"[Click here to authorize]({auth_url})")
            auth_code = st.text_input("Enter authorization code", key="auth_code_input")
            if st.button("Complete Sign-In", key="complete_signin_btn") and auth_code:
                creds, msg = exchange_code_for_credentials(auth_code)
                st.session_state.google_creds = creds
                st.write(msg)

    if mode == "Manual URL":
        st.header("Manual URL Mode")
        url = st.text_input("Enter URL", placeholder="https://example.com", key="url_input")
        col1, col2 = st.columns([3, 1])
        with col1:
            if st.button("Deep Search", use_container_width=True, key="deep_search_btn"):
                if url:
                    custom_ext_list = [ext.strip().lower() for ext in custom_extensions.split(',') if ext.strip()]
                    valid_ext_list = [ext for ext in custom_ext_list if re.match(r'^\.[a-zA-Z0-9]+$', ext)]
                    if custom_ext_list != valid_ext_list:
                        st.warning("Invalid extensions ignored. Use format like '.csv'.")
                    async def run_deep_search():
                        async with DownloadManager(use_proxy=use_proxy, proxy=proxy) as dm:
                            files = await dm.deep_search(url, valid_ext_list, max_sublinks, sublink_timeout)
                            return files
                    files = asyncio.run(run_deep_search())
                    if files:
                        st.session_state.discovered_files = files
                        st.session_state.current_url = url
                        st.success(f"Found {len(files)} files!")
                    else:
                        st.warning("No files found.")

        if st.session_state.discovered_files:
            files = st.session_state.discovered_files
            st.success(f"Found {len(files)} files!")
            col1, col2 = st.columns([1, 4])
            with col1:
                if st.button("Select All", key="select_all_btn"):
                    st.session_state.selected_files = list(range(len(files)))
                if st.button("Clear Selection", key="clear_selection_btn"):
                    st.session_state.selected_files = []
            selected_files = st.multiselect("Select files to download", options=list(range(len(files))), default=st.session_state.selected_files, format_func=lambda x: f"{files[x]['filename']} ({files[x]['size']})", key="file_multiselect")
            st.session_state.selected_files = selected_files
            if selected_files:
                col1, col2, col3, col4 = st.columns(4)
                with col1:
                    download_dir = st.text_input("Download Directory", value="./downloads", key="download_dir_input")
                with col2:
                    create_zip = st.checkbox("Create ZIP file", value=True, key="create_zip_checkbox")
                with col3:
                    delete_after = st.checkbox("Delete after creating ZIP", key="delete_after_checkbox")
                with col4:
                    upload_to_drive = st.checkbox("Upload to Google Drive", key="upload_drive_checkbox")
                if st.button("Download Selected", key="download_btn"):
                    os.makedirs(download_dir, exist_ok=True)
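                    # Files download sequentially so the progress bar tracks each one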
                    async def download_files():
                        downloaded_paths = []
                        progress_bar = st.progress(0)
                        status_text = st.empty()
                        async with DownloadManager(use_proxy=use_proxy, proxy=proxy) as dm:
                            for i, idx in enumerate(selected_files):
                                progress = (i + 1) / len(selected_files)
                                file_info = files[idx]
                                status_text.text(f"Downloading {file_info['filename']}... ({i+1}/{len(selected_files)})")
                                progress_bar.progress(progress)
                                path = await dm.download_file(file_info, download_dir, url)
                                if path:
                                    downloaded_paths.append(path)
                            status_text.empty()
                            progress_bar.empty()
                            return downloaded_paths
                    downloaded = asyncio.run(download_files())
                    if downloaded:
                        st.success(f"Successfully downloaded {len(downloaded)} files")
                        if create_zip:
                            zip_path = create_zip_file(downloaded, download_dir)
                            st.success(f"Created ZIP file: {zip_path}")
                            with open(zip_path, "rb") as f:
                                zip_data = f.read()
                            st.download_button("Download ZIP", data=zip_data, file_name=os.path.basename(zip_path), mime="application/zip")
                            if upload_to_drive and st.session_state.google_creds:
                                drive_service = googleapiclient.discovery.build("drive", "v3", credentials=st.session_state.google_creds)
                                folder_id = create_drive_folder(drive_service, f"Downloads_{urlparse(url).netloc}")
                                drive_id = google_drive_upload(zip_path, st.session_state.google_creds, folder_id)
                                if isinstance(drive_id, str) and not drive_id.startswith("Error"):
                                    st.success(f"Uploaded to Google Drive. File ID: {drive_id}")
                                else:
                                    st.error(drive_id)
                            if delete_after:
                                for path in downloaded:
                                    try:
                                        os.remove(path)
                                    except Exception as e:
                                        st.warning(f"Could not delete {path}: {e}")
                                st.info("Deleted original files after ZIP creation")
                        else:
                            for path in downloaded:
                                with open(path, "rb") as f:
                                    file_data = f.read()
                                st.download_button(f"Download {os.path.basename(path)}", data=file_data, file_name=os.path.basename(path))

    elif mode == "Bing Search":
        st.header("Bing Search Mode")
        query = st.text_input("Enter search query", key="search_query_input")
        num_results = st.slider("Number of results", 1, 50, 5, key="num_results_slider")
        if st.button("Search", key="search_btn"):
            if query:
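                # Results are kept in session state so they survive Streamlit reruns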
                async def run_search():
                    async with DownloadManager(use_proxy=use_proxy, proxy=proxy, query=query, num_results=num_results) as dm:
                        with st.spinner("Searching..."):
                            urls = await dm.search_bing()
                            if urls:
                                st.session_state.search_results = urls
                                st.success(f"Found {len(urls)} results!")
                                for i, url in enumerate(urls, 1):
                                    with st.expander(f"Result {i}: {url}", expanded=(i == 1)):
                                        if st.button(f"Deep Search Result {i}", key=f"deep_search_result_{i}"):
                                            st.session_state.deep_search_url = url
                                            st.session_state.do_deep_search = True
                            else:
                                st.warning("No search results found.")
                asyncio.run(run_search())

    else:  # PDF Summarizer mode
        if summarizer is None:
            st.error("PDF summarization is not available due to model loading errors.")
        else:
            st.header("PDF Summarizer")
            pdf_url = st.text_input("Enter PDF URL", key="pdf_url_input")
            if st.button("Summarize", key="summarize_btn"):
                if pdf_url:
                    with st.spinner("Generating summary..."):
                        try:
                            response = requests.get(pdf_url, stream=True, timeout=60)
                            response.raise_for_status()
                            # Close the temp file before reopening it so this also works on Windows
                            with tempfile.NamedTemporaryFile(delete=False, suffix=".pdf") as temp_pdf:
                                temp_pdf.write(response.content)
                            reader = PdfReader(temp_pdf.name)
                            text = " ".join([page.extract_text() or "" for page in reader.pages])
                            os.remove(temp_pdf.name)
                            # The model's input is limited, so summarize only the first 3000 characters
                            summary = summarizer(text[:3000], max_length=200, min_length=50, do_sample=False)
                            st.write("Summary:", summary[0]['summary_text'])
                        except Exception as e:
                            st.error(f"Error summarizing PDF: {e}")

if __name__ == "__main__":
    main()