import streamlit as st
st.set_page_config(page_title="Advanced File Downloader", layout="wide")
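# st.set_page_config must be the first Streamlit command executed, which is why
# it is called here before the heavier imports below.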

# Core imports
import os
import subprocess
from playwright.async_api import async_playwright, TimeoutError as PlaywrightTimeoutError
import asyncio
import logging
from urllib.parse import urlparse, quote_plus
import re
from pathlib import Path
from io import BytesIO
import random
from bs4 import BeautifulSoup
from PyPDF2 import PdfReader
import zipfile
import tempfile
import mimetypes
import requests
import datetime
import spacy
import spacy.cli
from spacy.language import Language
import google_auth_oauthlib.flow
import googleapiclient.discovery
import googleapiclient.http
import google.auth.transport.requests
from async_timeout import timeout as async_timeout
import pandas as pd
from sentence_transformers import SentenceTransformer
from transformers import pipeline
import schedule
import threading
import time
import hashlib
from reportlab.lib.pagesizes import letter
from reportlab.pdfgen import canvas
from sklearn.cluster import KMeans
import numpy as np

# -------------------- Logging Setup --------------------
logging.basicConfig(
    filename='advanced_download_log.txt',
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)

GOOGLE_OAUTH_CONFIG = {
    "web": {
        "client_id": "90798824947-u25obg1q844qeikjoh4jdmi579kn9p1c.apps.googleusercontent.com",
        "project_id": "huggingface-449214",
        "auth_uri": "https://accounts.google.com/o/oauth2/auth",
        "token_uri": "https://oauth2.googleapis.com/token",
        "auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
        "client_secret": "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",
        "redirect_uris": ["https://euler314-craw-web.hf.space/"]
    }
}
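# Note: shipping a client_secret in source exposes it to anyone who can read this
# file; in a deployed app these values would normally come from environment
# variables or st.secrets rather than being hardcoded.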

# Playwright Setup
@st.cache_resource
def install_playwright_dependencies():
    # Cached so the apt-get / browser install runs once per server process
    # rather than on every Streamlit rerun of this script.
    os.environ['PLAYWRIGHT_BROWSERS_PATH'] = os.path.expanduser("~/.cache/ms-playwright")
    subprocess.run(['apt-get', 'update', '-y'], check=True)
    packages = [
        'libnss3', 'libnss3-tools', 'libnspr4', 'libatk1.0-0',
        'libatk-bridge2.0-0', 'libatspi2.0-0', 'libcups2', 'libxcomposite1',
        'libxdamage1', 'libdrm2', 'libgbm1', 'libpango-1.0-0'
    ]
    subprocess.run(['apt-get', 'install', '-y', '--no-install-recommends'] + packages, check=True)
    subprocess.run(['python3', '-m', 'playwright', 'install', 'chromium'], check=True)

install_playwright_dependencies()

# Model Loading
@st.cache_resource
def load_models():
    try:
        # Load spaCy model
        try:
            nlp = spacy.load("en_core_web_sm")
        except OSError:
            st.info("Downloading spaCy model...")
            spacy.cli.download("en_core_web_sm")
            nlp = spacy.load("en_core_web_sm")

        # Load SentenceTransformer
        try:
            semantic_model = SentenceTransformer('deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B')
        except Exception as e:
            st.error(f"Error loading SentenceTransformer: {e}")
            semantic_model = None

        # Load Transformers pipeline
        try:
            summarizer = pipeline("summarization", model="facebook/bart-large-cnn")
        except Exception as e:
            st.error(f"Error loading Transformers: {e}")
            summarizer = None

        return nlp, semantic_model, summarizer
    except Exception as e:
        st.error(f"Error loading models: {e}")
        return None, None, None

nlp_model, semantic_model, summarizer = load_models()

# Utility Functions
def get_random_user_agent():
    USER_AGENTS = [
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36',
        'Mozilla/5.0 (Macintosh; Intel Mac OS X 12_6_3) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.4 Safari/605.1.15',
        'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36',
        'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:115.0) Gecko/20100101 Firefox/115.0',
    ]
    return random.choice(USER_AGENTS)

def sizeof_fmt(num, suffix='B'):
    for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
        if abs(num) < 1024.0:
            return f"{num:3.1f}{unit}{suffix}"
        num /= 1024.0
    return f"{num:.1f}Y{suffix}"

def create_zip_file(file_paths, output_dir):
    timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
    zip_path = os.path.join(output_dir, f"downloads_{timestamp}.zip")
    with zipfile.ZipFile(zip_path, 'w') as zipf:
        for file_path in file_paths:
            zipf.write(file_path, os.path.basename(file_path))
    return zip_path
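# Example usage (hypothetical paths):
#   zip_path = create_zip_file(["./downloads/report.pdf"], "./downloads")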

# Google Drive Functions
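# These helpers implement the copy/paste OAuth flow used in the sidebar:
# get_google_auth_url() builds the consent URL and exchange_code_for_credentials()
# trades the pasted authorization code for Drive credentials.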
def get_google_auth_url():
    client_config = GOOGLE_OAUTH_CONFIG["web"]
    flow = google_auth_oauthlib.flow.Flow.from_client_config(
        {"web": client_config},
        scopes=["https://www.googleapis.com/auth/drive.file"]
    )
    flow.redirect_uri = client_config["redirect_uris"][0]
    authorization_url, _ = flow.authorization_url(
        access_type="offline",
        include_granted_scopes="true",
        prompt="consent"
    )
    return authorization_url

def exchange_code_for_credentials(auth_code):
    if not auth_code.strip():
        return None, "No code provided."
    try:
        client_config = GOOGLE_OAUTH_CONFIG["web"]
        flow = google_auth_oauthlib.flow.Flow.from_client_config(
            {"web": client_config},
            scopes=["https://www.googleapis.com/auth/drive.file"]
        )
        flow.redirect_uri = client_config["redirect_uris"][0]
        flow.fetch_token(code=auth_code.strip())
        creds = flow.credentials
        if not creds or not creds.valid:
            return None, "Could not validate credentials. Check code and try again."
        return creds, "Google Sign-In successful!"
    except Exception as e:
        return None, f"Error during token exchange: {e}"

def google_drive_upload(file_path, credentials, folder_id=None):
    try:
        drive_service = googleapiclient.discovery.build("drive", "v3", credentials=credentials)
        file_metadata = {'name': os.path.basename(file_path)}
        if folder_id:
            file_metadata['parents'] = [folder_id]
        media = googleapiclient.http.MediaFileUpload(file_path, resumable=True)
        created = drive_service.files().create(body=file_metadata, media_body=media, fields='id').execute()
        return created.get("id", "")
    except Exception as e:
        return f"Error uploading to Drive: {str(e)}"

def create_drive_folder(drive_service, name):
    folder_metadata = {'name': name, 'mimeType': 'application/vnd.google-apps.folder'}
    folder = drive_service.files().create(body=folder_metadata, fields='id').execute()
    return folder.get('id')
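# Both upload helpers assume a signed-in user; see the "Google Drive Integration"
# expander in main() for how the credentials are obtained.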

# DownloadManager Class
class DownloadManager:
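    # Async context manager around Playwright: entering launches headless Chromium
    # (optionally through a proxy) and opens a page with a randomized user agent;
    # exiting closes the browser. Typical usage: async with DownloadManager(...) as dm: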
    def __init__(self, use_proxy=False, proxy=None, query=None, num_results=5):
        self.use_proxy = use_proxy
        self.proxy = proxy
        self.query = query
        self.num_results = num_results
        self.playwright = None
        self.browser = None
        self.context = None
        self.page = None

    async def __aenter__(self):
        self.playwright = await async_playwright().start()
        opts = {
            "headless": True,
            "args": [
                '--no-sandbox',
                '--disable-setuid-sandbox',
                '--disable-dev-shm-usage',
                '--disable-gpu',
                '--no-zygote',
                '--single-process'
            ]
        }
        if self.use_proxy and self.proxy:
            opts["proxy"] = {"server": self.proxy}
        self.browser = await self.playwright.chromium.launch(**opts)
        self.context = await self.browser.new_context(user_agent=get_random_user_agent())
        self.page = await self.context.new_page()
        await self.page.set_extra_http_headers({
            'Accept-Language': 'en-US,en;q=0.9',
            'Accept-Encoding': 'gzip, deflate, br',
            'Referer': 'https://www.bing.com/'
        })
        return self

    async def __aexit__(self, exc_type, exc_val, exc_tb):
        if self.browser:
            await self.browser.close()
        if self.playwright:
            await self.playwright.stop()

    async def search_bing(self):
        urls = []
        try:
            search_url = f"https://www.bing.com/search?q={self.query}"
            await self.page.goto(search_url, timeout=30000)
            await self.page.wait_for_load_state('networkidle')
            links = await self.page.query_selector_all("li.b_algo h2 a")
            for link in links[:self.num_results]:
                href = await link.get_attribute('href')
                if href:
                    urls.append(href)
            return urls
        except Exception as e:
            logger.error(f"Error searching Bing: {e}")
            return []

    async def get_file_size(self, url):
        try:
            page = await self.context.new_page()
            try:
                response = await page.request.head(url, timeout=15000)
                # Playwright lower-cases response header names
                length = response.headers.get('content-length')
                if length:
                    return sizeof_fmt(int(length))
                return "Unknown Size"
            finally:
                await page.close()
        except Exception:
            return "Unknown Size"

    async def get_pdf_metadata(self, url):
        try:
            page = await self.context.new_page()
            try:
                resp = await page.request.get(url, timeout=15000)
                if not resp.ok:
                    return {}
                content = await resp.body()
                reader = PdfReader(BytesIO(content))
                meta = reader.metadata
                return {
                    'Title': meta.get('/Title', 'N/A') if meta else 'N/A',
                    'Author': meta.get('/Author', 'N/A') if meta else 'N/A',
                    'Pages': len(reader.pages),
                }
            finally:
                await page.close()
        except Exception:
            return {}

    async def extract_real_download_url(self, url):
        try:
            page = await self.context.new_page()
            try:
                response = await page.goto(url, wait_until='networkidle', timeout=30000)
                if response and response.headers.get('location'):
                    return response.headers['location']
                return page.url
            finally:
                await page.close()
        except Exception as e:
            logger.error(f"Error extracting real download URL: {e}")
            return url

    async def extract_downloadable_files(self, url, custom_ext_list):
        found_files = []
        try:
            response = await self.page.goto(url, timeout=30000, wait_until='networkidle')
            if not response:
                return []

            final_url = self.page.url
            if '.php' in final_url or 'download' in final_url:
                real_url = await self.extract_real_download_url(final_url)
                if real_url != final_url:
                    found_files.append({
                        'url': real_url,
                        'filename': os.path.basename(urlparse(real_url).path) or 'downloaded_file',
                        'size': await self.get_file_size(real_url),
                        'metadata': {}
                    })
                    return found_files

            await self.page.wait_for_load_state('networkidle', timeout=30000)
            content = await self.page.content()
            soup = BeautifulSoup(content, 'html.parser')
            
            default_exts = ['.pdf', '.docx', '.doc', '.zip', '.rar', '.mp3', '.mp4', 
                            '.avi', '.mkv', '.png', '.jpg', '.jpeg', '.gif', '.xlsx', 
                            '.pptx', '.odt', '.txt']
            all_exts = set(default_exts + [ext.strip().lower() for ext in custom_ext_list if ext.strip()])
            
            parsed_base = urlparse(final_url)
            base_url = f"{parsed_base.scheme}://{parsed_base.netloc}"
            
            for a in soup.find_all('a', href=True):
                href = a['href'].strip()
                
                if '.php' in href.lower() or 'download' in href.lower():
                    full_url = href if href.startswith('http') else f"{base_url}{href}"
                    real_url = await self.extract_real_download_url(full_url)
                    if real_url and real_url != full_url:
                        found_files.append({
                            'url': real_url,
                            'filename': os.path.basename(urlparse(real_url).path) or 'downloaded_file',
                            'size': await self.get_file_size(real_url),
                            'metadata': {}
                        })
                        continue

                if any(href.lower().endswith(ext) for ext in all_exts):
                    file_url = href if href.startswith('http') else f"{base_url}{href}"
                    size_str = await self.get_file_size(file_url)
                    meta = {}
                    if file_url.lower().endswith('.pdf'):
                        meta = await self.get_pdf_metadata(file_url)
                    found_files.append({
                        'url': file_url,
                        'filename': os.path.basename(file_url.split('?')[0]),
                        'size': size_str,
                        'metadata': meta
                    })

                # Handle Google Drive links
                elif ("drive.google.com" in href) or ("docs.google.com" in href):
                    file_id = None
                    for pattern in [r'/file/d/([^/]+)', r'id=([^&]+)', r'open\?id=([^&]+)']:
                        match = re.search(pattern, href)
                        if match:
                            file_id = match.group(1)
                            break
                    if file_id:
                        direct_url = f"https://drive.google.com/uc?export=download&id={file_id}"
                        filename = file_id
                        try:
                            response = await self.page.request.head(direct_url, timeout=15000)
                            cd = response.headers.get("Content-Disposition", "")
                            if cd:
                                mt = re.search(r'filename\*?="?([^";]+)', cd)
                                if mt:
                                    filename = mt.group(1).strip('"').strip()
                            found_files.append({
                                'url': direct_url,
                                'filename': filename,
                                'size': await self.get_file_size(direct_url),
                                'metadata': {}
                            })
                        except Exception as e:
                            logger.error(f"Error processing Google Drive link: {e}")
            
            seen_urls = set()
            unique_files = []
            for f in found_files:
                if f['url'] not in seen_urls:
                    seen_urls.add(f['url'])
                    unique_files.append(f)
            return unique_files
        except Exception as e:
            logger.error(f"Error extracting files from {url}: {e}")
            return []

    async def download_file(self, file_info, save_dir, referer):
        file_url = file_info['url']
        fname = file_info['filename']
        path = os.path.join(save_dir, fname)
        base, ext = os.path.splitext(fname)
        counter = 1
        while os.path.exists(path):
            path = os.path.join(save_dir, f"{base}_{counter}{ext}")
            counter += 1
        os.makedirs(save_dir, exist_ok=True)
        try:
            if "drive.google.com" in file_url:
                import gdown
                output = gdown.download(file_url, path, quiet=False)
                if output:
                    return path
                return None
            page = await self.context.new_page()
            try:
                headers = {
                    'Accept': '*/*',
                    'Accept-Encoding': 'gzip, deflate, br',
                    'Referer': referer
                }
                response = await page.request.get(file_url, headers=headers, timeout=30000)
                if response.status == 200:
                    content = await response.body()
                    with open(path, 'wb') as f:
                        f.write(content)
                    return path
                logger.error(f"Download failed with status {response.status}: {file_url}")
                return None
            finally:
                await page.close()
        except Exception as e:
            logger.error(f"Error downloading {file_url}: {e}")
            return None

    async def deep_search(self, url, custom_ext_list=None, sublink_limit=10000, timeout=60):
        if not custom_ext_list:
            custom_ext_list = []
        progress_text = st.empty()
        progress_bar = st.progress(0)
        file_count_text = st.empty()
        try:
            progress_text.text("Analyzing main page...")
            main_files = await self.extract_downloadable_files(url, custom_ext_list)
            initial_count = len(main_files)
            file_count_text.text(f"Found {initial_count} files on main page")
            progress_text.text("Getting sublinks...")
            sublinks = await self.get_sublinks(url, sublink_limit)
            total_links = len(sublinks)
            progress_text.text(f"Found {total_links} sublinks to process")
            if not sublinks:
                progress_bar.progress(1.0)
                return main_files
            all_files = main_files
            for i, sublink in enumerate(sublinks, 1):
                progress = i / total_links
                progress_text.text(f"Processing sublink {i}/{total_links}: {sublink}")
                progress_bar.progress(progress)
                try:
                    # Enforce the per-sublink timeout selected in the UI
                    async with async_timeout(timeout):
                        sub_files = await self.extract_downloadable_files(sublink, custom_ext_list)
                        all_files.extend(sub_files)
                except asyncio.TimeoutError:
                    logger.warning(f"Timed out processing sublink: {sublink}")
                file_count_text.text(f"Found {len(all_files)} total files")
            seen_urls = set()
            unique_files = []
            for f in all_files:
                if f['url'] not in seen_urls:
                    seen_urls.add(f['url'])
                    unique_files.append(f)
            final_count = len(unique_files)
            progress_text.text(f"Deep search complete!")
            file_count_text.text(f"Found {final_count} unique files")
            progress_bar.progress(1.0)
            return unique_files
        except Exception as e:
            logger.error(f"Deep search error: {e}")
            progress_text.text(f"Error during deep search: {str(e)}")
            return []
        finally:
            await asyncio.sleep(2)
            if not st.session_state.get('keep_progress', False):
                progress_text.empty()
                progress_bar.empty()

    async def get_sublinks(self, url, limit=10000):
        try:
            await self.page.goto(url, timeout=30000)
            content = await self.page.content()
            soup = BeautifulSoup(content, 'html.parser')
            parsed_base = urlparse(url)
            base_url = f"{parsed_base.scheme}://{parsed_base.netloc}"
            links = set()
            for a in soup.find_all('a', href=True):
                href = a['href'].strip()
                if href.startswith('http'):
                    links.add(href)
                elif href.startswith('/'):
                    links.add(f"{base_url}{href}")
            return list(links)[:limit]
        except Exception as e:
            logger.error(f"Error getting sublinks: {e}")
            return []

# Utility Functions for New Features
def extract_keywords(text, n=5):
    doc = nlp_model(text)
    keywords = [token.text for token in doc if token.is_alpha and not token.is_stop][:n]
    return keywords

def analyze_sentiment(text):
    sentiment_analyzer = pipeline("sentiment-analysis", model="distilbert-base-uncased-finetuned-sst-2-english")
    result = sentiment_analyzer(text[:512])[0]
    return result['label'], result['score']
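# Note: analyze_sentiment builds the pipeline on every call, so the first call
# downloads the DistilBERT model; cache the pipeline if this becomes a hot path.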

def get_file_hash(file_path):
    hasher = hashlib.md5()
    with open(file_path, 'rb') as f:
        hasher.update(f.read())
    return hasher.hexdigest()
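# extract_keywords, analyze_sentiment, and get_file_hash support the "new features"
# noted above but are not yet wired into the Streamlit UI in main() below.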

# Main Function
def main():
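    # Streamlit reruns this script on every widget interaction; st.session_state
    # keeps discovered files, selections, and Google credentials across reruns.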
    if 'initialized' not in st.session_state:
        st.session_state.initialized = True
        st.session_state.discovered_files = []
        st.session_state.current_url = None
        st.session_state.google_creds = None
        st.session_state.selected_files = []
        st.session_state.do_deep_search = False
        st.session_state.deep_search_url = None
        st.session_state.search_results = []

    st.title("Advanced File Downloader")

    with st.sidebar:
        mode = st.radio("Select Mode", ["Manual URL", "Bing Search", "PDF Summarizer"], key="mode_select")
        with st.expander("Advanced Options", expanded=True):
            custom_extensions = st.text_input("Custom File Extensions", placeholder=".csv, .txt, .epub", key="custom_ext_input", help="Enter extensions like .csv, .txt")
            max_sublinks = st.number_input("Maximum Sublinks to Process", min_value=1, max_value=100000, value=10000, step=50, key="max_sublinks_input", help="Max sublinks to scan from main page")
            sublink_timeout = st.number_input("Search Timeout (seconds per sublink)", min_value=1, max_value=3000, value=30, step=5, key="timeout_input", help="Timeout for each sublink")
            use_proxy = st.checkbox("Use Proxy", key="proxy_checkbox")
            proxy = st.text_input("Proxy URL", placeholder="http://proxy:port", key="proxy_input")
        with st.expander("Google Drive Integration", expanded=False):
            if st.button("Start Google Sign-In", key="google_signin_btn"):
                auth_url = get_google_auth_url()
                st.markdown(f"[Click here to authorize]({auth_url})")
            auth_code = st.text_input("Enter authorization code", key="auth_code_input")
            if st.button("Complete Sign-In", key="complete_signin_btn") and auth_code:
                creds, msg = exchange_code_for_credentials(auth_code)
                st.session_state.google_creds = creds
                st.write(msg)

    if mode == "Manual URL":
        st.header("Manual URL Mode")
        url = st.text_input("Enter URL", placeholder="https://example.com", key="url_input")
        col1, col2 = st.columns([3, 1])
        with col1:
            if st.button("Deep Search", use_container_width=True, key="deep_search_btn"):
                if url:
                    custom_ext_list = [ext.strip().lower() for ext in custom_extensions.split(',') if ext.strip()]
                    valid_ext_list = [ext for ext in custom_ext_list if re.match(r'^\.[a-zA-Z0-9]+$', ext)]
                    if custom_ext_list != valid_ext_list:
                        st.warning("Invalid extensions ignored. Use format like '.csv'.")
                    async def run_deep_search():
                        async with DownloadManager(use_proxy=use_proxy, proxy=proxy) as dm:
                            files = await dm.deep_search(url, valid_ext_list, max_sublinks, sublink_timeout)
                            return files
                    files = asyncio.run(run_deep_search())
                    if files:
                        st.session_state.discovered_files = files
                        st.session_state.current_url = url
                        st.success(f"Found {len(files)} files!")
                    else:
                        st.warning("No files found.")

        if st.session_state.discovered_files:
            files = st.session_state.discovered_files
            st.success(f"Found {len(files)} files!")
            col1, col2 = st.columns([1, 4])
            with col1:
                if st.button("Select All", key="select_all_btn"):
                    st.session_state.selected_files = list(range(len(files)))
                if st.button("Clear Selection", key="clear_selection_btn"):
                    st.session_state.selected_files = []
            selected_files = st.multiselect("Select files to download", options=list(range(len(files))), default=st.session_state.selected_files, format_func=lambda x: f"{files[x]['filename']} ({files[x]['size']})", key="file_multiselect")
            st.session_state.selected_files = selected_files
            if selected_files:
                col1, col2, col3, col4 = st.columns(4)
                with col1:
                    download_dir = st.text_input("Download Directory", value="./downloads", key="download_dir_input")
                with col2:
                    create_zip = st.checkbox("Create ZIP file", value=True, key="create_zip_checkbox")
                with col3:
                    delete_after = st.checkbox("Delete after creating ZIP", key="delete_after_checkbox")
                with col4:
                    upload_to_drive = st.checkbox("Upload to Google Drive", key="upload_drive_checkbox")
                if st.button("Download Selected", key="download_btn"):
                    if not os.path.exists(download_dir):
                        os.makedirs(download_dir)
                    async def download_files():
                        downloaded_paths = []
                        progress_bar = st.progress(0)
                        status_text = st.empty()
                        async with DownloadManager(use_proxy=use_proxy, proxy=proxy) as dm:
                            for i, idx in enumerate(selected_files):
                                progress = (i + 1) / len(selected_files)
                                file_info = files[idx]
                                status_text.text(f"Downloading {file_info['filename']}... ({i+1}/{len(selected_files)})")
                                progress_bar.progress(progress)
                                path = await dm.download_file(file_info, download_dir, url)
                                if path:
                                    downloaded_paths.append(path)
                            status_text.empty()
                            progress_bar.empty()
                            return downloaded_paths
                    downloaded = asyncio.run(download_files())
                    if downloaded:
                        st.success(f"Successfully downloaded {len(downloaded)} files")
                        if create_zip:
                            zip_path = create_zip_file(downloaded, download_dir)
                            st.success(f"Created ZIP file: {zip_path}")
                            with open(zip_path, "rb") as f:
                                zip_data = f.read()
                            st.download_button("Download ZIP", data=zip_data, file_name=os.path.basename(zip_path), mime="application/zip")
                            if upload_to_drive and st.session_state.google_creds:
                                drive_service = googleapiclient.discovery.build("drive", "v3", credentials=st.session_state.google_creds)
                                folder_id = create_drive_folder(drive_service, f"Downloads_{urlparse(url).netloc}")
                                drive_id = google_drive_upload(zip_path, st.session_state.google_creds, folder_id)
                                if not isinstance(drive_id, str) or not drive_id.startswith("Error"):
                                    st.success(f"Uploaded to Google Drive. File ID: {drive_id}")
                                else:
                                    st.error(drive_id)
                            if delete_after:
                                for path in downloaded:
                                    try:
                                        os.remove(path)
                                    except Exception as e:
                                        st.warning(f"Could not delete {path}: {e}")
                                st.info("Deleted original files after ZIP creation")
                        else:
                            for path in downloaded:
                                with open(path, "rb") as f:
                                    file_data = f.read()
                                st.download_button(f"Download {os.path.basename(path)}", data=file_data, file_name=os.path.basename(path))

    elif mode == "Bing Search":
        st.header("Bing Search Mode")
        query = st.text_input("Enter search query", key="search_query_input")
        num_results = st.slider("Number of results", 1, 50, 5, key="num_results_slider")
        if st.button("Search", key="search_btn"):
            if query:
                async def run_search():
                    async with DownloadManager(use_proxy=use_proxy, proxy=proxy, query=query, num_results=num_results) as dm:
                        with st.spinner("Searching..."):
                            urls = await dm.search_bing()
                            if urls:
                                st.session_state.search_results = urls
                                st.success(f"Found {len(urls)} results!")
                                for i, url in enumerate(urls, 1):
                                    with st.expander(f"Result {i}: {url}", expanded=(i == 1)):
                                        if st.button(f"Deep Search Result {i}", key=f"deep_search_result_{i}"):
                                            st.session_state.deep_search_url = url
                                            st.session_state.do_deep_search = True
                            else:
                                st.warning("No search results found.")
                asyncio.run(run_search())

    else:  # PDF Summarizer mode
        if summarizer is None:
            st.error("PDF summarization is not available due to model loading errors.")
        else:
            st.header("PDF Summarizer")
            pdf_url = st.text_input("Enter PDF URL", key="pdf_url_input")
            if st.button("Summarize", key="summarize_btn"):
                if pdf_url:
                    with st.spinner("Generating summary..."):
                        try:
                            response = requests.get(pdf_url, stream=True)
                            response.raise_for_status()
                            temp_pdf = tempfile.NamedTemporaryFile(delete=False, suffix=".pdf")
                            temp_pdf.close()
                            with open(temp_pdf.name, "wb") as f:
                                f.write(response.content)
                            reader = PdfReader(temp_pdf.name)
                            text = " ".join([page.extract_text() or "" for page in reader.pages])
                            os.remove(temp_pdf.name)
                            summary = summarizer(text[:3000], max_length=200, min_length=50, do_sample=False)
                            st.write("Summary:", summary[0]['summary_text'])
                        except Exception as e:
                            st.error(f"Error summarizing PDF: {e}")

if __name__ == "__main__":
    main()