import streamlit as st
st.set_page_config(page_title="Advanced File Downloader", layout="wide")
# Core imports
import os
import subprocess
from playwright.async_api import async_playwright, TimeoutError as PlaywrightTimeoutError
import asyncio
import logging
from urllib.parse import urlparse, urljoin, quote_plus
import re
from pathlib import Path
from io import BytesIO
import random
from bs4 import BeautifulSoup
from PyPDF2 import PdfReader
import zipfile
import tempfile
import mimetypes
import requests
import datetime
import spacy
import spacy.cli
from spacy.language import Language
import google_auth_oauthlib.flow
import googleapiclient.discovery
from googleapiclient.http import MediaFileUpload
import google.auth.transport.requests
from async_timeout import timeout as async_timeout
import pandas as pd
from sentence_transformers import SentenceTransformer
from transformers import pipeline
import schedule
import threading
import time
import hashlib
from reportlab.lib.pagesizes import letter
from reportlab.pdfgen import canvas
from sklearn.cluster import KMeans
import numpy as np
import base64
import shutil
from PIL import Image # Make sure to pip install Pillow
# -------------------- Logging Setup --------------------
logging.basicConfig(
filename='advanced_download_log.txt',
level=logging.INFO,
format='%(asctime)s - %(levelname)s - %(message)s'
)
logger = logging.getLogger(__name__)
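# OAuth client configuration for the Google Drive integration. In a real
# deployment, client_id and client_secret should come from environment
# variables or st.secrets rather than being committed to source.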
GOOGLE_OAUTH_CONFIG = {
"web": {
"client_id": "90798824947-u25obg1q844qeikjoh4jdmi579kn9p1c.apps.googleusercontent.com",
"project_id": "huggingface-449214",
"auth_uri": "https://accounts.google.com/o/oauth2/auth",
"token_uri": "https://oauth2.googleapis.com/token",
"auth_provider_x509_cert_url": "https://www.googleapis.com/oauth2/v1/certs",
"client_secret": "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",
"redirect_uris": ["https://euler314-craw-web.hf.space/"]
}
}
# Playwright Setup
def install_playwright_dependencies():
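    """Install the system libraries and Chromium build that Playwright needs.

    Assumes a Debian-based container where apt-get is available (e.g. a
    Hugging Face Space); on other systems these calls will fail.
    """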
os.environ['PLAYWRIGHT_BROWSERS_PATH'] = os.path.expanduser("~/.cache/ms-playwright")
subprocess.run(['apt-get', 'update', '-y'], check=True)
packages = [
'libnss3', 'libnss3-tools', 'libnspr4', 'libatk1.0-0',
'libatk-bridge2.0-0', 'libatspi2.0-0', 'libcups2', 'libxcomposite1',
'libxdamage1', 'libdrm2', 'libgbm1', 'libpango-1.0-0'
]
subprocess.run(['apt-get', 'install', '-y', '--no-install-recommends'] + packages, check=True)
subprocess.run(['python3', '-m', 'playwright', 'install', 'chromium'], check=True)
install_playwright_dependencies()
# Model Loading
@st.cache_resource
def load_models():
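    """Load and cache the spaCy, sentence-embedding, and summarization models."""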
try:
# Load spaCy model
try:
nlp = spacy.load("en_core_web_sm")
except OSError:
st.info("Downloading spaCy model...")
spacy.cli.download("en_core_web_sm")
nlp = spacy.load("en_core_web_sm")
# Load SentenceTransformer
try:
semantic_model = SentenceTransformer('deepseek-ai/DeepSeek-R1-Distill-Qwen-1.5B')
except Exception as e:
st.error(f"Error loading SentenceTransformer: {e}")
semantic_model = None
# Load Transformers pipeline
try:
summarizer = pipeline("summarization", model="facebook/bart-large-cnn")
except Exception as e:
st.error(f"Error loading Transformers: {e}")
summarizer = None
return nlp, semantic_model, summarizer
except Exception as e:
st.error(f"Error loading models: {e}")
return None, None, None
nlp_model, semantic_model, summarizer = load_models()
# Utility Functions
def get_random_user_agent():
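    """Return a random desktop user-agent string to vary request fingerprints."""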
USER_AGENTS = [
'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36',
'Mozilla/5.0 (Macintosh; Intel Mac OS X 12_6_3) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/16.4 Safari/605.1.15',
'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36',
'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:115.0) Gecko/20100101 Firefox/115.0',
]
return random.choice(USER_AGENTS)
def sizeof_fmt(num, suffix='B'):
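    """Format a byte count as a human-readable string, e.g. 1536 -> '1.5KB'."""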
for unit in ['', 'K', 'M', 'G', 'T', 'P', 'E', 'Z']:
if abs(num) < 1024.0:
return f"{num:3.1f}{unit}{suffix}"
num /= 1024.0
return f"{num:.1f}Y{suffix}"
def create_zip_file(file_paths, output_dir):
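    """Bundle the given files into a timestamped ZIP archive inside output_dir."""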
timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
zip_path = os.path.join(output_dir, f"downloads_{timestamp}.zip")
with zipfile.ZipFile(zip_path, 'w') as zipf:
for file_path in file_paths:
zipf.write(file_path, os.path.basename(file_path))
return zip_path
# Google Drive Functions
def get_google_auth_url():
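    """Build the Google OAuth consent URL for the drive.file scope."""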
client_config = GOOGLE_OAUTH_CONFIG["web"]
flow = google_auth_oauthlib.flow.Flow.from_client_config(
{"web": client_config},
scopes=["https://www.googleapis.com/auth/drive.file"]
)
flow.redirect_uri = client_config["redirect_uris"][0]
authorization_url, _ = flow.authorization_url(
access_type="offline",
include_granted_scopes="true",
prompt="consent"
)
return authorization_url
def exchange_code_for_credentials(auth_code):
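    """Exchange an OAuth authorization code for credentials; returns (creds, message)."""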
if not auth_code.strip():
return None, "No code provided."
try:
client_config = GOOGLE_OAUTH_CONFIG["web"]
flow = google_auth_oauthlib.flow.Flow.from_client_config(
{"web": client_config},
scopes=["https://www.googleapis.com/auth/drive.file"]
)
flow.redirect_uri = client_config["redirect_uris"][0]
flow.fetch_token(code=auth_code.strip())
creds = flow.credentials
if not creds or not creds.valid:
return None, "Could not validate credentials. Check code and try again."
return creds, "Google Sign-In successful!"
except Exception as e:
return None, f"Error during token exchange: {e}"
def google_drive_upload(file_path, credentials, folder_id=None):
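    """Upload a file to Google Drive, optionally into a folder; returns a file ID or an error string."""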
try:
drive_service = googleapiclient.discovery.build("drive", "v3", credentials=credentials)
file_metadata = {'name': os.path.basename(file_path)}
if folder_id:
file_metadata['parents'] = [folder_id]
        media = MediaFileUpload(file_path, resumable=True)
created = drive_service.files().create(body=file_metadata, media_body=media, fields='id').execute()
return created.get("id", "")
except Exception as e:
return f"Error uploading to Drive: {str(e)}"
def create_drive_folder(drive_service, name):
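    """Create a Google Drive folder and return its ID."""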
folder_metadata = {'name': name, 'mimeType': 'application/vnd.google-apps.folder'}
folder = drive_service.files().create(body=folder_metadata, fields='id').execute()
return folder.get('id')
# DownloadManager Class
class DownloadManager:
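    """Async context manager around a headless Chromium instance (via Playwright),
    used for Bing searches, crawling pages for downloadable files, and downloads."""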
def __init__(self, use_proxy=False, proxy=None, query=None, num_results=5):
self.use_proxy = use_proxy
self.proxy = proxy
self.query = query
self.num_results = num_results
self.playwright = None
self.browser = None
self.context = None
self.page = None
async def __aenter__(self):
self.playwright = await async_playwright().start()
opts = {
"headless": True,
"args": [
'--no-sandbox',
'--disable-setuid-sandbox',
'--disable-dev-shm-usage',
'--disable-gpu',
'--no-zygote',
'--single-process'
]
}
if self.use_proxy and self.proxy:
opts["proxy"] = {"server": self.proxy}
self.browser = await self.playwright.chromium.launch(**opts)
self.context = await self.browser.new_context(user_agent=get_random_user_agent())
self.page = await self.context.new_page()
await self.page.set_extra_http_headers({
'Accept-Language': 'en-US,en;q=0.9',
'Accept-Encoding': 'gzip, deflate, br',
'Referer': 'https://www.bing.com/'
})
return self
async def __aexit__(self, exc_type, exc_val, exc_tb):
if self.browser:
await self.browser.close()
if self.playwright:
await self.playwright.stop()
async def search_bing(self):
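        """Search Bing for self.query and return up to self.num_results result URLs."""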
urls = []
try:
search_url = f"https://www.bing.com/search?q={self.query}"
await self.page.goto(search_url, timeout=30000)
await self.page.wait_for_load_state('networkidle')
links = await self.page.query_selector_all("li.b_algo h2 a")
for link in links[:self.num_results]:
href = await link.get_attribute('href')
if href:
urls.append(href)
return urls
except Exception as e:
logger.error(f"Error searching Bing: {e}")
return []
    async def get_file_size(self, url):
        """Return a human-readable file size from a HEAD request, or 'Unknown Size'."""
        try:
            # context.new_page() returns a coroutine, not an async context manager,
            # so await it and close the page explicitly
            page = await self.context.new_page()
            try:
                response = await page.request.head(url, timeout=15000)
                # Playwright lowercases response header names
                length = response.headers.get('content-length')
                if length:
                    return sizeof_fmt(int(length))
                return "Unknown Size"
            finally:
                await page.close()
        except Exception:
            return "Unknown Size"
    async def get_pdf_metadata(self, url):
        """Fetch a PDF and return its Title, Author, and page count; {} on failure."""
        try:
            page = await self.context.new_page()
            try:
                resp = await page.request.get(url, timeout=15000)
                if not resp.ok:
                    return {}
                content = await resp.body()
                reader = PdfReader(BytesIO(content))
                meta = reader.metadata
                return {
                    'Title': meta.get('/Title', 'N/A') if meta else 'N/A',
                    'Author': meta.get('/Author', 'N/A') if meta else 'N/A',
                    'Pages': len(reader.pages),
                }
            finally:
                await page.close()
        except Exception:
            return {}
    async def extract_real_download_url(self, url):
        """Follow a (possibly redirecting) link and return the final resolved URL."""
        try:
            page = await self.context.new_page()
            try:
                # goto follows redirects automatically, so page.url is the real target
                await page.goto(url, wait_until='networkidle', timeout=30000)
                return page.url
            finally:
                await page.close()
        except Exception as e:
            logger.error(f"Error extracting real download URL: {e}")
            return url
async def extract_downloadable_files(self, url, custom_ext_list):
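        """Scan a page for downloadable links (direct files, redirectors, and
        Google Drive links) and return a de-duplicated list of file records."""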
found_files = []
try:
response = await self.page.goto(url, timeout=30000, wait_until='networkidle')
if not response:
return []
final_url = self.page.url
if '.php' in final_url or 'download' in final_url:
real_url = await self.extract_real_download_url(final_url)
if real_url != final_url:
found_files.append({
'url': real_url,
'filename': os.path.basename(urlparse(real_url).path) or 'downloaded_file',
'size': await self.get_file_size(real_url),
'metadata': {}
})
return found_files
await self.page.wait_for_load_state('networkidle', timeout=30000)
content = await self.page.content()
soup = BeautifulSoup(content, 'html.parser')
default_exts = ['.pdf', '.docx', '.doc', '.zip', '.rar', '.mp3', '.mp4',
'.avi', '.mkv', '.png', '.jpg', '.jpeg', '.gif', '.xlsx',
'.pptx', '.odt', '.txt']
all_exts = set(default_exts + [ext.strip().lower() for ext in custom_ext_list if ext.strip()])
parsed_base = urlparse(final_url)
base_url = f"{parsed_base.scheme}://{parsed_base.netloc}"
for a in soup.find_all('a', href=True):
href = a['href'].strip()
if '.php' in href.lower() or 'download' in href.lower():
full_url = href if href.startswith('http') else f"{base_url}{href}"
real_url = await self.extract_real_download_url(full_url)
if real_url and real_url != full_url:
found_files.append({
'url': real_url,
'filename': os.path.basename(urlparse(real_url).path) or 'downloaded_file',
'size': await self.get_file_size(real_url),
'metadata': {}
})
continue
if any(href.lower().endswith(ext) for ext in all_exts):
file_url = href if href.startswith('http') else f"{base_url}{href}"
size_str = await self.get_file_size(file_url)
meta = {}
if file_url.lower().endswith('.pdf'):
meta = await self.get_pdf_metadata(file_url)
found_files.append({
'url': file_url,
'filename': os.path.basename(file_url.split('?')[0]),
'size': size_str,
'metadata': meta
})
# Handle Google Drive links
elif ("drive.google.com" in href) or ("docs.google.com" in href):
file_id = None
for pattern in [r'/file/d/([^/]+)', r'id=([^&]+)', r'open\?id=([^&]+)']:
match = re.search(pattern, href)
if match:
file_id = match.group(1)
break
if file_id:
# We'll detect file type during download, so just use the ID for filename initially
filename = f"gdrive_{file_id}"
try:
# Get file info to determine type and size
file_type, is_view_only = await self.get_google_drive_file_info(file_id)
if file_type:
filename = f"{filename}.{file_type}"
found_files.append({
'url': href, # Use original URL, as we'll process it specially
'filename': filename,
'size': "View-only" if is_view_only else await self.get_file_size(f"https://drive.google.com/uc?export=download&id={file_id}"),
'metadata': {'view_only': is_view_only, 'file_type': file_type, 'file_id': file_id}
})
except Exception as e:
logger.error(f"Error processing Google Drive link: {e}")
# Fallback if we can't get info
found_files.append({
'url': href,
'filename': filename,
'size': "Unknown Size",
'metadata': {'file_id': file_id}
})
seen_urls = set()
unique_files = []
for f in found_files:
if f['url'] not in seen_urls:
seen_urls.add(f['url'])
unique_files.append(f)
return unique_files
except Exception as e:
logger.error(f"Error extracting files from {url}: {e}")
return []
async def download_file(self, file_info, save_dir, referer):
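        """Download one file described by file_info into save_dir; returns the saved path or None."""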
file_url = file_info['url']
fname = file_info['filename']
path = os.path.join(save_dir, fname)
base, ext = os.path.splitext(fname)
counter = 1
while os.path.exists(path):
path = os.path.join(save_dir, f"{base}_{counter}{ext}")
counter += 1
os.makedirs(save_dir, exist_ok=True)
try:
# Special handling for Google Drive files
if "drive.google.com" in file_url or "docs.google.com" in file_url:
# Use enhanced Google Drive downloader
success = await self.download_from_google_drive(file_url, path)
return path if success else None
# Original code for non-Google Drive downloads
            page = await self.context.new_page()
            try:
                headers = {
                    'Accept': '*/*',
                    'Accept-Encoding': 'gzip, deflate, br',
                    'Referer': referer
                }
                response = await page.request.get(file_url, headers=headers, timeout=30000)
                if response.status == 200:
                    content = await response.body()
                    with open(path, 'wb') as f:
                        f.write(content)
                    return path
                else:
                    logger.error(f"Download failed with status {response.status}: {file_url}")
                    return None
            finally:
                await page.close()
except Exception as e:
logger.error(f"Error downloading {file_url}: {e}")
return None
async def download_from_google_drive(self, url, save_path):
"""Enhanced method to download from Google Drive with multiple fallback approaches"""
# Extract the file ID from different URL formats
file_id = None
url_patterns = [
r'drive\.google\.com/file/d/([^/]+)',
r'drive\.google\.com/open\?id=([^&]+)',
r'docs\.google\.com/\w+/d/([^/]+)',
r'id=([^&]+)',
r'drive\.google\.com/uc\?id=([^&]+)',
]
for pattern in url_patterns:
match = re.search(pattern, url)
if match:
file_id = match.group(1)
break
if not file_id:
logger.error(f"Could not extract file ID from URL: {url}")
return False
# Determine file type first (important for handling different file types)
file_type, is_view_only = await self.get_google_drive_file_info(file_id)
logger.info(f"Google Drive file type: {file_type}, View-only: {is_view_only}")
base, ext = os.path.splitext(save_path)
if not ext and file_type:
# Add the correct extension if missing
save_path = f"{base}.{file_type}"
# For view-only files, use specialized approaches
if is_view_only:
# Approach 1: For PDFs, use the JS method
if file_type == 'pdf':
success = await self.download_viewonly_pdf_with_js(file_id, save_path)
if success:
return True
# Approach 2: For Google Docs, Sheets, etc., use export API
if file_type in ['doc', 'docx', 'sheet', 'ppt', 'xlsx', 'pptx']:
success = await self.export_google_doc(file_id, file_type, save_path)
if success:
return True
# Approach 3: Try the direct screenshot method for any view-only file
success = await self.download_viewonly_with_screenshots(file_id, save_path, file_type)
if success:
return True
# Try standard approaches for non-view-only files
try:
# Try with gdown first
import gdown
output = gdown.download(f"https://drive.google.com/uc?id={file_id}", save_path, quiet=False, fuzzy=True)
if output and os.path.exists(save_path) and os.path.getsize(save_path) > 0:
with open(save_path, 'rb') as f:
content = f.read(100) # Read first 100 bytes
                    # Check that we did not receive an HTML error page instead of the file
                    if b'<html' not in content.lower() and b'<!doctype' not in content.lower():
logger.info(f"Successfully downloaded with gdown: {url}")
return True
except Exception as e:
logger.warning(f"gdown download failed: {e}")
# Try with requests and session cookies
try:
session = requests.Session()
session.headers.update({'User-Agent': get_random_user_agent()})
# Visit the page first to get cookies
session.get(f"https://drive.google.com/file/d/{file_id}/view", timeout=30)
# Try download
url = f"https://drive.google.com/uc?id={file_id}&export=download"
response = session.get(url, stream=True, timeout=30)
# Check for confirmation token
confirmation_token = None
for k, v in response.cookies.items():
if k.startswith('download_warning'):
confirmation_token = v
break
# Use confirmation token if found
if confirmation_token:
url = f"{url}&confirm={confirmation_token}"
response = session.get(url, stream=True, timeout=60)
# Check if we're getting HTML instead of the file
content_type = response.headers.get('Content-Type', '')
if 'text/html' in content_type:
logger.warning("Received HTML instead of file - likely download restriction")
else:
with open(save_path, 'wb') as f:
for chunk in response.iter_content(chunk_size=1024*1024):
if chunk:
f.write(chunk)
if os.path.exists(save_path) and os.path.getsize(save_path) > 0:
with open(save_path, 'rb') as f:
content = f.read(100)
                    if b'<html' not in content.lower() and b'<!doctype' not in content.lower():
logger.info("Successfully downloaded with requests session")
return True
except Exception as e:
logger.warning(f"Requests session download failed: {e}")
# If all methods failed for view-only file, try one last approach
if is_view_only:
try:
# Try a direct headless browser download
                page = await self.context.new_page()
                try:
await page.goto(f"https://drive.google.com/file/d/{file_id}/view", wait_until='networkidle', timeout=60000)
# Try to capture the content directly from viewer
file_content = await page.evaluate("""
() => {
// Try to find the actual viewer content
const viewerContent = document.querySelector('.drive-viewer-paginated-content');
if (viewerContent) {
return viewerContent.innerHTML;
}
return document.documentElement.innerHTML;
}
""")
if file_content:
                        # Save the captured markup as a proper HTML document for later conversion
                        html_path = f"{base}.html"
                        with open(html_path, 'w', encoding='utf-8') as f:
                            f.write(f"""<!DOCTYPE html>
<html><head><meta charset="utf-8"><title>Google Drive Extracted Content</title></head>
<body>{file_content}</body></html>""")
# If requested a PDF, convert HTML to PDF
if file_type == 'pdf' or ext.lower() == '.pdf':
try:
import pdfkit
pdfkit.from_file(html_path, save_path)
os.remove(html_path) # Clean up HTML file
return True
except Exception as pdf_err:
logger.warning(f"Error converting HTML to PDF: {pdf_err}")
# Keep the HTML file as fallback
shutil.copy(html_path, save_path)
return True
else:
# Just use the HTML file
shutil.copy(html_path, save_path)
                            return True
                finally:
                    await page.close()
except Exception as e:
logger.warning(f"Final direct browser capture failed: {e}")
# All methods failed
logger.error(f"All download approaches failed for Google Drive file: {file_id}")
return False
async def get_google_drive_file_info(self, file_id):
"""Get file type and view-only status from Google Drive"""
file_type = None
is_view_only = False
try:
            page = await self.context.new_page()
            try:
await page.goto(f"https://drive.google.com/file/d/{file_id}/view", timeout=30000)
# Check if view-only
view_only_text = await page.query_selector('text="the owner has not granted you permission to download this file"')
is_view_only = view_only_text is not None
# Check for Google Docs viewer
gdocs_viewer = await page.query_selector('iframe[src*="docs.google.com/document"]')
gsheets_viewer = await page.query_selector('iframe[src*="docs.google.com/spreadsheets"]')
gslides_viewer = await page.query_selector('iframe[src*="docs.google.com/presentation"]')
if gdocs_viewer:
file_type = 'docx'
elif gsheets_viewer:
file_type = 'xlsx'
elif gslides_viewer:
file_type = 'pptx'
else:
# Check for PDF viewer
pdf_viewer = await page.query_selector('embed[type="application/pdf"]')
if pdf_viewer:
file_type = 'pdf'
else:
# Check for image viewer
img_viewer = await page.query_selector('img[src*="googleusercontent.com"]')
if img_viewer:
# Get image type from src
img_src = await img_viewer.get_attribute('src')
if 'jpg' in img_src or 'jpeg' in img_src:
file_type = 'jpg'
elif 'png' in img_src:
file_type = 'png'
else:
file_type = 'jpg' # Default to jpg
else:
# Generic file type fallback
file_type = 'pdf' # Default to PDF
# If still no type, check filename
if not file_type:
title_element = await page.query_selector('div[role="heading"]')
if title_element:
title = await title_element.text_content()
if title:
ext_match = re.search(r'\.([a-zA-Z0-9]+)$', title)
if ext_match:
                                file_type = ext_match.group(1).lower()
            finally:
                await page.close()
except Exception as e:
logger.error(f"Error getting Google Drive file info: {e}")
file_type = 'pdf' # Default to PDF if we can't determine
return file_type, is_view_only
async def download_viewonly_pdf_with_js(self, file_id, save_path):
"""Download view-only PDF using JavaScript approach - improved version"""
try:
            page = await self.context.new_page()
            try:
# Set viewport size to ensure we capture full pages
await page.set_viewport_size({"width": 1200, "height": 1600})
# Visit the file
view_url = f"https://drive.google.com/file/d/{file_id}/view"
await page.goto(view_url, wait_until='networkidle', timeout=60000)
# Wait for rendering
await page.wait_for_timeout(2000)
# Inject required libraries - use CDN for jsPDF
await page.evaluate("""
                    async () => {
// Add jsPDF
return new Promise((resolve) => {
const jspdfScript = document.createElement('script');
jspdfScript.src = 'https://cdnjs.cloudflare.com/ajax/libs/jspdf/2.5.1/jspdf.umd.min.js';
jspdfScript.onload = () => resolve(true);
document.head.appendChild(jspdfScript);
});
}
""")
# Wait for libraries to load
await page.wait_for_timeout(2000)
# Scroll through document to load all pages
await page.evaluate("""
                    async () => {
const delay = ms => new Promise(resolve => setTimeout(resolve, ms));
const container = document.querySelector('.drive-viewer-paginated-scrollable');
if (!container) return false;
const scrollHeight = container.scrollHeight;
const viewportHeight = container.clientHeight;
const scrollStep = viewportHeight / 2;
for (let scrollPos = 0; scrollPos < scrollHeight; scrollPos += scrollStep) {
container.scrollTo(0, scrollPos);
await delay(500);
}
// One final scroll to bottom to ensure everything is loaded
container.scrollTo(0, scrollHeight);
await delay(1000);
// Scroll back to top for PDF creation
container.scrollTo(0, 0);
await delay(500);
return true;
}
""")
# Wait after scrolling
await page.wait_for_timeout(2000)
# Use the improved PDF creation script that captures all pages
pdf_base64 = await page.evaluate("""
                    async () => {
try {
// Make sure jsPDF is loaded
if (typeof window.jspdf === 'undefined') {
console.error('jsPDF not loaded');
return null;
}
const { jsPDF } = window.jspdf;
const pdf = new jsPDF();
// Get all page elements
const pages = document.querySelectorAll('.drive-viewer-paginated-page');
console.log('Found pages:', pages.length);
if (pages.length === 0) {
// Alternative: try to find images directly
const images = Array.from(document.querySelectorAll('img')).filter(img =>
img.src.startsWith('blob:') && img.width > 100 && img.height > 100
);
console.log('Found images:', images.length);
if (images.length === 0) {
return null;
}
// Process each image
for (let i = 0; i < images.length; i++) {
const img = images[i];
if (i > 0) {
pdf.addPage();
}
// Create canvas and draw image
const canvas = document.createElement('canvas');
canvas.width = img.width;
canvas.height = img.height;
const ctx = canvas.getContext('2d');
ctx.drawImage(img, 0, 0, img.width, img.height);
// Add to PDF
const imgData = canvas.toDataURL('image/jpeg', 0.95);
// Calculate dimensions
const pageWidth = pdf.internal.pageSize.getWidth();
const pageHeight = pdf.internal.pageSize.getHeight();
const imgRatio = img.height / img.width;
let imgWidth = pageWidth - 10;
let imgHeight = imgWidth * imgRatio;
if (imgHeight > pageHeight - 10) {
imgHeight = pageHeight - 10;
imgWidth = imgHeight / imgRatio;
}
// Center on page
const x = (pageWidth - imgWidth) / 2;
const y = (pageHeight - imgHeight) / 2;
pdf.addImage(imgData, 'JPEG', x, y, imgWidth, imgHeight);
}
} else {
// Process each page
const container = document.querySelector('.drive-viewer-paginated-scrollable');
const delay = ms => new Promise(resolve => setTimeout(resolve, ms));
for (let i = 0; i < pages.length; i++) {
// Add a new page for each page after the first
if (i > 0) {
pdf.addPage();
}
// Scroll to the page and wait for it to render
pages[i].scrollIntoView();
await delay(300);
// Find the image element inside the page
const pageImages = pages[i].querySelectorAll('img');
let targetImage = null;
for (const img of pageImages) {
if (img.src.startsWith('blob:') && img.width > 50 && img.height > 50) {
targetImage = img;
break;
}
}
if (!targetImage) {
// If no image found, try taking a screenshot of the page instead
const pageCanvas = document.createElement('canvas');
pageCanvas.width = pages[i].clientWidth;
pageCanvas.height = pages[i].clientHeight;
const ctx = pageCanvas.getContext('2d');
// Draw the page background
ctx.fillStyle = 'white';
ctx.fillRect(0, 0, pageCanvas.width, pageCanvas.height);
// Use html2canvas approach
try {
await delay(100);
// Just draw what we can see
const allElements = pages[i].querySelectorAll('*');
for (const el of allElements) {
if (el.tagName === 'IMG' && el.complete && el.src) {
const rect = el.getBoundingClientRect();
try {
ctx.drawImage(el, rect.left, rect.top, rect.width, rect.height);
} catch (e) {
console.error('Draw error:', e);
}
}
}
} catch (e) {
console.error('Canvas error:', e);
}
// Add the canvas to the PDF
const imgData = pageCanvas.toDataURL('image/jpeg', 0.95);
// Calculate dimensions
const pageWidth = pdf.internal.pageSize.getWidth();
const pageHeight = pdf.internal.pageSize.getHeight();
const imgRatio = pageCanvas.height / pageCanvas.width;
let imgWidth = pageWidth - 10;
let imgHeight = imgWidth * imgRatio;
if (imgHeight > pageHeight - 10) {
imgHeight = pageHeight - 10;
imgWidth = imgHeight / imgRatio;
}
// Center on page
const x = (pageWidth - imgWidth) / 2;
const y = (pageHeight - imgHeight) / 2;
pdf.addImage(imgData, 'JPEG', x, y, imgWidth, imgHeight);
} else {
// Use the found image
const canvas = document.createElement('canvas');
canvas.width = targetImage.naturalWidth || targetImage.width;
canvas.height = targetImage.naturalHeight || targetImage.height;
const ctx = canvas.getContext('2d');
// Draw image to canvas
try {
ctx.drawImage(targetImage, 0, 0, canvas.width, canvas.height);
} catch (e) {
console.error('Error drawing image:', e);
continue;
}
// Add to PDF
const imgData = canvas.toDataURL('image/jpeg', 0.95);
// Calculate dimensions
const pageWidth = pdf.internal.pageSize.getWidth();
const pageHeight = pdf.internal.pageSize.getHeight();
const imgRatio = canvas.height / canvas.width;
let imgWidth = pageWidth - 10;
let imgHeight = imgWidth * imgRatio;
if (imgHeight > pageHeight - 10) {
imgHeight = pageHeight - 10;
imgWidth = imgHeight / imgRatio;
}
// Center on page
const x = (pageWidth - imgWidth) / 2;
const y = (pageHeight - imgHeight) / 2;
pdf.addImage(imgData, 'JPEG', x, y, imgWidth, imgHeight);
}
}
}
// Return as base64
return pdf.output('datauristring');
} catch (e) {
console.error('PDF creation error:', e);
return null;
}
}
""")
                # jsPDF's datauristring may include a filename parameter before the
                # base64 payload, so just check that base64 data is present
                if not pdf_base64 or 'base64,' not in pdf_base64:
# If script method failed, try screenshot approach
logger.warning("PDF creation script failed, trying fallback method")
return await self.download_viewonly_with_screenshots(file_id, save_path, 'pdf')
# Save the PDF from base64
try:
                    base64_data = pdf_base64.split('base64,', 1)[1]
pdf_bytes = base64.b64decode(base64_data)
with open(save_path, 'wb') as f:
f.write(pdf_bytes)
# Verify file is not empty
if os.path.exists(save_path) and os.path.getsize(save_path) > 1000:
logger.info(f"Successfully saved PDF to {save_path}")
return True
else:
logger.warning(f"Generated PDF is too small, using fallback method")
return await self.download_viewonly_with_screenshots(file_id, save_path, 'pdf')
except Exception as e:
logger.error(f"Error saving PDF: {e}")
                    return await self.download_viewonly_with_screenshots(file_id, save_path, 'pdf')
            finally:
                await page.close()
except Exception as e:
logger.error(f"Error in view-only PDF download: {e}")
# Try fallback method
return await self.download_viewonly_with_screenshots(file_id, save_path, 'pdf')
async def download_viewonly_with_screenshots(self, file_id, save_path, file_type):
"""Download any view-only file by taking screenshots"""
try:
            page = await self.context.new_page()
            try:
# Set high-resolution viewport
await page.set_viewport_size({"width": 1600, "height": 1200})
# Navigate to the file
await page.goto(f"https://drive.google.com/file/d/{file_id}/view", wait_until='networkidle', timeout=60000)
# Make sure the file is loaded
await page.wait_for_load_state('networkidle')
await page.wait_for_timeout(3000) # Extra time for rendering
# Create directory for screenshots if multiple pages
base_dir = os.path.dirname(save_path)
base_name = os.path.splitext(os.path.basename(save_path))[0]
screenshots_dir = os.path.join(base_dir, f"{base_name}_screenshots")
os.makedirs(screenshots_dir, exist_ok=True)
# Check if it's a multi-page document
is_multi_page = await page.evaluate("""
() => {
const pages = document.querySelectorAll('.drive-viewer-paginated-page');
return pages.length > 1;
}
""")
if is_multi_page and file_type == 'pdf':
# For multi-page PDFs, take screenshots of each page
page_count = await page.evaluate("""
async () => {
const delay = ms => new Promise(resolve => setTimeout(resolve, ms));
const pages = document.querySelectorAll('.drive-viewer-paginated-page');
const container = document.querySelector('.drive-viewer-paginated-scrollable');
if (!container || pages.length === 0) return 0;
// Scroll through to make sure all pages are loaded
const scrollHeight = container.scrollHeight;
const viewportHeight = container.clientHeight;
const scrollStep = viewportHeight;
for (let scrollPos = 0; scrollPos < scrollHeight; scrollPos += scrollStep) {
container.scrollTo(0, scrollPos);
await delay(300);
}
// Scroll back to top
container.scrollTo(0, 0);
await delay(300);
return pages.length;
}
""")
logger.info(f"Found {page_count} pages in document")
# Take screenshots of each page
screenshots = []
for i in range(page_count):
# Scroll to page
await page.evaluate(f"""
async () => {{
const delay = ms => new Promise(resolve => setTimeout(resolve, ms));
const pages = document.querySelectorAll('.drive-viewer-paginated-page');
if (pages.length <= {i}) return false;
pages[{i}].scrollIntoView();
await delay(500);
return true;
}}
""")
# Take screenshot
screenshot_path = os.path.join(screenshots_dir, f"page_{i+1}.png")
await page.screenshot(path=screenshot_path, clip={
'x': 0,
'y': 0,
'width': 1600,
'height': 1200
})
screenshots.append(screenshot_path)
# Combine screenshots into PDF
c = canvas.Canvas(save_path)
for screenshot in screenshots:
img = Image.open(screenshot)
width, height = img.size
# Add page to PDF
c.setPageSize((width, height))
c.drawImage(screenshot, 0, 0, width, height)
c.showPage()
c.save()
# Clean up screenshots
for screenshot in screenshots:
os.remove(screenshot)
os.rmdir(screenshots_dir)
return os.path.exists(save_path) and os.path.getsize(save_path) > 0
else:
# For single-page or non-PDF files, just take one screenshot
screenshot_path = os.path.join(screenshots_dir, "screenshot.png")
                    await page.screenshot(path=screenshot_path, full_page=True)
# Convert to requested format if needed
if file_type == 'pdf':
# Create PDF from screenshot
img = Image.open(screenshot_path)
width, height = img.size
c = canvas.Canvas(save_path, pagesize=(width, height))
c.drawImage(screenshot_path, 0, 0, width, height)
c.save()
else:
# Just copy the screenshot to the destination with proper extension
shutil.copy(screenshot_path, save_path)
# Clean up
os.remove(screenshot_path)
os.rmdir(screenshots_dir)
                    return os.path.exists(save_path) and os.path.getsize(save_path) > 0
            finally:
                await page.close()
except Exception as e:
logger.error(f"Error taking screenshots: {e}")
return False
async def export_google_doc(self, file_id, file_type, save_path):
"""Export Google Docs/Sheets/Slides to downloadable formats"""
try:
# Map file types to export formats
export_formats = {
'doc': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document', # docx
'docx': 'application/vnd.openxmlformats-officedocument.wordprocessingml.document',
'sheet': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', # xlsx
'xlsx': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
'ppt': 'application/vnd.openxmlformats-officedocument.presentationml.presentation', # pptx
'pptx': 'application/vnd.openxmlformats-officedocument.presentationml.presentation',
'pdf': 'application/pdf',
}
export_format = export_formats.get(file_type, 'application/pdf')
export_url = f"https://docs.google.com/document/d/{file_id}/export?format={file_type}"
if 'sheet' in file_type or 'xlsx' in file_type:
export_url = f"https://docs.google.com/spreadsheets/d/{file_id}/export?format=xlsx"
elif 'ppt' in file_type or 'presentation' in file_type:
export_url = f"https://docs.google.com/presentation/d/{file_id}/export/pptx"
elif file_type == 'pdf':
export_url = f"https://docs.google.com/document/d/{file_id}/export?format=pdf"
            page = await self.context.new_page()
            try:
# Get cookies from the main view page first
await page.goto(f"https://drive.google.com/file/d/{file_id}/view", wait_until='networkidle')
                # Fetch the export through the browser's request context (a plain
                # page.goto here would trigger a download and abort the navigation)
                response = await page.request.get(export_url)
                if response.status == 200:
                    content = await response.body()
                    with open(save_path, 'wb') as f:
                        f.write(content)
                    return os.path.exists(save_path) and os.path.getsize(save_path) > 0
                else:
                    logger.warning(f"Export failed with status {response.status}")
                    return False
            finally:
                await page.close()
except Exception as e:
logger.error(f"Error exporting Google Doc: {e}")
return False
async def deep_search(self, url, custom_ext_list=None, sublink_limit=10000, timeout=60):
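        """Crawl the main page and its sublinks for downloadable files, updating Streamlit progress widgets."""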
if not custom_ext_list:
custom_ext_list = []
progress_text = st.empty()
progress_bar = st.progress(0)
file_count_text = st.empty()
try:
progress_text.text("Analyzing main page...")
main_files = await self.extract_downloadable_files(url, custom_ext_list)
initial_count = len(main_files)
file_count_text.text(f"Found {initial_count} files on main page")
progress_text.text("Getting sublinks...")
sublinks = await self.get_sublinks(url, sublink_limit)
total_links = len(sublinks)
progress_text.text(f"Found {total_links} sublinks to process")
if not sublinks:
progress_bar.progress(1.0)
return main_files
all_files = main_files
for i, sublink in enumerate(sublinks, 1):
progress = i / total_links
progress_text.text(f"Processing sublink {i}/{total_links}: {sublink}")
progress_bar.progress(progress)
                try:
                    # Bound each sublink scan by the configured timeout
                    async with async_timeout(timeout):
                        sub_files = await self.extract_downloadable_files(sublink, custom_ext_list)
                        all_files.extend(sub_files)
                except asyncio.TimeoutError:
                    logger.warning(f"Timeout while processing sublink: {sublink}")
file_count_text.text(f"Found {len(all_files)} total files")
seen_urls = set()
unique_files = []
for f in all_files:
if f['url'] not in seen_urls:
seen_urls.add(f['url'])
unique_files.append(f)
final_count = len(unique_files)
progress_text.text(f"Deep search complete!")
file_count_text.text(f"Found {final_count} unique files")
progress_bar.progress(1.0)
return unique_files
except Exception as e:
logger.error(f"Deep search error: {e}")
progress_text.text(f"Error during deep search: {str(e)}")
return []
finally:
await asyncio.sleep(2)
if not st.session_state.get('keep_progress', False):
progress_text.empty()
progress_bar.empty()
async def get_sublinks(self, url, limit=10000):
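        """Collect absolute http(s) URLs linked from the page, up to `limit`."""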
try:
await self.page.goto(url, timeout=30000)
content = await self.page.content()
soup = BeautifulSoup(content, 'html.parser')
parsed_base = urlparse(url)
base_url = f"{parsed_base.scheme}://{parsed_base.netloc}"
links = set()
            for a in soup.find_all('a', href=True):
                href = a['href'].strip()
                if href.startswith('http'):
                    links.add(href)
                elif href.startswith('/'):
                    links.add(f"{base_url}{href}")
                elif href and not href.startswith(('#', 'javascript:', 'mailto:')):
                    # Resolve other relative links against the page URL
                    links.add(urljoin(url, href))
return list(links)[:limit]
except Exception as e:
logger.error(f"Error getting sublinks: {e}")
return []
# Utility Functions for New Features
def extract_keywords(text, n=5):
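    """Return up to n alphabetic, non-stopword tokens as rough keywords."""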
doc = nlp_model(text)
keywords = [token.text for token in doc if token.is_alpha and not token.is_stop][:n]
return keywords
def analyze_sentiment(text):
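    """Classify sentiment of the first 512 characters; returns (label, score).
    Note: this builds a new pipeline on every call, which is slow."""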
sentiment_analyzer = pipeline("sentiment-analysis", model="distilbert-base-uncased-finetuned-sst-2-english")
result = sentiment_analyzer(text[:512])[0]
return result['label'], result['score']
def get_file_hash(file_path):
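    """Return the MD5 hex digest of a file (useful for de-duplication)."""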
    hasher = hashlib.md5()
    with open(file_path, 'rb') as f:
        # Hash in chunks so large files need not fit in memory
        for chunk in iter(lambda: f.read(1024 * 1024), b''):
            hasher.update(chunk)
    return hasher.hexdigest()
# Main Function
def main():
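    """Streamlit entry point: set up session state, sidebar options, and the selected mode's UI."""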
if 'initialized' not in st.session_state:
st.session_state.initialized = True
st.session_state.discovered_files = []
st.session_state.current_url = None
st.session_state.google_creds = None
st.session_state.selected_files = []
st.session_state.do_deep_search = False
st.session_state.deep_search_url = None
st.session_state.search_results = []
st.title("Advanced File Downloader")
with st.sidebar:
mode = st.radio("Select Mode", ["Manual URL", "Bing Search", "PDF Summarizer"], key="mode_select")
with st.expander("Advanced Options", expanded=True):
custom_extensions = st.text_input("Custom File Extensions", placeholder=".csv, .txt, .epub", key="custom_ext_input", help="Enter extensions like .csv, .txt")
max_sublinks = st.number_input("Maximum Sublinks to Process", min_value=1, max_value=100000, value=10000, step=50, key="max_sublinks_input", help="Max sublinks to scan from main page")
sublink_timeout = st.number_input("Search Timeout (seconds per sublink)", min_value=1, max_value=3000, value=30, step=5, key="timeout_input", help="Timeout for each sublink")
use_proxy = st.checkbox("Use Proxy", key="proxy_checkbox")
proxy = st.text_input("Proxy URL", placeholder="http://proxy:port", key="proxy_input")
with st.expander("Google Drive Integration", expanded=False):
if st.button("Start Google Sign-In", key="google_signin_btn"):
auth_url = get_google_auth_url()
st.markdown(f"[Click here to authorize]({auth_url})")
auth_code = st.text_input("Enter authorization code", key="auth_code_input")
if st.button("Complete Sign-In", key="complete_signin_btn") and auth_code:
creds, msg = exchange_code_for_credentials(auth_code)
st.session_state.google_creds = creds
st.write(msg)
if mode == "Manual URL":
st.header("Manual URL Mode")
url = st.text_input("Enter URL", placeholder="https://example.com", key="url_input")
col1, col2 = st.columns([3, 1])
with col1:
if st.button("Deep Search", use_container_width=True, key="deep_search_btn"):
if url:
custom_ext_list = [ext.strip().lower() for ext in custom_extensions.split(',') if ext.strip()]
valid_ext_list = [ext for ext in custom_ext_list if re.match(r'^\.[a-zA-Z0-9]+$', ext)]
if custom_ext_list != valid_ext_list:
st.warning("Invalid extensions ignored. Use format like '.csv'.")
async def run_deep_search():
async with DownloadManager(use_proxy=use_proxy, proxy=proxy) as dm:
files = await dm.deep_search(url, valid_ext_list, max_sublinks, sublink_timeout)
return files
files = asyncio.run(run_deep_search())
if files:
st.session_state.discovered_files = files
st.session_state.current_url = url
st.success(f"Found {len(files)} files!")
else:
st.warning("No files found.")
if st.session_state.discovered_files:
files = st.session_state.discovered_files
st.success(f"Found {len(files)} files!")
col1, col2 = st.columns([1, 4])
with col1:
if st.button("Select All", key="select_all_btn"):
st.session_state.selected_files = list(range(len(files)))
if st.button("Clear Selection", key="clear_selection_btn"):
st.session_state.selected_files = []
selected_files = st.multiselect("Select files to download", options=list(range(len(files))), default=st.session_state.selected_files, format_func=lambda x: f"{files[x]['filename']} ({files[x]['size']})", key="file_multiselect")
st.session_state.selected_files = selected_files
if selected_files:
col1, col2, col3, col4 = st.columns(4)
with col1:
download_dir = st.text_input("Download Directory", value="./downloads", key="download_dir_input")
with col2:
create_zip = st.checkbox("Create ZIP file", value=True, key="create_zip_checkbox")
with col3:
delete_after = st.checkbox("Delete after creating ZIP", key="delete_after_checkbox")
with col4:
upload_to_drive = st.checkbox("Upload to Google Drive", key="upload_drive_checkbox")
if st.button("Download Selected", key="download_btn"):
if not os.path.exists(download_dir):
os.makedirs(download_dir)
async def download_files():
downloaded_paths = []
progress_bar = st.progress(0)
status_text = st.empty()
async with DownloadManager(use_proxy=use_proxy, proxy=proxy) as dm:
for i, idx in enumerate(selected_files):
progress = (i + 1) / len(selected_files)
file_info = files[idx]
status_text.text(f"Downloading {file_info['filename']}... ({i+1}/{len(selected_files)})")
progress_bar.progress(progress)
path = await dm.download_file(file_info, download_dir, url)
if path:
downloaded_paths.append(path)
status_text.empty()
progress_bar.empty()
return downloaded_paths
downloaded = asyncio.run(download_files())
if downloaded:
st.success(f"Successfully downloaded {len(downloaded)} files")
if create_zip:
zip_path = create_zip_file(downloaded, download_dir)
st.success(f"Created ZIP file: {zip_path}")
with open(zip_path, "rb") as f:
zip_data = f.read()
st.download_button("Download ZIP", data=zip_data, file_name=os.path.basename(zip_path), mime="application/zip")
if upload_to_drive and st.session_state.google_creds:
drive_service = googleapiclient.discovery.build("drive", "v3", credentials=st.session_state.google_creds)
folder_id = create_drive_folder(drive_service, f"Downloads_{urlparse(url).netloc}")
drive_id = google_drive_upload(zip_path, st.session_state.google_creds, folder_id)
if not isinstance(drive_id, str) or not drive_id.startswith("Error"):
st.success(f"Uploaded to Google Drive. File ID: {drive_id}")
else:
st.error(drive_id)
if delete_after:
for path in downloaded:
try:
os.remove(path)
except Exception as e:
st.warning(f"Could not delete {path}: {e}")
st.info("Deleted original files after ZIP creation")
else:
for path in downloaded:
with open(path, "rb") as f:
file_data = f.read()
st.download_button(f"Download {os.path.basename(path)}", data=file_data, file_name=os.path.basename(path))
elif mode == "Bing Search":
st.header("Bing Search Mode")
query = st.text_input("Enter search query", key="search_query_input")
num_results = st.slider("Number of results", 1, 50, 5, key="num_results_slider")
if st.button("Search", key="search_btn"):
if query:
async def run_search():
async with DownloadManager(use_proxy=use_proxy, proxy=proxy, query=query, num_results=num_results) as dm:
with st.spinner("Searching..."):
urls = await dm.search_bing()
if urls:
st.session_state.search_results = urls
st.success(f"Found {len(urls)} results!")
for i, url in enumerate(urls, 1):
with st.expander(f"Result {i}: {url}", expanded=(i == 1)):
if st.button(f"Deep Search Result {i}", key=f"deep_search_result_{i}"):
st.session_state.deep_search_url = url
st.session_state.do_deep_search = True
else:
st.warning("No search results found.")
asyncio.run(run_search())
else: # PDF Summarizer mode
if summarizer is None:
st.error("PDF summarization is not available due to model loading errors.")
else:
st.header("PDF Summarizer")
pdf_url = st.text_input("Enter PDF URL", key="pdf_url_input")
if st.button("Summarize", key="summarize_btn"):
if pdf_url:
with st.spinner("Generating summary..."):
try:
                            response = requests.get(pdf_url, timeout=60)
                            response.raise_for_status()
                            temp_pdf = tempfile.NamedTemporaryFile(delete=False, suffix=".pdf")
                            with open(temp_pdf.name, "wb") as f:
                                f.write(response.content)
reader = PdfReader(temp_pdf.name)
text = " ".join([page.extract_text() or "" for page in reader.pages])
os.remove(temp_pdf.name)
summary = summarizer(text[:3000], max_length=200, min_length=50, do_sample=False)
st.write("Summary:", summary[0]['summary_text'])
except Exception as e:
st.error(f"Error summarizing PDF: {e}")
if __name__ == "__main__":
main()