import streamlit as st
import requests
import os
import urllib
import base64
from bs4 import BeautifulSoup
import hashlib
import json
import mimetypes
import shutil
from zipfile import ZipFile

EXCLUDED_FILES = ['app.py', 'requirements.txt', 'pre-requirements.txt', 'packages.txt', 'README.md', '.gitattributes', "backup.py", "Dockerfile"]

if not os.path.exists("history.json"):
    with open("history.json", "w") as f:
        json.dump({}, f)

def download_file(url, local_filename):
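    """Stream a remote HTTP(S) URL to local_filename in 8 KB chunks; return the path on success, None otherwise."""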
    if url.startswith('http://') or url.startswith('https://'):
        try:
            with requests.get(url, stream=True) as r:
                r.raise_for_status()
                with open(local_filename, 'wb') as f:
                    for chunk in r.iter_content(chunk_size=8192):
                        f.write(chunk)
            return local_filename
        except requests.exceptions.HTTPError as err:
            print(f"HTTP error occurred: {err}")

def download_html_and_files(url, subdir):
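    """Fetch a page, download every linked file into subdir, rewrite the links to local paths, and save the page as subdir/index.html."""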
    html_content = requests.get(url).text
    soup = BeautifulSoup(html_content, 'html.parser')
    base_url = urllib.parse.urlunparse(urllib.parse.urlparse(url)._replace(path='', params='', query='', fragment=''))

    for link in soup.find_all('a'):
        href = link.get('href')
        if not href:
            # Skip anchors that have no href attribute.
            continue
        file_url = urllib.parse.urljoin(base_url, href)
        local_filename = os.path.join(subdir, urllib.parse.urlparse(file_url).path.split('/')[-1])

        if not local_filename.endswith('/') and local_filename != subdir:
            link['href'] = local_filename
            download_file(file_url, local_filename)
    
    with open(os.path.join(subdir, "index.html"), "w") as file:
        file.write(str(soup))

def list_files(directory_path='.'):
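    """Return the plain files in directory_path, excluding the app's own files."""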
    files = [f for f in os.listdir(directory_path) if os.path.isfile(os.path.join(directory_path, f))]
    return [f for f in files if f not in EXCLUDED_FILES]

def get_download_link(file):
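    """Return an HTML anchor tag that embeds the file as a base64 data URI for in-browser download."""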
    with open(file, "rb") as f:
        data = f.read()
        b64 = base64.b64encode(data).decode()
        href = f'<a href="data:file/octet-stream;base64,{b64}" download=\'{os.path.basename(file)}\'>Click to download {os.path.basename(file)}</a>'
    return href

def delete_all_files():
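    """Delete every file (except EXCLUDED_FILES) and every subdirectory under the working directory, then report success in the UI."""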
    for root, dirs, files in os.walk(".", topdown=False):
        for name in files:
            if name not in EXCLUDED_FILES:
                os.remove(os.path.join(root, name))
        for name in dirs:
            shutil.rmtree(os.path.join(root, name))
    st.success("All files and folders deleted successfully!")

def create_zip_and_get_link():
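    """Zip every non-excluded file under the working directory and render a download link for the archive."""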
    zip_filename = "all_files.zip"
    with ZipFile(zip_filename, 'w') as zipf:
        for root, dirs, files in os.walk(".", topdown=False):
            for file in files:
                if file not in EXCLUDED_FILES and file != zip_filename:
                    zipf.write(os.path.join(root, file))
    with open(zip_filename, "rb") as f:
        data = f.read()
        b64 = base64.b64encode(data).decode()
        href = f'<a href="data:file/zip;base64,{b64}" download=\'{zip_filename}\'>🔽 Download All Files</a>'
        st.markdown(href, unsafe_allow_html=True)

def main():
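    """Render the Streamlit sidebar UI: accept a URL, map it to an MD5-named subdirectory, persist the mapping in history.json, and expose download, listing, delete, and zip actions."""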
    st.sidebar.title('Web Datasets Bulk Downloader')
    url = st.sidebar.text_input('Please enter a Web URL to bulk download text and files')

    with open("history.json", "r") as f:
        history = json.load(f)

    if url:
        subdir = hashlib.md5(url.encode()).hexdigest()
        if not os.path.exists(subdir):
            os.makedirs(subdir)
        if url not in history:
            history[url] = subdir
            with open("history.json", "w") as f:
                json.dump(history, f)

    if st.sidebar.button('📥 Get All the Content') and url:
        download_html_and_files(url, history[url])
        show_download_links(history[url])

    if st.sidebar.button('📂 Show Download Links'):
        for subdir in history.values():
            show_download_links(subdir)

    with st.expander("URL History and Downloaded Files"):
        for url, subdir in history.items():
            st.markdown(f"#### {url}")
            show_download_links(subdir)

    if st.sidebar.button('🗑️ Delete All'):
        delete_all_files()

    if st.sidebar.button('📦 Download All'):
        create_zip_and_get_link()
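
# NOTE: show_download_links() is referenced above but is not defined anywhere in
# the file as captured, and main() is never invoked at module level. The block
# below is a minimal sketch of the assumed behavior (list a subdirectory's files
# and render a base64 download link for each) plus the standard entry point; it
# is not the original author's implementation.
def show_download_links(subdir):
    st.write(f"Files for {subdir}:")
    for file in list_files(subdir):
        file_path = os.path.join(subdir, file)
        st.markdown(get_download_link(file_path), unsafe_allow_html=True)

if __name__ == "__main__":
    main()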