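"""Streamlit app that bulk-downloads a web page and the files it links to.

Each URL entered in the sidebar is mirrored into a subdirectory named by the
MD5 hex digest of the URL, and the URL-to-subdirectory mapping is persisted
in history.json. Run locally (assuming this file is saved as app.py) with:

    streamlit run app.py

Dependencies: streamlit, requests, beautifulsoup4.
"""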
import streamlit as st
import requests
import os
import urllib.parse
import base64
from bs4 import BeautifulSoup
import hashlib
import json

EXCLUDED_FILES = ['app.py', 'requirements.txt', 'pre-requirements.txt', 'packages.txt', 'README.md', '.gitattributes', 'backup.py', 'Dockerfile']

# Create a history.json file if it doesn't exist yet
if not os.path.exists("history.json"):
    with open("history.json", "w") as f:
        json.dump({}, f)
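# The resulting history.json maps each entered URL to its subdirectory,
# e.g. {"https://example.com": "<md5 hex digest of the URL>"} (illustrative shape).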

def download_file(url, local_filename):
    # Only fetch absolute web URLs; mailto:, javascript:, and similar
    # link schemes are ignored.
    if url.startswith('http://') or url.startswith('https://'):
        try:
            with requests.get(url, stream=True) as r:
                r.raise_for_status()
                with open(local_filename, 'wb') as f:
                    for chunk in r.iter_content(chunk_size=8192):
                        f.write(chunk)
            return local_filename
        except requests.exceptions.RequestException as err:
            # Covers HTTP errors as well as connection/timeout failures
            print(f"Download error occurred: {err}")
    return None

def download_html_and_files(url, subdir):
    # Fetch the page, download every linked file, and rewrite the links so
    # the saved copy of the page points at the local files.
    html_content = requests.get(url).text
    soup = BeautifulSoup(html_content, 'html.parser')
    base_url = urllib.parse.urlunparse(urllib.parse.urlparse(url)._replace(path='', params='', query='', fragment=''))

    for link in soup.find_all('a', href=True):  # skip anchors without an href
        file_url = urllib.parse.urljoin(base_url, link['href'])
        local_filename = os.path.join(subdir, urllib.parse.urlparse(file_url).path.split('/')[-1])

        # Skip directory-style links that would not yield a filename
        if not local_filename.endswith('/') and local_filename != subdir:
            link['href'] = local_filename
            download_file(file_url, local_filename)

    # Save the modified HTML content alongside the downloaded files
    with open(os.path.join(subdir, "index.html"), "w") as file:
        file.write(str(soup))

def list_files(directory_path='.'):
    files = [f for f in os.listdir(directory_path) if os.path.isfile(os.path.join(directory_path, f))]
    return [f for f in files if f not in EXCLUDED_FILES]

def get_download_link(file):
    # Inline the file as a base64 data URI so the link works inside Streamlit
    with open(file, "rb") as f:
        b64 = base64.b64encode(f.read()).decode()
    filename = os.path.basename(file)
    return f'<a href="data:file/octet-stream;base64,{b64}" download="{filename}">Click to download {filename}</a>'

def show_download_links(subdir):
    st.write(f'Files for {subdir}:')
    for file in list_files(subdir):
        # list_files returns bare filenames, so rejoin with the subdirectory
        st.markdown(get_download_link(os.path.join(subdir, file)), unsafe_allow_html=True)

def main():
    st.sidebar.title('Web Datasets Bulk Downloader')
    url = st.sidebar.text_input('Please enter a Web URL to bulk download text and files')

    # Load history
    with open("history.json", "r") as f:
        history = json.load(f)

    # Record each entered URL in the history, keyed by a per-URL subdirectory
    if url:
        subdir = hashlib.md5(url.encode()).hexdigest()
        if not os.path.exists(subdir):
            os.makedirs(subdir)
        if url not in history:
            history[url] = subdir
            with open("history.json", "w") as f:
                json.dump(history, f)

    if st.sidebar.button('📥 Get All the Content'):
        if url and url in history:
            download_html_and_files(url, history[url])
            show_download_links(history[url])
        else:
            st.sidebar.warning('Enter a URL above first.')

    if st.sidebar.button('📂 Show Download Links'):
        for subdir in history.values():
            show_download_links(subdir)

    # Display the full URL history, each entry with its download links
    with st.expander("URL History and Downloaded Files"):
        for hist_url, subdir in history.items():
            st.markdown(f"#### {hist_url}")
            show_download_links(subdir)

if __name__ == "__main__":
    main()