import streamlit as st
import requests
import os
import urllib.parse
import base64
from bs4 import BeautifulSoup
import hashlib
import json

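# Housekeeping files that belong to the app itself and should never be
# listed, edited, or offered for download.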
EXCLUDED_FILES = ['app.py', 'requirements.txt', 'pre-requirements.txt', 'packages.txt', 'README.md', '.gitattributes', 'backup.py', 'Dockerfile']
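# Preset sites offered in the sidebar's "Select from List" dropdown.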
URLS = {
    "Chordify - Play Along Chords": "https://chordify.net/",
    "National Guitar Academy - Guitar Learning": "https://www.guitaracademy.com/",
    "Ultimate Guitar - Massive Song Database": "https://www.ultimate-guitar.com/",
}

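# Seed an empty URL-to-directory history file on first launch.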
if not os.path.exists("history.json"):
    with open("history.json", "w") as f:
        json.dump({}, f)

def download_file(url, local_filename):
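    """Stream a remote http(s) file to disk; return the local path, or None on failure."""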
    if url.startswith(('http://', 'https://')):
        try:
            with requests.get(url, stream=True) as r:
                r.raise_for_status()
                with open(local_filename, 'wb') as f:
                    for chunk in r.iter_content(chunk_size=8192):
                        f.write(chunk)
            return local_filename
        except requests.exceptions.RequestException as err:
            # RequestException also covers connection errors and timeouts,
            # not just bad HTTP status codes.
            print(f"Download failed for {url}: {err}")
    return None

def download_html_and_files(url, subdir):
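    """Fetch the page at url, download each file its links point to into
    subdir, and save a rewritten index.html whose links reference the
    local copies."""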
    html_content = requests.get(url).text
    soup = BeautifulSoup(html_content, 'html.parser')
    base_url = urllib.parse.urlunparse(urllib.parse.urlparse(url)._replace(path='', params='', query='', fragment=''))

    for link in soup.find_all('a'):
        href = link.get('href')
        if not href:
            continue  # skip anchor tags with no href attribute
        file_url = urllib.parse.urljoin(base_url, href)
        local_filename = os.path.join(subdir, urllib.parse.urlparse(file_url).path.split('/')[-1])

        # Only rewrite and download links that resolve to an actual file name.
        if not local_filename.endswith('/') and local_filename != subdir:
            link['href'] = local_filename
            download_file(file_url, local_filename)

    with open(os.path.join(subdir, "index.html"), "w") as file:
        file.write(str(soup))

def list_files(directory_path='.'):
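    """Return the regular files in directory_path, excluding the app's own housekeeping files."""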
    files = [f for f in os.listdir(directory_path) if os.path.isfile(os.path.join(directory_path, f))]
    return [f for f in files if f not in EXCLUDED_FILES]

def show_file_operations(file_path):
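    """Render Edit / Save / Delete controls for a single file, with widget keys derived from an MD5 of its path."""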
    st.write(f"File: {os.path.basename(file_path)}")
    unique_key = hashlib.md5(file_path.encode()).hexdigest()
    col1, col2, col3 = st.columns(3)

    with col1:
        if st.button("✏️ Edit", key=f"edit_{unique_key}"):
            # Remember across reruns that this file is open for editing;
            # Streamlit reruns the whole script on every button click.
            st.session_state[f"editing_{unique_key}"] = True

    with col2:
        if st.button("💾 Save", key=f"save_{unique_key}"):
            # The edited text is persisted in session state under the
            # text area's key, so it survives the rerun the click causes.
            edited_content = st.session_state.get(f"text_area_{unique_key}")
            if edited_content is None:
                st.warning("Open the file with Edit before saving.")
            else:
                with open(file_path, "w") as f:
                    f.write(edited_content)
                st.success("File saved!")

    with col3:
        if st.button("🗑️ Delete", key=f"delete_{unique_key}"):
            os.remove(file_path)
            st.markdown("File deleted!")
            return

    # Render the editor below the buttons whenever the file is open for editing.
    if st.session_state.get(f"editing_{unique_key}"):
        with open(file_path, "r") as f:
            file_content = f.read()
        st.text_area("Edit the file content:", value=file_content, height=250, key=f"text_area_{unique_key}")

def show_download_links(subdir):
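    """List every downloaded file in subdir with a download link and its file-operation controls."""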
    st.write(f'Files for {subdir}:')
    for file in list_files(subdir):
        file_path = os.path.join(subdir, file)
        if os.path.isfile(file_path):
            st.markdown(get_download_link(file_path), unsafe_allow_html=True)
            show_file_operations(file_path)
        else:
            st.write(f"File not found: {file}")

def get_download_link(file):
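    """Return an HTML anchor that embeds the file as a base64 data URI so the browser can download it."""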
    with open(file, "rb") as f:
        data = f.read()  # avoid shadowing the built-in name `bytes`
    b64 = base64.b64encode(data).decode()
    href = f'<a href="data:file/octet-stream;base64,{b64}" download="{os.path.basename(file)}">Click to download {os.path.basename(file)}</a>'
    return href

def main():
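    """Streamlit entry point: sidebar controls to bulk-download a site's files and browse previously downloaded content."""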
    st.sidebar.title('Web Datasets Bulk Downloader')
    url_input_method = st.sidebar.radio("Choose URL Input Method", ["Enter URL", "Select from List"])
    url = ""
    if url_input_method == "Enter URL":
        url = st.sidebar.text_input('Please enter a Web URL to bulk download text and files')
    else:
        selected_site = st.sidebar.selectbox("Select a Website", list(URLS.keys()))
        url = URLS[selected_site]

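    # Load the persisted URL -> download-directory history.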
    with open("history.json", "r") as f:
        history = json.load(f)

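    # Each URL maps to a stable subdirectory named by the MD5 hash of the URL.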
    if url:
        subdir = hashlib.md5(url.encode()).hexdigest()
        if not os.path.exists(subdir):
            os.makedirs(subdir)
        if url not in history:
            history[url] = subdir
            with open("history.json", "w") as f:
                json.dump(history, f)

    if st.sidebar.button('📥 Get All the Content'):
        if url:
            download_html_and_files(url, history[url])
            show_download_links(history[url])
        else:
            st.sidebar.warning('Please enter or select a URL first.')

    if st.sidebar.button('📂 Show Download Links'):
        for subdir in history.values():
            show_download_links(subdir)

    with st.expander("URL History and Downloaded Files"):
        for past_url, past_subdir in history.items():
            st.markdown(f"#### {past_url}")
            show_download_links(past_subdir)

if __name__ == "__main__":
    main()