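"""Web Datasets Bulk Downloader.

Streamlit app that fetches a web page plus every file it links to into a
hash-named subdirectory, then lets you browse, edit, delete, zip, and
re-download the collected files.
"""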
import streamlit as st
import requests
import os
import urllib.parse
import base64
from bs4 import BeautifulSoup
import hashlib
import json
import glob
import zipfile

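# The app's own files, hidden from the file listings.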
EXCLUDED_FILES = ['app.py', 'requirements.txt', 'pre-requirements.txt', 'packages.txt', 'README.md', '.gitattributes', 'backup.py', 'Dockerfile']
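# Preset sites selectable from the sidebar dropdown.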
URLS = {
    "Chordify - Play Along Chords": "https://chordify.net/",
    "National Guitar Academy - Guitar Learning": "https://www.guitaracademy.com/",
    "Ultimate Guitar - Massive Song Database": "https://www.ultimate-guitar.com/",
    "Wolf Alice": "https://www.chordie.com/song.php/songartist/Wolf+Alice/index.html",
    "Everclear": "https://www.chordie.com/song.php/songartist/Everclear/index.html",
    "Jungle": "https://www.ultimate-guitar.com/artist/jungle_47745",
    "Mylie Cyrus": "https://www.ultimate-guitar.com/search.php?title=mile+cyrus&spelling=Mylie+cyrus",
    "Kanye": "https://www.ultimate-guitar.com/search.php?search_type=title&value=Kanye%20west",
    "Cat Stevens": "https://www.ultimate-guitar.com/search.php?search_type=title&value=cat%20stevens",
    "Metric": "https://www.ultimate-guitar.com/search.php?search_type=title&value=Metric",
    "John Lennon": "https://www.ultimate-guitar.com/search.php?search_type=title&value=John%20Lennon",
}

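# Make sure the URL history file exists before the app reads it.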
if not os.path.exists("history.json"):
    with open("history.json", "w") as f:
        json.dump({}, f)


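# Walk start_dir and zip each subdirectory it contains into <name>.zip at
# the top level, yielding each archive path as soon as it is written.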
def zip_subdirs(start_dir):
    for subdir, dirs, files in os.walk(start_dir):
        if subdir != start_dir:  # Skip the root directory
            zip_filename = os.path.join(start_dir, subdir.split(os.sep)[-1] + '.zip')
            with zipfile.ZipFile(zip_filename, 'w') as zipf:
                for file in files:
                    file_path = os.path.join(subdir, file)
                    zipf.write(file_path, os.path.relpath(file_path, start_dir))
                    st.write(f"Added: {file_path}")
            yield zip_filename

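# Build an HTML link that embeds the zip as a base64 data URI, so Streamlit
# can offer it for download via st.markdown.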
def get_zip_download_link(zip_file):
    with open(zip_file, 'rb') as f:
        data = f.read()
    b64 = base64.b64encode(data).decode()
    link_name = os.path.basename(zip_file)
    href = f'<a href="data:application/zip;base64,{b64}" download="{link_name}">Download: {link_name}</a>'
    return href

  
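# Bundle a flat list of files into one all_files.zip (currently unused).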
@st.cache_resource
def create_zip_of_files(files):
    zip_name = "all_files.zip"
    with zipfile.ZipFile(zip_name, 'w') as zipf:
        for file in files:
            zipf.write(file)
    return zip_name
    

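# Stream a single http(s) URL to local_filename; non-HTTP schemes (mailto:,
# javascript:, bare anchors) are ignored and HTTP errors are logged to the
# console. Returns the local path on success, None otherwise.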
def download_file(url, local_filename):
    if url.startswith('http://') or url.startswith('https://'):
        try:
            with requests.get(url, stream=True) as r:
                r.raise_for_status()
                with open(local_filename, 'wb') as f:
                    for chunk in r.iter_content(chunk_size=8192):
                        f.write(chunk)
            return local_filename
        except requests.exceptions.HTTPError as err:
            print(f"HTTP error occurred: {err}")

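# Fetch a page, download every linked file next to it, rewrite the page's
# links to point at the local copies, and save the result as index.html.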
def download_html_and_files(url, subdir):
    html_content = requests.get(url).text
    soup = BeautifulSoup(html_content, 'html.parser')
    # Resolve each href against the page URL itself so relative links work,
    # and skip anchors that have no href at all.
    for link in soup.find_all('a', href=True):
        file_url = urllib.parse.urljoin(url, link['href'])
        local_filename = os.path.join(subdir, urllib.parse.urlparse(file_url).path.split('/')[-1])

        if not local_filename.endswith('/') and local_filename != subdir:
            link['href'] = local_filename
            download_file(file_url, local_filename)
    
    with open(os.path.join(subdir, "index.html"), "w") as file:
        file.write(str(soup))

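# Plain files in a directory, minus the app's own files.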
def list_files(directory_path='.'):
    files = [f for f in os.listdir(directory_path) if os.path.isfile(os.path.join(directory_path, f))]
    return [f for f in files if f not in EXCLUDED_FILES]

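# Full-page editor used when the app is opened with ?file_to_edit=<path>.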
def file_editor(file_path):
    st.write(f"Editing File: {os.path.basename(file_path)}")
    file_content = ""

    with open(file_path, "r") as f:
        file_content = f.read()

    file_content = st.text_area("Edit the file content:", value=file_content, height=250)

    if st.button("💾 Save"):
        with open(file_path, "w") as f:
            f.write(file_content)
        st.success(f"File '{os.path.basename(file_path)}' saved!")


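# Render one file row: name plus Edit / Save / Delete controls. The edit
# flag lives in st.session_state so the text area persists across the
# rerun that every button click triggers.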
def show_file_operations(file_path, sequence_number):
    unique_key = hashlib.md5(file_path.encode()).hexdigest()
    editing_key = f"editing_{unique_key}_{sequence_number}"

    col01, col02, col1, col2, col3 = st.columns(5)
    with col01:
        st.write(os.path.basename(file_path))
    # with col02:
    #     st.write(file_path)
    with col1:
        edit_key = f"edit_{unique_key}_{sequence_number}"
        if st.button("✏️ Edit", key=edit_key):
            # Flag this file as being edited; the flag survives the rerun.
            st.session_state[editing_key] = True

    file_content = ""
    if st.session_state.get(editing_key):
        with open(file_path, "r") as f:
            initial_content = f.read()
        text_area_key = f"text_area_{unique_key}_{sequence_number}"
        file_content = st.text_area("Edit the file content:", value=initial_content, height=250, key=text_area_key)

    with col2:
        save_key = f"save_{unique_key}_{sequence_number}"
        if st.button("💾 Save", key=save_key):
            if file_content:  # Only write if there is edited content to save
                with open(file_path, "w") as f:
                    f.write(file_content)
                st.success("File saved!")

    with col3:
        delete_key = f"delete_{unique_key}_{sequence_number}"
        if st.button("🗑️ Delete", key=delete_key):
            os.remove(file_path)
            st.markdown("File deleted!")


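# How many times each file has been rendered during this script run; used
# to keep Streamlit widget keys unique when a file is listed more than once.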
file_sequence_numbers = {}

def show_download_links(subdir):
    global file_sequence_numbers
    for file in list_files(subdir):
        file_path = os.path.join(subdir, file)
        if file_path not in file_sequence_numbers:
            file_sequence_numbers[file_path] = 1
        else:
            file_sequence_numbers[file_path] += 1
        sequence_number = file_sequence_numbers[file_path]

        if os.path.isfile(file_path):
            st.markdown(get_download_link(file_path), unsafe_allow_html=True)
            show_file_operations(file_path, sequence_number)
        else:
            st.write(f"File not found: {file}")

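# Build a base64 data-URI download link for an arbitrary file.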
def get_download_link(file):
    with open(file, "rb") as f:
        data = f.read()
    b64 = base64.b64encode(data).decode()
    filename = os.path.basename(file)
    href = f'<a href="data:application/octet-stream;base64,{b64}" download="{filename}">Download: {filename}</a>'
    return href
    
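# Sidebar-driven entry point: pick or enter a URL, fetch its content into a
# hash-named folder, and manage the downloaded files.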
def main():
    st.sidebar.title('Web Datasets Bulk Downloader')

    # Check for query parameters for file editing
    query_params = st.experimental_get_query_params()
    file_to_edit = query_params.get('file_to_edit', [None])[0]

    if file_to_edit and os.path.exists(file_to_edit):
        file_editor(file_to_edit)
    else:
        # Selecting URL input method
        url_input_method = st.sidebar.radio("Choose URL Input Method", ["Enter URL", "Select from List"])
        url = ""
        if url_input_method == "Enter URL":
            url = st.sidebar.text_input('Please enter a Web URL to bulk download text and files')
        else:
            selected_site = st.sidebar.selectbox("Select a Website", list(URLS.keys()))
            url = URLS[selected_site]

        # history.json is guaranteed to exist (created at startup); load the
        # URL -> subdir map from it.
        with open("history.json", "r") as f:
            try:
                history = json.load(f)
            except json.JSONDecodeError:
                history = {}  # Corrupt or empty history file; start fresh.
                
        # Handling URL submission
        if url:
            subdir = hashlib.md5(url.encode()).hexdigest()
            if not os.path.exists(subdir):
                os.makedirs(subdir)
            if url not in history:
                history[url] = subdir
                with open("history.json", "w") as f:
                    json.dump(history, f)

        # Button for downloading content
        if st.sidebar.button('📥 Get All the Content'):
            if url:
                download_html_and_files(url, history[url])
                show_download_links(history[url])

        # Button for showing download links
        if st.sidebar.button('📂 Show Download Links'):
            for subdir in history.values():
                show_download_links(subdir)


        if st.sidebar.button("🗑 Delete All"):
            # Compose all_files
            all_files = glob.glob("*.*")
            all_files = [file for file in all_files if len(os.path.splitext(file)[0]) >= 10]  # exclude files with short names
            all_files.sort(key=lambda x: (os.path.splitext(x)[1], x), reverse=True)  # sort by file type and file name in descending order
                
            for file in all_files:
                os.remove(file)
            st.experimental_rerun()
                        
        if st.sidebar.button("⬇️ Download All"):
            start_directory = '.'  # Current directory
            for zip_file in zip_subdirs(start_directory):
                st.sidebar.markdown(get_zip_download_link(zip_file), unsafe_allow_html=True)
            
        # Expander for showing URL history and download links
        with st.expander("URL History and Downloaded Files"):
            try:
                for url, subdir in history.items():
                    st.markdown(f"#### {url}")
                    show_download_links(subdir)
            except Exception:
                st.write("URL history is empty.")
        # Update each time to show files we have
        for subdir in history.values():
            show_download_links(subdir)

if __name__ == "__main__":
    main()