# WebDatasets / app.py
import streamlit as st
import requests
import os
import urllib.parse
import base64
from bs4 import BeautifulSoup
import hashlib
import json
import mimetypes
import shutil
from zipfile import ZipFile
EXCLUDED_FILES = ['app.py', 'requirements.txt', 'pre-requirements.txt', 'packages.txt',
                  'README.md', '.gitattributes', 'backup.py', 'Dockerfile']
# Create an empty URL-to-folder history store on first run
if not os.path.exists("history.json"):
    with open("history.json", "w") as f:
        json.dump({}, f)

def download_file(url, local_filename):
    """Stream a remote file to disk; return the local path, or None on failure."""
    if url.startswith('http://') or url.startswith('https://'):
        try:
            with requests.get(url, stream=True) as r:
                r.raise_for_status()
                with open(local_filename, 'wb') as f:
                    for chunk in r.iter_content(chunk_size=8192):
                        f.write(chunk)
            return local_filename
        except requests.exceptions.HTTPError as err:
            print(f"HTTP error occurred: {err}")

def download_html_and_files(url, subdir):
    """Download a page, save every linked file into subdir, and rewrite the
    links in the saved index.html to point at the local copies."""
    html_content = requests.get(url).text
    soup = BeautifulSoup(html_content, 'html.parser')
    # Strip path/query/fragment so relative links resolve against the site root
    parsed = urllib.parse.urlparse(url)
    base_url = urllib.parse.urlunparse(parsed._replace(path='', params='', query='', fragment=''))
    for link in soup.find_all('a'):
        file_url = urllib.parse.urljoin(base_url, link.get('href'))
        local_filename = os.path.join(subdir, urllib.parse.urlparse(file_url).path.split('/')[-1])
        # Skip directory links and links that resolve to the subdir itself
        if not local_filename.endswith('/') and local_filename != subdir:
            link['href'] = local_filename
            download_file(file_url, local_filename)
    with open(os.path.join(subdir, "index.html"), "w") as file:
        file.write(str(soup))

def list_files(directory_path='.'):
    """List regular files in directory_path, minus the app's own files."""
    files = [f for f in os.listdir(directory_path) if os.path.isfile(os.path.join(directory_path, f))]
    return [f for f in files if f not in EXCLUDED_FILES]

def get_download_link(file):
    """Return an <a> tag with the file embedded as a base64 data URI."""
    with open(file, "rb") as f:
        data = f.read()  # renamed from `bytes` to avoid shadowing the builtin
    b64 = base64.b64encode(data).decode()
    href = f'<a href="data:file/octet-stream;base64,{b64}" download="{os.path.basename(file)}">Click to download {os.path.basename(file)}</a>'
    return href

def delete_all_files():
    """Remove every downloaded file and folder, keeping the app's own files."""
    for root, dirs, files in os.walk(".", topdown=False):
        for name in files:
            if name not in EXCLUDED_FILES:
                os.remove(os.path.join(root, name))
        for name in dirs:
            shutil.rmtree(os.path.join(root, name))
    st.success("All files and folders deleted successfully!")

def create_zip_and_get_link():
    """Zip every non-excluded file and render a base64 download link."""
    zip_filename = "all_files.zip"
    with ZipFile(zip_filename, 'w') as zipf:
        for root, dirs, files in os.walk(".", topdown=False):
            for file in files:
                if file not in EXCLUDED_FILES and file != zip_filename:
                    zipf.write(os.path.join(root, file))
    with open(zip_filename, "rb") as f:
        data = f.read()
    b64 = base64.b64encode(data).decode()
    href = f'<a href="data:file/zip;base64,{b64}" download="{zip_filename}">🔽 Download All Files</a>'
    st.markdown(href, unsafe_allow_html=True)
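
# show_download_links is called by main() but is missing from this snippet.
# A minimal sketch built from list_files and get_download_link above
# (an assumption; the original implementation may differ):
def show_download_links(subdir):
    st.write(f'Files in {subdir}:')
    for file in list_files(subdir):
        file_path = os.path.join(subdir, file)
        st.markdown(get_download_link(file_path), unsafe_allow_html=True)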

def main():
    st.sidebar.title('Web Datasets Bulk Downloader')
    url = st.sidebar.text_input('Please enter a Web URL to bulk download text and files')

    # Load the URL -> subdirectory history
    with open("history.json", "r") as f:
        history = json.load(f)

    if url:
        # Use an MD5 hash of the URL as a stable per-site folder name
        subdir = hashlib.md5(url.encode()).hexdigest()
        if not os.path.exists(subdir):
            os.makedirs(subdir)
        if url not in history:
            history[url] = subdir
            with open("history.json", "w") as f:
                json.dump(history, f)

    if st.sidebar.button('📥 Get All the Content'):
        download_html_and_files(url, history[url])
        show_download_links(history[url])

    if st.sidebar.button('📂 Show Download Links'):
        for subdir in history.values():
            show_download_links(subdir)

    with st.expander("URL History and Downloaded Files"):
        for url, subdir in history.items():
            st.markdown(f"#### {url}")
            show_download_links(subdir)

    if st.sidebar.button('🗑️ Delete All'):
        delete_all_files()

    if st.sidebar.button('📦 Download All'):
        create_zip_and_get_link()
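
# Assumed entry point: main() must actually be invoked for the Streamlit
# UI to render when the script runs.
if __name__ == "__main__":
    main()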