import base64
import hashlib
import json
import os
import urllib.parse  # `import urllib` alone does not guarantee the parse submodule

import requests
import streamlit as st
from bs4 import BeautifulSoup
# Files that belong to the app itself and should never be listed or deleted.
EXCLUDED_FILES = [
    "app.py", "requirements.txt", "pre-requirements.txt", "packages.txt",
    "README.md", ".gitattributes", "backup.py", "Dockerfile",
]
# Create a history.json file if it doesn't exist yet.
if not os.path.exists("history.json"):
    with open("history.json", "w") as f:
        json.dump({}, f)
def download_file(url, local_filename):
    """Stream a remote file to disk; return the local path, or None on failure."""
    if url.startswith("http://") or url.startswith("https://"):
        try:
            with requests.get(url, stream=True) as r:
                r.raise_for_status()
                with open(local_filename, "wb") as f:
                    # Write in 8 KB chunks so large files don't sit in memory.
                    for chunk in r.iter_content(chunk_size=8192):
                        f.write(chunk)
            return local_filename
        except requests.exceptions.RequestException as err:
            # Catch connection errors and timeouts too, not just HTTP errors.
            st.warning(f"Download failed for {url}: {err}")
    return None
def download_html_and_files(url, subdir):
    """Download a page, save every linked file into subdir, and rewrite the
    page's links to point at the local copies."""
    html_content = requests.get(url).text
    soup = BeautifulSoup(html_content, "html.parser")
    # Strip path/query/fragment so relative links resolve against the site root.
    base_url = urllib.parse.urlunparse(
        urllib.parse.urlparse(url)._replace(path="", params="", query="", fragment="")
    )
    for link in soup.find_all("a"):
        href = link.get("href")
        if not href:
            continue  # urljoin would raise TypeError on anchors without an href
        file_url = urllib.parse.urljoin(base_url, href)
        filename = urllib.parse.urlparse(file_url).path.split("/")[-1]
        # Skip directory-style links that leave no filename to save.
        if not filename:
            continue
        local_filename = os.path.join(subdir, filename)
        link["href"] = local_filename
        download_file(file_url, local_filename)
    # Save the modified HTML content alongside the downloaded files.
    with open(os.path.join(subdir, "index.html"), "w") as file:
        file.write(str(soup))
def list_files(directory_path="."):
    """List regular files in directory_path, minus the app's own files."""
    files = [
        f for f in os.listdir(directory_path)
        if os.path.isfile(os.path.join(directory_path, f))
    ]
    return [f for f in files if f not in EXCLUDED_FILES]
def show_file_operations(file_path):
    st.write(f"File: {os.path.basename(file_path)}")
    # Unique widget keys: the same button labels may be rendered for several files.
    key = hashlib.md5(file_path.encode()).hexdigest()
    # Track edit mode in session state. A bare `if st.button(...)` block is only
    # True for the single rerun after the click, so a Save button nested inside
    # it could never fire.
    if st.button(f"✏️ Edit {os.path.basename(file_path)}", key=f"edit_{key}"):
        st.session_state[f"editing_{key}"] = True
    if st.session_state.get(f"editing_{key}"):
        with open(file_path, "r") as f:
            file_content = f.read()
        file_content = st.text_area(
            "Edit the file content:", value=file_content, height=250, key=f"area_{key}"
        )
        if st.button(f"💾 Save {os.path.basename(file_path)}", key=f"save_{key}"):
            with open(file_path, "w") as f:
                f.write(file_content)
            st.session_state[f"editing_{key}"] = False
            st.success(f"File {os.path.basename(file_path)} saved!")
    # Delete button
    if st.button(f"🗑️ Delete {os.path.basename(file_path)}", key=f"delete_{key}"):
        os.remove(file_path)
        st.markdown(f"🎉 File {os.path.basename(file_path)} deleted!")
def show_download_links(subdir):
    st.write(f"Files for {subdir}:")
    for file in list_files(subdir):
        file_path = os.path.join(subdir, file)
        if os.path.isfile(file_path):
            st.markdown(get_download_link(file_path), unsafe_allow_html=True)
            show_file_operations(file_path)
        else:
            st.write(f"File not found: {file}")
def get_download_link(file):
    """Return an HTML anchor with the file embedded as a base64 data URI."""
    with open(file, "rb") as f:
        data = f.read()  # avoid shadowing the built-in `bytes`
    b64 = base64.b64encode(data).decode()
    name = os.path.basename(file)
    return f'<a href="data:file/octet-stream;base64,{b64}" download="{name}">Click to download {name}</a>'
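# A simpler alternative to the hand-rolled base64 anchor above is Streamlit's
# built-in st.download_button. The sketch below is not wired into the app;
# `show_native_download` is a hypothetical helper name, and it assumes a
# Streamlit version that ships st.download_button.
def show_native_download(file):
    with open(file, "rb") as f:
        st.download_button(
            label=f"Download {os.path.basename(file)}",
            data=f.read(),
            file_name=os.path.basename(file),
            key=f"dl_{hashlib.md5(file.encode()).hexdigest()}",
        )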
def main():
    st.sidebar.title("Web Datasets Bulk Downloader")
    url = st.sidebar.text_input("Please enter a Web URL to bulk download text and files")

    # Load the URL-to-directory history.
    with open("history.json", "r") as f:
        history = json.load(f)

    # Record each entered URL in history.json, keyed to an MD5-named subdirectory.
    if url:
        subdir = hashlib.md5(url.encode()).hexdigest()
        if not os.path.exists(subdir):
            os.makedirs(subdir)
        if url not in history:
            history[url] = subdir
            with open("history.json", "w") as f:
                json.dump(history, f)

    # Guard on `url` so an empty text box can't trigger a KeyError.
    if st.sidebar.button("📥 Get All the Content") and url:
        download_html_and_files(url, history[url])
        show_download_links(history[url])

    if st.sidebar.button("📂 Show Download Links"):
        for subdir in history.values():
            show_download_links(subdir)

    # Display history as markdown.
    with st.expander("URL History and Downloaded Files"):
        for hist_url, hist_subdir in history.items():
            st.markdown(f"#### {hist_url}")
            show_download_links(hist_subdir)


if __name__ == "__main__":
    main()
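# To run the app locally (assuming streamlit, requests, and beautifulsoup4 are
# installed, e.g. via requirements.txt):
#
#     streamlit run app.py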