awacke1 committed
Commit c2fee75 · verified · 1 Parent(s): 62af43f

Create bacckup_app2.py

Files changed (1)
  1. bacckup_app2.py +270 -0
bacckup_app2.py ADDED
@@ -0,0 +1,270 @@
+ # Edited most recently 2/11/2024 - had some problems with showing activity in real time with the UI stream - going to try again and re-simplify.
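+ # One possible approach for that real-time activity stream (a sketch, not
+ # wired in yet): update a single st.empty() placeholder in place instead of
+ # emitting a new st.write() per file, e.g.
+ #     status = st.empty()
+ #     for i, path in enumerate(paths):
+ #         status.write(f"Processing {i+1}/{len(paths)}: {path}")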
+
+ import streamlit as st
+ import requests
+ import os
+ import urllib.parse
+ import base64
+ from bs4 import BeautifulSoup
+ import hashlib
+ import json
+ import uuid
+ import glob
+ import zipfile
+
+ EXCLUDED_FILES = ['app.py', 'requirements.txt', 'pre-requirements.txt', 'packages.txt', 'README.md', '.gitattributes', 'backup.py', 'Dockerfile']
+ URLS = {
+     "National Library of Medicine": "https://www.nlm.nih.gov/",
+     "World Health Organization": "https://www.who.int/",
+     "UHCProvider - United Health and Optum": "https://www.uhcprovider.com/",
+     "CMS - Centers for Medicare & Medicaid Services": "https://www.cms.gov/",
+     "Mayo Clinic": "https://www.mayoclinic.org/",
+     "WebMD": "https://www.webmd.com/",
+     "MedlinePlus": "https://medlineplus.gov/",
+     "Healthline": "https://www.healthline.com/",
+     "CDC - Centers for Disease Control and Prevention": "https://www.cdc.gov/",
+     "Johns Hopkins Medicine": "https://www.hopkinsmedicine.org/"
+ }
+
+ # Create history.json on first run so later reads never fail.
+ if not os.path.exists("history.json"):
+     with open("history.json", "w") as f:
+         json.dump({}, f)
+
+ def zip_subdirs(start_dir):
+     """Zip each subdirectory under start_dir and yield the path of each archive."""
+     for subdir, dirs, files in os.walk(start_dir):
+         if subdir != start_dir:  # Skip the root directory
+             zip_filename = os.path.join(start_dir, subdir.split(os.sep)[-1] + '.zip')
+             allFileSummary = ""
+             with zipfile.ZipFile(zip_filename, 'w') as zipf:
+                 for file in files:
+                     file_path = os.path.join(subdir, file)
+                     zipf.write(file_path, os.path.relpath(file_path, start_dir))
+                     allFileSummary += f"Added: {file_path}\n"
+                 st.write(allFileSummary)
+             yield zip_filename
+
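+ # zip_subdirs is a generator so the caller can surface each zip's link as soon
+ # as that archive is written, rather than blocking until every folder is zipped.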
+
+ @st.cache_resource
+ def create_zip_of_files(files):
+     zip_name = "all_files.zip"
+     with zipfile.ZipFile(zip_name, 'w') as zipf:
+         for file in files:
+             zipf.write(file)
+     return zip_name
+
+ @st.cache_resource
+ def get_zip_download_link(zip_file):
+     """Return an HTML anchor that downloads zip_file via a base64 data URI."""
+     with open(zip_file, 'rb') as f:
+         data = f.read()
+     b64 = base64.b64encode(data).decode()
+     link_name = os.path.basename(zip_file)
+     href = f'<a href="data:application/zip;base64,{b64}" download="{link_name}">Download: {link_name}</a>'
+     return href
+
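+ # Note: a base64 data URI inflates the payload by roughly a third, so very
+ # large archives may render slowly; st.download_button is a possible alternative.
+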
+ def download_file(url, local_filename):
+     if url.startswith('http://') or url.startswith('https://'):
+         try:
+             with requests.get(url, stream=True) as r:
+                 r.raise_for_status()
+                 with open(local_filename, 'wb') as f:
+                     for chunk in r.iter_content(chunk_size=8192):
+                         f.write(chunk)
+             return local_filename
+         except requests.exceptions.HTTPError as err:
+             print(f"HTTP error occurred: {err}")
+
+ def download_html_and_files(url, subdir):
+     html_content = requests.get(url).text
+     soup = BeautifulSoup(html_content, 'html.parser')
+     base_url = urllib.parse.urlunparse(urllib.parse.urlparse(url)._replace(path='', params='', query='', fragment=''))
+
+     for link in soup.find_all('a'):
+         href = link.get('href')
+         if not href:  # Skip anchors that have no href attribute
+             continue
+         file_url = urllib.parse.urljoin(base_url, href)
+         local_filename = os.path.join(subdir, urllib.parse.urlparse(file_url).path.split('/')[-1])
+
+         if not local_filename.endswith('/') and local_filename != subdir:
+             link['href'] = local_filename
+             download_file(file_url, local_filename)
+
+     with open(os.path.join(subdir, "index.html"), "w") as file:
+         file.write(str(soup))
+
+ def list_files(directory_path='.'):
+     files = [f for f in os.listdir(directory_path) if os.path.isfile(os.path.join(directory_path, f))]
+     return [f for f in files if f not in EXCLUDED_FILES]
+
+ def file_editor(file_path):
+     st.write(f"Editing File: {os.path.basename(file_path)}")
+
+     with open(file_path, "r") as f:
+         file_content = f.read()
+
+     file_content = st.text_area("Edit the file content:", value=file_content, height=250)
+
+     if st.button("💾 Save"):
+         with open(file_path, "w") as f:
+             f.write(file_content)
+         st.success(f"File '{os.path.basename(file_path)}' saved!")
+
+
+ def show_file_operations(file_path, sequence_number):
+     # st.write(f"File: {os.path.basename(file_path)}")
+     unique_key = hashlib.md5(file_path.encode()).hexdigest()
+     file_content = ""
+
+     col01, col02, col1, col2, col3 = st.columns(5)
+     with col01:
+         st.write(os.path.basename(file_path))
+     # with col02:
+     #     st.write(file_path)
+     with col1:
+         edit_key = f"edit_{unique_key}_{sequence_number}"
+         if st.button("✏️ Edit", key=edit_key):
+             file_editor(file_path)
+             # with open(file_path, "r") as f:
+             #     file_content = f.read()
+             # text_area_key = f"text_area_{unique_key}_{sequence_number}"
+             # file_content = st.text_area("Edit the file content:", value=file_content, height=250, key=text_area_key)
+
+     with col2:
+         save_key = f"save_{unique_key}_{sequence_number}"
+         if st.button("💾 Save", key=save_key):
+             if file_content:  # Ensure file_content is not empty
+                 with open(file_path, "w") as f:
+                     f.write(file_content)
+                 st.success("File saved!")
+
+     with col3:
+         delete_key = f"delete_{unique_key}_{sequence_number}"
+         if st.button("🗑️ Delete", key=delete_key):
+             os.remove(file_path)
+             st.markdown("File deleted!")
+
+
+ file_sequence_numbers = {}
+
+ def show_download_links(subdir):
+     global file_sequence_numbers
+     for file in list_files(subdir):
+         file_path = os.path.join(subdir, file)
+         if file_path not in file_sequence_numbers:
+             file_sequence_numbers[file_path] = 1
+         else:
+             file_sequence_numbers[file_path] += 1
+         sequence_number = file_sequence_numbers[file_path]
+
+         if os.path.isfile(file_path):
+             # st.markdown(get_download_link(file_path), unsafe_allow_html=True)
+             st.markdown(file_path, unsafe_allow_html=True)  # Faster than encoding the file into a base64 download link
+             show_file_operations(file_path, sequence_number)
+         else:
+             st.write(f"File not found: {file}")
+
+ def get_download_link(file):
+     with open(file, "rb") as f:
+         data = f.read()
+     b64 = base64.b64encode(data).decode()
+     href = f'<a href="data:file/octet-stream;base64,{b64}" download=\'{os.path.basename(file)}\'>Download: {os.path.basename(file)}</a>'
+     return href
+
+ def main():
+     st.sidebar.title('📥Web Data Downloader📂')
+
+     # Check for query parameters for file editing
+     query_params = st.experimental_get_query_params()
+     file_to_edit = query_params.get('file_to_edit', [None])[0]
+
+     if file_to_edit and os.path.exists(file_to_edit):
+         file_editor(file_to_edit)
+     else:
+         # Select the URL input method
+         url_input_method = st.sidebar.radio("Choose URL Input Method", ["Enter URL", "Select from List"])
+         url = ""
+         if url_input_method == "Enter URL":
+             url = st.sidebar.text_input('Please enter a Web URL to bulk download text and files')
+         else:
+             selected_site = st.sidebar.selectbox("Select a Website", list(URLS.keys()))
+             url = URLS[selected_site]
+
+         # Read or create history.json
+         if not os.path.exists("history.json"):
+             with open("history.json", "w") as f:
+                 json.dump({}, f)
+
+         with open("history.json", "r") as f:
+             try:
+                 history = json.load(f)
+             except json.JSONDecodeError:
+                 history = {}  # Fall back to an empty history if the file is corrupt
+
+         # Handle URL submission
+         if url:
+             subdir = hashlib.md5(url.encode()).hexdigest()
+             if not os.path.exists(subdir):
+                 os.makedirs(subdir)
+             if url not in history:
+                 history[url] = subdir
+                 with open("history.json", "w") as f:
+                     json.dump(history, f)
+
+             # Button for downloading content
+             if st.sidebar.button('📥 Get All the Content'):
+                 download_html_and_files(url, history[url])
+                 show_download_links(history[url])
+
+             # Button for showing download links
+             if st.sidebar.button('📂 Show Download Links'):
+                 for subdir in history.values():
+                     show_download_links(subdir)
+
+         if st.sidebar.button("🗑 Delete All"):
+             # Clear the history file
+             with open("history.json", "w") as f:
+                 json.dump({}, f)
+
+             # Delete all files in subdirectories
+             for subdir in glob.glob('*'):
+                 if os.path.isdir(subdir) and subdir not in EXCLUDED_FILES:
+                     for file in os.listdir(subdir):
+                         file_path = os.path.join(subdir, file)
+                         os.remove(file_path)
+                         st.write(f"Deleted: {file_path}")
+                     os.rmdir(subdir)  # Remove the now-empty directory
+
+             st.experimental_rerun()
+
+         if st.sidebar.button("⬇️ Download All"):
+             start_directory = '.'  # Current directory
+             for zip_file in zip_subdirs(start_directory):
+                 st.sidebar.markdown(zip_file, unsafe_allow_html=True)
+                 st.sidebar.markdown(get_zip_download_link(zip_file), unsafe_allow_html=True)
+
+         # Expander showing URL history and download links
+         with st.expander("URL History and Downloaded Files"):
+             try:
+                 for url, subdir in history.items():
+                     st.markdown(f"#### {url}")
+                     # show_download_links(subdir)
+             except Exception:
+                 print('url history is empty')
+             # Update each time to show the files we have
+             # for subdir in history.values():
+             #     show_download_links(subdir)
+
+
+ if __name__ == "__main__":
+     main()
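+
+ # Usage: streamlit run bacckup_app2.py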