Update app.py
app.py CHANGED
@@ -10,15 +10,17 @@ def fetch_pdf_links_and_titles():
         soup = BeautifulSoup(response.text, 'html.parser')
 
         seen_urls = set()
-        links_html = ""
+        links_html = "<div style='margin-top: 20px;'>"
         pdf_links = soup.find_all('a', href=re.compile("^https://ssl.pstatic.net/imgstock/upload/research/company/.*\.pdf$"))
         for link in pdf_links:
             title = link.text.strip()
             full_url = link['href']
             if full_url not in seen_urls:
                 seen_urls.add(full_url)
-
-
+                # Append the link as an HTML string, including the title and URL
+                links_html += f"<p><a href='{full_url}' download='{full_url.split('/')[-1]}'>{title}</a></p>"
+        links_html += "</div>"
+        return links_html if links_html else "No PDF links found."
     except Exception as e:
         return f"An error occurred: {str(e)}"
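For context, here is a minimal sketch of how the updated hunk might sit inside `fetch_pdf_links_and_titles()`. The diff does not show the imports, the page being fetched, or how the Space renders the result, so the `requests.get` target and the Gradio wiring below are assumptions for illustration; only the lines from the hunk above are taken verbatim.

```python
# Minimal sketch, not the actual app.py: the imports, the fetched URL, and the
# Gradio wiring are assumptions; only the hunk shown in the diff is verbatim.
import re

import requests
from bs4 import BeautifulSoup


def fetch_pdf_links_and_titles():
    try:
        # Hypothetical research-list page; the real URL is defined elsewhere in app.py.
        response = requests.get("https://finance.naver.com/research/company_list.naver")
        response.raise_for_status()
        soup = BeautifulSoup(response.text, 'html.parser')

        seen_urls = set()
        links_html = "<div style='margin-top: 20px;'>"
        pdf_links = soup.find_all(
            'a',
            href=re.compile(r"^https://ssl.pstatic.net/imgstock/upload/research/company/.*\.pdf$"),
        )
        for link in pdf_links:
            title = link.text.strip()
            full_url = link['href']
            if full_url not in seen_urls:
                seen_urls.add(full_url)
                # Append the link as an HTML string, including the title and URL
                links_html += (
                    f"<p><a href='{full_url}' "
                    f"download='{full_url.split('/')[-1]}'>{title}</a></p>"
                )
        links_html += "</div>"
        return links_html if links_html else "No PDF links found."
    except Exception as e:
        return f"An error occurred: {str(e)}"


# Hypothetical Gradio wiring (not shown in the diff): display the returned HTML string.
# import gradio as gr
# gr.Interface(fn=fetch_pdf_links_and_titles, inputs=None, outputs=gr.HTML()).launch()
```

One note on the change itself: since `links_html` now always starts with the opening `<div>`, the `if links_html` fallback can never return "No PDF links found."; checking whether `pdf_links` is empty would be needed for that message to appear.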