Update app.py
app.py CHANGED
@@ -4,22 +4,23 @@ from bs4 import BeautifulSoup
 import re
 
 def fetch_pdf_links_and_titles():
-    [previous function body (old lines 7-22) removed; its contents are not shown in this diff view]
+    try:
+        url = "https://finance.naver.com/research/company_list.naver"
+        response = requests.get(url)
+        soup = BeautifulSoup(response.text, 'html.parser')
+
+        seen_urls = set()
+        links_html = ""
+        pdf_links = soup.find_all('a', href=re.compile("^https://ssl.pstatic.net/imgstock/upload/research/company/.*\.pdf$"))
+        for link in pdf_links:
+            title = link.text.strip()
+            full_url = link['href']
+            if full_url not in seen_urls:
+                seen_urls.add(full_url)
+                links_html += f"<div><a href='{full_url}' download='{full_url.split('/')[-1]}'>{title}</a></div>"
+        return links_html if links_html else "No links found."
+    except Exception as e:
+        return f"An error occurred: {str(e)}"
 
 # Gradio 인터페이스
 with gr.Blocks() as app:
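The diff ends at the opening of the Gradio Blocks context (the "# Gradio 인터페이스" / Gradio interface section), so the rest of app.py is not shown here. Below is a minimal sketch of how the new fetch_pdf_links_and_titles() could be wired into that interface, assuming a button-driven layout with an HTML output; the component names, labels, and the launch() call are illustrative and not taken from the actual file.

import gradio as gr
# fetch_pdf_links_and_titles() is the function added in this commit; it returns an HTML string
# of <a download> links (or an error / "No links found." message).

with gr.Blocks() as app:
    # Illustrative components, not from the real app.py.
    gr.Markdown("Naver Finance company research reports (PDF)")
    fetch_button = gr.Button("Fetch PDF links")
    links_output = gr.HTML()
    # Clicking the button re-runs the scraper and renders the returned HTML links.
    fetch_button.click(fn=fetch_pdf_links_and_titles, inputs=None, outputs=links_output)

app.launch()

Because the function returns one HTML string of anchor tags with download attributes, gr.HTML() is a natural output target; a tabular component such as gr.Dataframe would require reshaping the return value instead.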