Update app.py
app.py
CHANGED
@@ -9,7 +9,7 @@ def fetch_pdf_links_and_titles():
     soup = BeautifulSoup(response.text, 'html.parser')
 
     seen_urls = set()
-
+    links_html = ""
     # Find all PDF links and their titles.
     pdf_links = soup.find_all('a', href=re.compile("^https://ssl.pstatic.net/imgstock/upload/research/company/.*\.pdf$"))
     for link in pdf_links:
@@ -17,17 +17,17 @@
         full_url = link['href']
         if full_url not in seen_urls:
             seen_urls.add(full_url)
-            #
-
-    return
+            # Add the link as an HTML string
+            links_html += f"<div><a href='{full_url}' download='{full_url.split('/')[-1]}'>{title}</a></div>"
+    return links_html
 
 # Gradio interface
 with gr.Blocks() as app:
     btn_fetch = gr.Button("Fetch PDF links and info")
-
+    output_html = gr.HTML()
     btn_fetch.click(
         fn=fetch_pdf_links_and_titles,
-        outputs=
+        outputs=output_html
     )
 
 app.launch()
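For context, a minimal sketch of what the complete app.py might look like after this commit follows. Only the hunks above come from the Space; the imports, the page request, the listing URL (here a placeholder), and the way `title` is derived from each anchor are assumptions added so the sketch is self-contained, not the Space's actual code.

```python
import re

import gradio as gr
import requests
from bs4 import BeautifulSoup

# Hypothetical listing-page URL -- the real one is not visible in the diff.
RESEARCH_LIST_URL = "https://example.com/research/company-reports"

def fetch_pdf_links_and_titles():
    # Fetch and parse the page that lists the research PDFs.
    response = requests.get(RESEARCH_LIST_URL, timeout=10)
    soup = BeautifulSoup(response.text, 'html.parser')

    seen_urls = set()
    links_html = ""
    # Find all PDF links and their titles.
    pdf_links = soup.find_all(
        'a',
        href=re.compile(r"^https://ssl.pstatic.net/imgstock/upload/research/company/.*\.pdf$")
    )
    for link in pdf_links:
        # Assumption: the anchor text serves as the report title.
        title = link.get_text(strip=True) or link['href'].split('/')[-1]
        full_url = link['href']
        if full_url not in seen_urls:
            seen_urls.add(full_url)
            # Add the link as an HTML string
            links_html += f"<div><a href='{full_url}' download='{full_url.split('/')[-1]}'>{title}</a></div>"
    return links_html

# Gradio interface
with gr.Blocks() as app:
    btn_fetch = gr.Button("Fetch PDF links and info")
    output_html = gr.HTML()
    btn_fetch.click(
        fn=fetch_pdf_links_and_titles,
        outputs=output_html
    )

app.launch()
```

The substance of the commit is that fetch_pdf_links_and_titles() now builds and returns an HTML string instead of ending in a bare return, and the click handler routes that string into a gr.HTML() component, so the deduplicated PDF links render as clickable download anchors in the interface.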