urlcrawl / app.py
seawolf2357's picture
Update app.py
a280e58 verified
raw
history blame
1.11 kB
import gradio as gr
import requests
from bs4 import BeautifulSoup
import re
def fetch_pdf_links_and_titles():
    """Scrape Naver Finance's company research page for PDF report links.

    Returns:
        list[list[str]]: rows of ``[title, anchor_html]`` where ``anchor_html``
        is an HTML download link for the report PDF (rendered by Gradio).

    Raises:
        requests.RequestException: on network failure, timeout, or a
        non-2xx HTTP response.
    """
    url = "https://finance.naver.com/research/company_list.naver"
    # Timeout so the UI never hangs forever; fail loudly on HTTP errors
    # instead of silently parsing an error page.
    response = requests.get(url, timeout=10)
    response.raise_for_status()
    soup = BeautifulSoup(response.text, 'html.parser')
    # Find every PDF link hosted on Naver's static server.
    # Raw string + escaped dots: the original non-raw pattern triggered an
    # invalid-escape warning for '\.' and let the unescaped domain dots
    # match any character.
    pdf_pattern = re.compile(
        r"^https://ssl\.pstatic\.net/imgstock/upload/research/company/.*\.pdf$"
    )
    pdf_links = soup.find_all('a', href=pdf_pattern)
    links_and_titles = []
    for link in pdf_links:
        title = link.text.strip()  # anchor text is the report title
        full_url = link['href']
        # Store as an HTML anchor so the Dataframe cell is a download link.
        links_and_titles.append(
            [title, f"<a href='{full_url}' download='{full_url.split('/')[-1]}'>{full_url}</a>"]
        )
    return links_and_titles
# Gradio ์ธํ„ฐํŽ˜์ด์Šค
# Gradio interface: one button that populates a table of report links.
with gr.Blocks() as app:
    fetch_button = gr.Button("PDF ๋งํฌ ๋ฐ ์ •๋ณด ์กฐํšŒ")
    results_table = gr.Dataframe(
        headers=["Title", "PDF ๋งํฌ"],
        interactive=False,
    )
    # Clicking the button re-scrapes the page and refreshes the table.
    fetch_button.click(fn=fetch_pdf_links_and_titles, outputs=results_table)

app.launch()