import re
from urllib.parse import urljoin

import gradio as gr
import requests
from bs4 import BeautifulSoup


def fetch_pdf_links():
    # Naver Finance company research list page. Some Naver endpoints reject the
    # default requests User-Agent, so send a browser-like one to be safe.
    url = "https://finance.naver.com/research/company_list.naver"
    headers = {"User-Agent": "Mozilla/5.0"}
    response = requests.get(url, headers=headers)
    response.raise_for_status()
    soup = BeautifulSoup(response.text, "html.parser")

    # Collect every anchor whose href ends in ".pdf"
    # (a raw string avoids the invalid-escape warning from "\.pdf$").
    pdf_links = soup.find_all("a", href=re.compile(r"\.pdf$"))

    links = []
    for link in pdf_links:
        # urljoin handles both relative paths and already-absolute hrefs.
        full_url = urljoin(url, link["href"])
        filename = full_url.split("/")[-1]
        links.append([f"<a href='{full_url}' download='{filename}'>{filename}</a>"])

    return links


with gr.Blocks() as app:
    btn_fetch = gr.Button("Fetch PDF Links")
    # datatype=["html"] renders the anchor tags as clickable links instead of
    # showing the raw markup (supported in recent Gradio releases).
    output_links = gr.Dataframe(headers=["PDF Link"], datatype=["html"], interactive=False)
    btn_fetch.click(
        fn=fetch_pdf_links,
        outputs=output_links,
    )

app.launch()