from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware

import requests
from bs4 import BeautifulSoup


# Log the installed packages at startup (handy for debugging the deployment
# environment).
try:
    from pip._internal.operations import freeze
except ImportError:  # pip < 10.0
    from pip.operations import freeze

for pkg in freeze.freeze():
    print(pkg)

app = FastAPI()
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
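
# The wildcard CORS policy above accepts requests from any origin, which is
# convenient in development. A deployment would normally pin the allowed
# origins instead; a minimal sketch, where the frontend URL is a placeholder:
#
#   app.add_middleware(
#       CORSMiddleware,
#       allow_origins=["https://frontend.example.com"],
#       allow_credentials=True,
#       allow_methods=["GET"],
#       allow_headers=["*"],
#   )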

@app.get("/get_scraped_data")
async def get_data(url: str):
    # Example URL to scrape:
    # https://www.imf.org/en/News/Articles/2024/03/21/pr2494-sri-lanka-imf-staff-level-agreement-for-second-review-sla
    try:
        # Send a GET request to the URL
        response = requests.get(url, timeout=30)
        response.raise_for_status()
    except requests.RequestException as e:
        # Report fetch failures to the client rather than printing them
        raise HTTPException(status_code=500, detail=f"Failed to retrieve the webpage: {e}")

    # Parse the page content
    soup = BeautifulSoup(response.content, "html.parser")

    # Extract all text content (paragraphs, headers, etc.)
    elements = soup.find_all(["p", "h1", "h2", "h3", "h4", "h5", "h6"])
    body_text = "\n".join(element.get_text().strip() for element in elements)

    # Extract all links
    links = [a_tag["href"] for a_tag in soup.find_all("a", href=True)]

    # Return the extracted information to the caller
    return {"body_text": body_text, "links": links}
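
# Local entry point (a sketch: assumes this file is saved as main.py and that
# uvicorn is installed in the environment; neither is confirmed by the file
# itself).
if __name__ == "__main__":
    import uvicorn  # assumed dependency

    # Serve the app locally on port 8000
    uvicorn.run(app, host="0.0.0.0", port=8000)

# The endpoint can then be exercised with a plain GET request, e.g.:
#
#   curl "http://127.0.0.1:8000/get_scraped_data?url=https://example.com"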