File size: 1,678 Bytes
6ac8795
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
"""

Instalar os pacotes, assim:

$ poetry install
"""
import os
import time

import pandas as pd
from tqdm import tqdm
from trafilatura.sitemaps import sitemap_search
from trafilatura import fetch_url, extract, extract_metadata


def get_urls_from_sitemap(resource_url: str) -> list:
    """
    Return the list of page URLs found in the sitemap of *resource_url*.

    Parameters
    ----------
    resource_url : str
        Root URL of the website whose sitemap should be searched.

    Returns
    -------
    list
        URLs discovered by ``trafilatura.sitemaps.sitemap_search``
        (empty if no sitemap could be found).
    """
    # NOTE(review): the original docstring claimed this builds a DataFrame;
    # it only collects sitemap URLs.
    return sitemap_search(resource_url)


def extract_article(url: str) -> tuple:
    """
    Download *url* and extract its article text and metadata with Trafilatura.

    Parameters
    ----------
    url : str
        Address of the page to fetch.

    Returns
    -------
    tuple
        ``(article, metadata)`` where *article* is the extracted text (or
        ``None`` if extraction failed) and *metadata* is a Trafilatura
        metadata object (or ``None`` if the download/parse failed).
    """
    # Original annotation said `-> dict`, but the function has always
    # returned a 2-tuple; the annotation is corrected here.
    downloaded = fetch_url(url)
    article = extract(downloaded, favor_precision=True, only_with_metadata=True)
    metadata = extract_metadata(downloaded)
    return article, metadata


def create_dataset(list_of_websites: list) -> pd.DataFrame:
    """
    Build a pandas DataFrame of URLs and extracted articles.

    For every website, collect its sitemap URLs, then extract the article
    text and metadata of each URL.

    Parameters
    ----------
    list_of_websites : list
        Root URLs of the websites to crawl.

    Returns
    -------
    pd.DataFrame
        Columns: url, article, title, description, author, date.
        Duplicate rows and rows with missing values are dropped.
    """
    data = []
    for website in tqdm(list_of_websites, desc="Websites"):
        urls = get_urls_from_sitemap(website)
        for url in tqdm(urls, desc="URLs"):
            article, metadata = extract_article(url)
            # Trafilatura returns None when the download or extraction
            # fails; without this guard, `metadata.title` would raise
            # AttributeError and abort the whole crawl.
            if article is None or metadata is None:
                continue
            d = {
                "url": url,
                "article": article,
                "title": metadata.title,
                "description": metadata.description,
                "author": metadata.author,
                "date": metadata.date,
            }
            data.append(d)
            # Throttle requests to be polite to the remote server.
            time.sleep(0.5)

    df = pd.DataFrame(data)
    df = df.drop_duplicates()
    df = df.dropna()

    return df


if __name__ == "__main__":
    list_of_websites = [
        "https://www.diariodiunanalista.it/",
    ]

    df = create_dataset(list_of_websites)

    df.to_csv("./data/articles.csv", index=False)