"""
News Link Extractor for French Online Articles

Extracts and stores relevant links from local French online news articles.

pip install beautifulsoup4 mysql-connector-python colorama

Author     : Guillaume Eckendoerffer
Date       : 28-09-23
Repository : https://github.com/Eckendoerffer/TorchTrainerFlow/
             https://huggingface.co/datasets/eckendoerffer/news_fr
"""

import hashlib
import os
from urllib.parse import urlparse

from bs4 import BeautifulSoup
import mysql.connector
from colorama import Fore, init

# Database credentials: the bracketed values are placeholders to fill in.
db_config = {
    "host": "[host]",
    "user": "[user]",
    "password": "[passwd]",
    "database": "[database]"
}

conn = mysql.connector.connect(**db_config)
cursor = conn.cursor()

# Preload every stored URL hash as "hash|hash|...|" so duplicate links can be
# detected with a fast in-memory substring count instead of a query per link.
query = "SELECT `key_media` FROM `base_news` WHERE `key_media` != ''"
cursor.execute(query)
keys = cursor.fetchall()
formatted_keys = "|".join(key[0] for key in keys) + "|"

init(autoreset=True)


def get_dom_path(url):
    """Return the scheme and host part of a URL."""
    parsed_url = urlparse(url)
    return f"{parsed_url.scheme}://{parsed_url.netloc}"


def get_html_content(file_path):
    """Read a locally saved HTML page, ignoring undecodable bytes."""
    with open(file_path, 'r', encoding='utf8', errors='ignore') as file:
        return file.read()


def mysqli_return_number(conn, query, params=None):
    """Run a scalar query and return the first column of the first row, or 0."""
    cursor = conn.cursor()
    cursor.execute(query, params)
    result = cursor.fetchone()
    cursor.close()
    return result[0] if result else 0
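
# This helper is not called in this script. A hedged example of the intended
# use, with a hypothetical count query against the columns seen above:
#   total = mysqli_return_number(
#       conn, "SELECT COUNT(*) FROM `base_news` WHERE `step` > %s", (0,)
#   )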


def process_news_source(id_source, url_source, id_media):
    """Extract article links from the saved HTML of one news source."""
    global formatted_keys

    dom = get_dom_path(url_source)

    # Mark this source as processed so it is not selected again.
    cursor.execute("UPDATE `base_news` SET `link`='1' WHERE `id`=%s LIMIT 1", (id_source,))
    conn.commit()

    file_path = f"sources/html_news/{id_source}.txt"
    if not os.path.exists(file_path):
        return
    html_content = get_html_content(file_path)

    print(f"{id_source} {url_source} {id_media} ({len(html_content)})")

    soup = BeautifulSoup(html_content, 'html.parser')
    nb_add = 0
    for link in soup.find_all('a'):
        url = link.get('href')
        if url is None:
            continue

        # Drop fragments and query strings.
        url = url.split("#")[0]
        url = url.split("?")[0]
        if not url:
            continue

        # Resolve relative and protocol-relative links against the source domain.
        if "//" not in url:
            url = f"{dom}/{url}" if url[0] != '/' else f"{dom}{url}"
        elif "http" not in url:
            url = 'https:' + url
        if not url.startswith(("http://", "https://")) or any(c in url for c in (' ', '%', "'")):
            continue

        key = hashlib.md5(url.encode()).hexdigest()
        nb_base_news = formatted_keys.count(f'{key}|')

        # Keep only same-domain links that are not yet stored and that look like
        # article URLs: many hyphens and none of the excluded patterns.
        if url.startswith(dom):
            if nb_base_news:
                continue
            if (
                url.count("-") > 6 and
                not any(substring in url for substring in ['replay', 'video', 'login', '/inloggen', '?redirect', '.jpg', '.png', 'mailto'])
            ):
                print(Fore.GREEN + url)
                cursor.execute(
                    "INSERT INTO `base_news` (`id`, `key_media`, `media`, `url`, `step`) "
                    "VALUES (NULL, %s, %s, %s, '0')",
                    (key, id_media, url)
                )
                conn.commit()
                formatted_keys = f'{formatted_keys}{key}|'
                nb_add += 1


def process():
    """Process one random batch of pending sources; return False when done."""
    cursor = conn.cursor()
    query = ("SELECT `id`, `url`, `media` FROM `base_news` "
             "WHERE `link`='0' AND `step` > 0 ORDER BY RAND() LIMIT 1000")
    cursor.execute(query)
    rows = cursor.fetchall()

    if not rows:
        print('No unprocessed news source found.')
        return False

    for row in rows:
        id_source, url_source, id_media = row
        process_news_source(id_source, url_source, id_media)
    return True


# Loop until every pending source has been processed, then close the connection.
while process():
    pass

conn.close()