# -*- coding: utf-8 -*-
"""
News article text extractor
This script extracts the text from locally-stored news articles. The main goal is
to retrieve clean text with minimal external elements, such as user menus, article lists,
and advertisements.
To install the necessary packages:
pip install mysql-connector-python chardet colorama pyquery
After this extraction step, you can use the Python script located at /dataset/2_cleaning_txt.py
to standardize the text for your dataset.
Note:
RSS feed links for media sources, as well as the HTML structure of media pages, change
and evolve regularly. Check the output for each media source often and adjust the
parsing rules to keep the extracted text clean and to catch changes in RSS feed URLs.
Author : Guillaume Eckendoerffer
Date : 29-09-23
Repository : https://github.com/Eckendoerffer/TorchTrainerFlow/
https://huggingface.co/datasets/eckendoerffer/news_fr
"""
import chardet
import time, os, re, html, json, hashlib
from colorama import Fore, init
from pyquery import PyQuery as pq
from config import DB_CONFIG
from utils import create_connection, get_file_content, save_to_file, clean_text, decode_unicode_escapes, decode_content
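# `config` (DB credentials) and `utils` (shared helpers) are local modules of this repository.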
index_id = 1        # first `base_news` id to process
stop_id = 1000000   # stop once ids beyond this value are reached
path = os.getcwd()
init(autoreset=True)
connection = create_connection(DB_CONFIG)
cursor = connection.cursor()
query = "SELECT `key_title` FROM `base_news` WHERE `key_title` != ''"
cursor.execute(query)
keys = cursor.fetchall()
formatted_keys = "|".join([key[0] for key in keys]) + "|"
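# In-memory index of the MD5 title hashes already stored in `base_news`, joined with '|';
# it lets the loop below skip articles whose title has already been seen.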
while True:
id_source = ''
next_id = index_id + 1
time_start = time.time()
with connection.cursor(dictionary=True) as cursor:
cursor.execute("SELECT `id`, `url`, `media`, `key_title` FROM `base_news` WHERE `id`=%s LIMIT 1", (index_id,))
row = cursor.fetchone()
if row:
id_source = row["id"]
id_media = row["media"]
key_title = row["key_title"]
url = row["url"].strip()
if key_title.strip():
index_id = next_id
continue
if id_source and id_source > stop_id:
break
# Retrieve HTML content from the file
content = ''
title = ''
file_path = os.path.join(path, "sources", "html_news", f"{id_source}.txt")
html_content = get_file_content(file_path)
if '/replay' in url or not html_content:
index_id = next_id
continue
len_source = len(html_content)
# Encoding: normalise the raw file content to a clean unicode string
if isinstance(html_content, str):
html_content_bytes = html_content.encode('utf-8')
else:
html_content_bytes = html_content
decoded_content = decode_content(html_content_bytes)
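# If decode_content() cannot produce a string (returns None), fall back to chardet's
# statistical charset detection before decoding.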
if decoded_content is None:
charset_result = chardet.detect(html_content_bytes)
current_encoding = charset_result['encoding']
try:
html_content = html_content_bytes.decode(current_encoding)
except Exception as e:
print(Fore.WHITE + f"Error: {e}")
index_id = next_id
continue
else:
html_content = decoded_content
len_or = len(html_content)
# Use pyquery to parse the HTML
try:
doc = pq(html_content)
except Exception as e:
print(Fore.WHITE + f"({id_source}) Error parsing HTML: {e} {url}")
index_id = next_id
continue
# Extracting the title
if title.strip() == '':
title = html.unescape(doc('h1:first').text())
if title.strip() == '':
index_id = next_id
continue
extract_method = 0
# Extraction method 1: the "articleBody" field of the JSON-LD metadata embedded in the page
match = re.search(r'"articleBody"\s*:\s*"([^"]+)"', html_content)
if match:
content = html.unescape(match.group(1))
if content.strip():
extract_method = 1
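# The captured value still contains JSON escape sequences (\", \uXXXX, ...): re-quoting it and
# running json.loads() decodes them, with decode_unicode_escapes() as a fallback on failure.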
try:
content = json.loads(f'"{content}"')
except json.JSONDecodeError:
content = decode_unicode_escapes(content)
# Extraction method 2: <p> tags inside <article>, or known media-specific containers
if not extract_method or len(content) < 100:
p_elements = doc('article p')
if not p_elements:
p_elements = doc('div.Body p')
if not p_elements:
p_elements = doc('div.post-body p')
if not p_elements:
p_elements = doc('div.article_content p')
if not p_elements:
p_elements = doc('div.article__text p')
if not p_elements:
p_elements = doc('div.article-description p')
if not p_elements:
p_elements = doc('div.mainBody p')
if not p_elements:
p_elements = doc('section.article-section p')
if not p_elements:
p_elements = doc('div.article p')
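# Concatenate the paragraph text; for futura-sciences.com the <a> tags are dropped first,
# presumably because its paragraphs embed related-article links.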
for p in p_elements:
html_element = pq(p)
html_content += f" {html_element.html().strip()} "
if ".futura-sciences.com" in url:
html_element.find('a').remove()
content += f" {html.unescape(html_element.text().strip())} "
if content.strip(): extract_method = 2
len_text = len(content)
# Adding a space after punctuation and various deletions
content = content.replace('\r', ' ').replace('\n', ' ')
punctuation = ['.', '?', '!', ';', '»', ']']
for mark in punctuation:
content = content.replace(mark, mark + ' ')
content = content.replace(html.unescape(' '), ' ')
content = re.sub(r'\s{2,}', ' ', content)
content = re.sub(r'À lire aussi.{1,200}?»', ' ', content)
content = re.sub(r'Temps de Lecture.{1,20}? Fiche', ' ', content)
content = re.sub(r'Temps de Lecture.{1,20}? min.', ' ', content)
# Media-specific cutting rules
if ".elle.fr" in url and "sur 5" in content:
content = re.sub(r'Note :.{13,45}? sur 5', ' ', content)
content = content.replace(content[content.find(" sur 5 "):], ' ')
if ".latribune.fr" in url:
content = content.replace("Partager :", ' ')
removePhrasesEnd = [
'Sur le même sujet',
'Sur le même thème',
'Nos articles à lire aussi',
'Suivez toute l’actualité de vos villes',
'En direct',
"J'ai déjà un compte",
'> Ecoutez',
"Lire aussi >>",
'Courrier international',
'Vous avez trouvé une erreur?',
'Il vous reste',
'Partager',
"Suivez-nous",
'Newsletter',
'Abonnez-vous',
'1€ le premier mois',
'Votre France Bleu',
'Soyez le premier à commenter cet article',
'Pour rester informé(e)',
'Un site du groupe',
"Cet article est réservé aux abonnés",
"Recevez chaque vendredi l'essentiel",
"Suivez toute l'actualité de ZDNet",
"Suivez-nous sur les résaux sociaux",
". par ",
"Le résumé de la semaine",
"ACTUELLEMENT EN KIOSQUE",
" L’actualité par la rédaction de",
"Gratis onbeperkt",
"Débloquez immédiatement cet article",
"À voir également",
"null null null ",
'Du lundi au vendredi, à 19h',
"La rédaction de La Tribune",
"Restez toujours informé: suivez-nous sur Google Actualités",
"Du lundi au vendredi, votre rendez-vous",
"Enregistrer mon nom, mon e-mail",
"Mot de passe oublié",
"accès à ce contenu",
"En cliquant sur",
'(function',
]
# Truncate the text at the first occurrence of any end-marker phrase
for phrase in removePhrasesEnd:
if phrase in content:
content = content.split(phrase, 1)[0]
removePhrases = [
"Inscrivez-vous pour recevoir les newsletters de la Rép' dans votre boîte mail",
"TF1 INFO",
"Sujet TF1 Info",
"Sujet JT LCI",
"TF1 Info ",
"JT 20h WE ",
"JT 20h Semaine ",
"Source :",
"Inscrivez-vous aux newsletters de la RTBF Tous les sujets de l'article",
"Pour voir ce contenu, connectez-vous gratuitement",
">> LIRE AUSSI",
"À LIRE AUSSI",
"A lire aussi >> ",
"» LIRE AUSSI -",
" → À LIRE.",
"À voir également",
"Image d'illustration -",
"Le média de la vie locale ",
"Les plus lus.",
"Ce live est à présent terminé.",
" . -",
"[…]",
"[.]",
"(…)",
"(.)",
"©",
"Tous droits réservés",
" sur TF1",
"Avec AFP",
" AFP /",
"/ AFP ",
". AFP",
" BELGA /",
"GETTY",
"Getty Images",
"→ EXPLICATION",
"→ LES FAITS",
"→",
"À lire aussi",
"EN RÉSUMÉ",
"•",
"►►► ",
"► Écoutez l'entièreté de ce podcast ci-dessus",
"► Pour écouter ce podcast , abonnez-vous",
"►"
]
# Replace boilerplate terms with a period
for phrase in removePhrases:
content = content.replace(phrase, '.')
# Collapse repeated punctuation, whitespace and separators
content = content.replace(',.', '.')
content = content.replace('.,', '.')
content = re.sub(r'\.{2,}', '.', content)
content = re.sub(r'\s{2,}', ' ', content)
content = re.sub(r'-{2,}', '-', content)
content = re.sub(r'_{2,}', '_', content)
content = re.sub(r'Publié le\s?:? \d{2} ?\/ ?\d{2} ?\/ ?\d{4}( à \d{2}h\d{2})?', '', content)
content = re.sub(r'Mis à jour le \d{1,2} \w+ \. \d{4}', '', content)
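# Strip "Modifié le" timestamps (with an optional HH:MM before or after); the length guard
# below avoids deleting anything longer than a plain date/time stamp.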
matches = [match.group() for match in re.finditer(r'(\d{2}:\d{2})?Modifié le : \d{2}/\d{2}/\d{4}( - \d{2}:\d{2})?', content) if len(match.group()) <= 38]
for match in matches:
content = content.replace(match, ' ')
# Format the content for the txt file into the 'add' variable
content = re.sub(r'<.*?>', '', content)
add = f"{title}. "
if len(content) > 160:
add += f"{content}."
add = clean_text(add)
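# MD5 of the title acts as the deduplication key checked against `formatted_keys`.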
key = hashlib.md5(title.encode()).hexdigest()
nb_base = formatted_keys.count(f'{key}|')
# Display
color = Fore.GREEN if len(content) > 200 else Fore.WHITE
if len(content) > 200 and nb_base:
color = Fore.CYAN
if len(content) > 200 and not nb_base:
cursor = connection.cursor()
cursor.execute("UPDATE `base_news` SET `key_title`=%s WHERE `id`=%s LIMIT 1", (key, id_source))
formatted_keys = f'{formatted_keys}{key}|'
save_to_file(f"{path}/sources/txt_news/{id_source}.txt", add)
elapsed_time = time.time() - time_start
print(color + f"{id_source:8}) ({extract_method:1}) [{elapsed_time:.3f}] [{len_source:7}{len_text:7}{len(content):7}{len(title):4} ] {url} ")
index_id = next_id