eckendoerffer commited on
Commit
d6234a1
·
1 Parent(s): 86c1e5d

Upload 4_extract_news_url.py

Browse files
Files changed (1) hide show
  1. extract_news/4_extract_news_url.py +125 -0
extract_news/4_extract_news_url.py ADDED
@@ -0,0 +1,125 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# -*- coding: utf-8 -*-

"""
Random Line Fetcher for Large Datasets

Extracts and stores relevant links from local French online news articles.

pip install beautifulsoup4 mysql-connector-python colorama

Author : Guillaume Eckendoerffer
Date : 28-09-23
Repository : https://github.com/Eckendoerffer/TorchTrainerFlow/
             https://huggingface.co/datasets/eckendoerffer/news_fr
"""

import os
from bs4 import BeautifulSoup
import mysql.connector
import hashlib
from colorama import Fore, init

# Database configuration (placeholders -- replace with real credentials).
db_config = {
    "host": "[host]",
    "user": "[user]",
    "password": "[passwd]",
    "database": "[database]"
}

# Open the shared connection used by every function in this script.
conn = mysql.connector.connect(**db_config)
cursor = conn.cursor()

# Preload every known media key into one pipe-delimited string; it serves as a
# cheap in-memory dedup index ("<md5>|" substring test) while crawling.
query = "SELECT `key_media` FROM `base_news` WHERE `key_media` != ''"
cursor.execute(query)
keys = cursor.fetchall()
formatted_keys = "|".join([key[0] for key in keys]) + "|"

# colorama: reset terminal colors after each print.
init(autoreset=True)
39
def get_dom_path(url):
    """Return the scheme and host part of *url*, e.g. 'https://example.com'."""
    from urllib.parse import urlparse
    parts = urlparse(url)
    return "{}://{}".format(parts.scheme, parts.netloc)
43
+
44
def get_html_content(file_path):
    """Read *file_path* and return its full text (UTF-8, undecodable bytes ignored)."""
    with open(file_path, mode='r', encoding='utf8', errors='ignore') as handle:
        return handle.read()
47
+
48
def mysqli_return_number(conn, query, params=None):
    """Execute *query* on *conn* and return the first column of the first row.

    Args:
        conn: an open DB-API connection.
        query: SQL statement expected to yield a single scalar.
        params: optional sequence of bind parameters.

    Returns:
        The scalar value, or 0 when the query returns no row.
    """
    cursor = conn.cursor()
    # Bug fix: the original accepted `params` but never passed it to execute(),
    # silently dropping the bind values.
    if params is None:
        cursor.execute(query)
    else:
        cursor.execute(query, params)
    result = cursor.fetchone()
    cursor.close()
    return result[0] if result else 0
54
+
55
def process_news_source():
    """Pick one random unprocessed article, extract its links, and queue new ones.

    Side effects:
        - Marks the picked row as processed (`link`='1').
        - Inserts newly discovered same-domain article URLs into `base_news`
          with `step`='0'.
        - Appends each inserted URL's MD5 key to the global `formatted_keys`
          dedup string.

    Returns:
        A status string when no unprocessed source remains; None otherwise
        (including when the cached HTML file is missing).
    """
    global formatted_keys

    cursor = conn.cursor()
    # Random unprocessed source; several media are excluded by URL pattern.
    query = (
        "SELECT `id`, `url`, `media` FROM `base_news` "
        "WHERE `link`='0' AND `step` > 0 AND `id` > 215000 "
        "AND `url` NOT LIKE 'https://avis-vin.%' "
        "AND `url` NOT LIKE 'https://www.elle.fr/%' "
        "AND `url` NOT LIKE 'www.lamontagne.fr/%' "
        "AND `url` NOT LIKE 'https://www.rtbf.be/%' "
        "AND `url` NOT LIKE 'https://www.tf1info.fr/%' "
        "AND `url` NOT LIKE 'https://www.futura-sciences.com/%' "
        "AND `url` NOT LIKE 'https://cdn-elle.ladmedia.fr/%' "
        "ORDER BY Rand() LIMIT 1"
    )
    cursor.execute(query)
    row = cursor.fetchone()

    if not row:
        return 'No unprocessed news source found.'

    id_source, url_source, id_media = row
    dom = get_dom_path(url_source)
    # Security fix: bind id_source instead of interpolating it into the SQL string.
    cursor.execute("UPDATE `base_news` SET `link`='1' WHERE `id`=%s LIMIT 1", (id_source,))
    conn.commit()

    querys = "SELECT COUNT(`id`) FROM `base_news` WHERE `step`='0'"
    nb_link = mysqli_return_number(conn, querys)

    file_path = f"sources/html_news/{id_source}.txt"
    if not os.path.exists(file_path):
        # Cached HTML snapshot missing: skip silently (matches prior behavior).
        return
    html_content = get_html_content(file_path)

    print(f"{nb_link} {url_source} {id_media} ({len(html_content)})")

    soup = BeautifulSoup(html_content, 'html.parser')
    nb_add = 0
    for link in soup.find_all('a'):
        url = link.get('href')
        if url is None:
            continue
        # Drop fragment and query string.
        url = url.split("#")[0]
        url = url.split("?")[0]

        if not url:
            continue
        if "//" not in url:
            # Relative link: resolve against the source domain.
            url = f"{dom}/{url}" if url[0] != '/' else f"{dom}{url}"
        elif "http" not in url:
            # Protocol-relative link ("//host/path").
            url = 'https:' + url
        # Reject anything that is not a clean absolute http(s) URL.
        if not url.startswith(("http://", "https://")) or url.count(' ') or url.count('%') or url.count('\''):
            continue

        key = hashlib.md5(url.encode()).hexdigest()
        nb_base_news = formatted_keys.count(f'{key}|')

        if url.startswith(dom):
            if nb_base_news:
                # Already known URL.
                continue
            elif (
                url.count("-") > 6 and
                not any(substring in url for substring in ['replay', 'video', 'login', '/inloggen', '?redirect', '.jpg', '.png', 'mailto'])
            ):
                print(Fore.GREEN + url)
                # Security fix: parameterized INSERT instead of f-string
                # interpolation of key/media/url.
                insert_query = (
                    "INSERT INTO `base_news` (`id`, `key_media`, `media`, `url`, `step`) "
                    "VALUES (NULL, %s, %s, %s, '0')"
                )
                cursor.execute(insert_query, (key, id_media, url))
                conn.commit()
                formatted_keys = f'{formatted_keys}{key}|'
                nb_add += 1
        else:
            # Off-domain link: ignore.
            continue
120
+
121
# Crawl until no unprocessed source remains, then release the DB connection.
# Bug fix: the original `while True` loop never terminated, which made the
# trailing conn.close() unreachable dead code.
while True:
    status = process_news_source()
    if status == 'No unprocessed news source found.':
        break

conn.close()
125
+