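"""Fetch a web page, extract its main text, and cache everything under .cache/.

HTML is fetched with requests first, then crawl4ai, then the Jina reader as
fallbacks.  Text is extracted with trafilatura; if the result looks too short
or too noisy it is re-extracted through the Jina reader and compared against
readability.js (node_readability.js).  The compressed HTML (.xz), the
normalized text (.txt) and the metadata plus LLM-generated summary (.json)
are stored side by side, keyed by the URL.
"""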
import trafilatura
import requests
import lzma
import os
import re
import time
from datetime import datetime
import json
from pprint import pprint
import subprocess

import config

from utils import *

from text_utils import *

from llm import *

from mode_llm import llm_html_to_md, md_to_text, get_html_body_with_soup

from crawl4ai import WebCrawler # pip install "crawl4ai @ git+https://github.com/unclecode/crawl4ai.git"

# Create an instance of WebCrawler
crawler = WebCrawler()

# Warm up the crawler (load necessary models)
crawler.warmup()

## Cookies and headers captured with https://curlconverter.com
cookies = {
    'ASP.NET_SessionId': '42i3ivvgk14yd2tnxmddybvq',
    'Culture': 'vi',
    'Cookie_VB': 'close',
    'ruirophaply-covi19': '24',
    'SLG_G_WPT_TO': 'vi',
    'G_ENABLED_IDPS': 'google',
    'SLG_GWPT_Show_Hide_tmp': '1',
    'SLG_wptGlobTipTmp': '1',
    '__zlcmid': '1NOmxyopHgawxjN',
    '45C5EF': '96780c17-dee3-49b2-9bf7-6335c4348d4f',
    'vqc': '0',
}

headers = {
    'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
    'accept-language': 'en-US,en;q=0.9',
    'cache-control': 'max-age=0',
    'priority': 'u=0, i',
    'sec-ch-ua': '"Opera GX";v="111", "Chromium";v="125", "Not.A/Brand";v="24"',
    'sec-ch-ua-mobile': '?0',
    'sec-ch-ua-platform': '"Windows"',
    'sec-fetch-dest': 'document',
    'sec-fetch-mode': 'navigate',
    'sec-fetch-site': 'none',
    'sec-fetch-user': '?1',
    'sec-gpc': '1',
    'upgrade-insecure-requests': '1',
    'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36 OPR/111.0.0.0',
    # 'User-Agent': "Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/113.0.0.0 Mobile Safari/537.36",
}


def norm_url_and_gen_filename(url):
    url = url.strip() # strip leading/trailing whitespace
    if url.endswith("/"): url = url[:-1] # drop the trailing "/"

    # derive the cache filename from the normalized url
    filename = f'.cache/{url}'
    return url, filename
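# For example (illustrative values): "https://example.com/post/ " is normalized
# to "https://example.com/post" and cached under the stem
# ".cache/https://example.com/post"; callers then append ".txt" / ".json" /
# ".html" / ".xz" to that stem.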


def reset_content(url):
    _, filename = norm_url_and_gen_filename(url)

    text_filename = filename + ".txt"
    json_filename = filename + ".json"
    html_filename = filename + ".html"
    xz_filename   = filename + ".xz"

    for filename in [
        text_filename,
        json_filename,
        html_filename,
        xz_filename,
    ]:
        cmd = f"rm -rf '{filename}'"; print(cmd)
        subprocess.run(cmd, shell = True)


# from functools import lru_cache
# @lru_cache(maxsize=128)
def url_content(url, update_text=None):
    url, filename = norm_url_and_gen_filename(url)
    parts = filename.split("/")

    for i in range(1, len(parts)):
        path = "/".join(parts[:i])
        # print(path) # DEBUG
        os.makedirs(path, exist_ok=True)

    
    text_filename = filename + ".txt"
    json_filename = filename + ".json"
    html_filename = filename + ".html"
    xz_filename   = filename + ".xz"


    # If asked to update the cached text of this url, do it here
    if update_text is not None:

        print("\nUPDATE TEXT", filename)

        text, noise_ratio, max_noise = normalize_text(update_text, get_noise_info = True)

        # update meta, then save text and meta
        meta = json.load(open(json_filename))

        meta["updated_at"] = str(datetime.now())
        meta["updates_count"] += 1

        meta["noise_ratio"] = noise_ratio
        meta["max_noise"] = max_noise

        # Truncate the text if the input is too long
        text = cut_if_too_long(text, meta)

        with open(json_filename, "wt") as f:
            f.write(json.dumps(meta, ensure_ascii = False))
    
        with open(text_filename, "wt") as f:
            f.write(text)

        # run gen_clear_view.py (slow) in a separate process
        get_clear_view(filename)

        # Re-gen llm contents
        get_llm_gen_contents(url, use_cache = False)

        print(CYAN,end=""); print(meta); print(RESET,end="", flush=True)


    # If text_filename exists, return it first;
    # it may hold manually edited text, so it takes priority
    if os.path.exists(text_filename):
        print("\nGOT TEXT", filename, flush=True)

        norm_text = open(text_filename, "rt").read()
        return norm_text


    html = None
    # Try to read previously cached html content
    if os.path.exists(xz_filename):
        try:
            html = lzma.open(xz_filename, "rt").read()
            print("\nGOT HTML", filename, flush=True)
        except Exception:
            pass



    blacklist = """

Your connection is not private

-----BEGIN CERTIFICATE-----

""".strip().split("\n")


    ## Try the different ways of fetching HTML: requests vs crawl4ai vs reader
    get_html_method = "requests"
    if html is None:
        # Attempt 1: plain requests
        print("\nGET HTML", filename, flush=True)

        try:
            html = requests.get(url, cookies=cookies, headers=headers)
            html = str(html.text)

            # Save the html content to xz_filename
            with lzma.open(xz_filename, "wt") as f: f.write(html)

        except Exception as e:
            print(f"!!! REQUESTS Error {e} !!!")

        if isinstance(html, str):
            for x in blacklist:
                if x in html:
                    print(f"--{x}--")
                    i = html.find(x)
                    print(f"{RED}!!! REQUESTS đọc lỗi {html[i-30:i+200]} !!!{RESET}")
                    html = None
                    break


    meta = None
    if html is None or len(html) < 500:
        # Attempt 2: crawl4ai
        print("GET HTML CRAWL4AI", filename, flush=True)
        get_html_method = "crawl4ai"

        try:
            result = crawler.run(url=url)
            html = result.html

            # Save the html content to xz_filename
            with lzma.open(xz_filename, "wt") as f: f.write(html)

            # {'title': 'Ngập úng và thiệt hại trên 202.000 ha lúa | baotintuc.vn', 'description': 'Thống kê từ Bộ Nông nghiệp và Phát triển nông thôn, tính đến sáng 13/9, có trên 202.000 ha lúa, gần 39.300 ha hoa màu bị ngập úng, thiệt hại do ảnh hưởng của bão số 3.', 'keywords': None, 'author': 'baotintuc.vn', 'og:type': 'article', 'og:url': 'https://baotintuc.vn/xa-hoi/ngap-ung-va-thiet-hai-tren-202000-ha-lua-20240913095621343.htm', 'og:image': 'https://cdnmedia.baotintuc.vn/Upload/EqV5H9rWgvy9oNikwkHLXA/files/13092024-bao-1.jpg', 'og:image:url': 'https://cdnmedia.baotintuc.vn/Upload/EqV5H9rWgvy9oNikwkHLXA/files/13092024-bao-1.jpg', 'og:image:secure_url': 'https://cdnmedia.baotintuc.vn/Upload/EqV5H9rWgvy9oNikwkHLXA/files/13092024-bao-1.jpg', 'og:image:width': '460', 'og:image:height': '345', 'og:title': 'Ngập úng và thiệt hại trên 202.000 ha lúa', 'og:description': 'Thống kê từ Bộ Nông nghiệp và Phát triển nông thôn, đến sáng 13/9, có trên 202.000 ha lúa, gần 39.300 ha hoa màu bị ngập úng, thiệt hại do ảnh hưởng của bão số 3.', 'twitter: card': 'summary_large_image', 'twitter: image': 'https://cdnmedia.baotintuc.vn/Upload/EqV5H9rWgvy9oNikwkHLXA/files/13092024-bao-1.jpg'}
            meta = dict(result.metadata)

            for key in result.metadata.keys():
                if "og:" in key or "twitter:" in key:
                    meta.pop(key)

        except Exception as e:
            print(f"!!! CRAWL4AI Error {e} !!!")

        if isinstance(html, str):
            for x in blacklist:
                if x in html:
                    i = html.find(x)
                    print(f"{RED}!!! CRAWL4AI fetched a bad page {html[i-30:i+200]} !!!{RESET}")
                    html = None
                    meta = {}
                    break


    if html is None or len(html) < 500:
        # Attempt 3: the Jina reader api
        print("GET HTML READER", filename, flush=True)
        get_html_method = "reader"

        try:
            reader_url = "https://r.jina.ai/" + url
            # The header below makes https://jina.ai/reader return raw html instead of markdown
            html = requests.get(reader_url, headers = { 'X-Return-Format': 'html', }).text

            # Save the html content to xz_filename
            with lzma.open(xz_filename, "wt") as f: f.write(html)

        except Exception as e:
            print(f"!!! READER Error {e} !!!")


        if isinstance(html, str):
            for x in blacklist:
                if x in html:
                    i = html.find(x)
                    print(f"{RED}!!! READER fetched a bad page {html[i-30:i+200]} !!!{RESET}")
                    html = None
                    break


    ## Try the different text extraction methods: trafilatura vs llm vs reader
    extract_method = "trafilatura"
    # https://trafilatura.readthedocs.io/en/latest/corefunctions.html#extract
    try:
        text = trafilatura.extract(html,
            # favor_recall = True,
            include_tables = True,
            include_comments = False,
            with_metadata = False,
        )
    except Exception:
        text = ""

    if meta is None: # meta may already have been filled in by crawl4ai
        try:
            meta = trafilatura.extract(html, only_with_metadata = True)
            if meta and len(meta) > 0:
                # print(meta); input() # DEBUG
                # parse the "key: value" lines of the extracted metadata front matter
                meta = meta.split("---")[1]
                splits = re.findall(r'\S+: [^\n]+', meta)
                meta = { x.split(": ", 1)[0].strip() : x.split(": ", 1)[1].strip() for x in splits }
            else:
                meta = {}
        except Exception:
            meta = {}


    # Normalize the text
    if text is None: text = ""
    text, noise_ratio, max_noise = normalize_text(text, get_noise_info = True)
    print(f">>> {RED}noise_ratio {pretty_num(noise_ratio)}, max_noise {max_noise}{RESET}")


    MEANINGFUL = 500
    MAX_NOISE_RATIO = 0.3

    too_short = ( len(text)  < MEANINGFUL )
    too_noise = ( noise_ratio > MAX_NOISE_RATIO or max_noise > MEANINGFUL )

    # no text at all, text too short (a failed scrape), or text too noisy
    if text is None or too_short or too_noise:
        # Get the text through another method
        print("!!! Is the text below, extracted by trafilatura, problematic?")
        print("too short", too_short)
        print("too noise", too_noise)
        print("- - - "*6)
        print(f"{YELLOW}{text}{RESET}")
        print("- - - "*6)

        print("!!! Falling back to Jina Reader ...")
        reader_url = "https://r.jina.ai/" + url
        # The header below makes https://jina.ai/reader return plain text instead of markdown
        reader_text = requests.get(reader_url, headers = { 'X-Return-Format': 'text', }).text

        # Normalize the text
        reader_text, reader_noise_ratio, reader_max_noise = normalize_text(reader_text, get_noise_info = True)

        reader_too_noise = ( reader_noise_ratio > MAX_NOISE_RATIO or reader_max_noise > MEANINGFUL )

        print(f">>> {RED}reader_noise_ratio {pretty_num(reader_noise_ratio)}, reader_max_noise {reader_max_noise}{RESET}")
        print(f">>> {RED}reader_too_noise {reader_too_noise}{RESET}")

        signal = int( len(text) * (1 - noise_ratio) ) + 1
        reader_signal = int( len(reader_text) * (1 - reader_noise_ratio) ) + 1
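        # "signal" approximates the number of non-noise characters in each
        # extraction; the two results count as "samesame" when they differ
        # by less than 20%.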

        samesame = ( abs(signal - reader_signal) / reader_signal ) < 0.2
        print(f">>> {RED}samesame {samesame}, original signal {pretty_num(signal)}, reader_signal {pretty_num(reader_signal)}{RESET}")

        # If the original is too short but the reader version is too noisy, better to keep the short one
        original_too_short_but_reader_too_noise = (
            too_short and (samesame or reader_noise_ratio >= 0.5 )
        )

        original_too_noise_but_reader_even_more_noise = (
            too_noise and noise_ratio < reader_noise_ratio and max_noise < reader_max_noise
        )

        if original_too_short_but_reader_too_noise:
            print("!!! reader is too noisy, better to keep the too_short trafilatura version.")

        if original_too_noise_but_reader_even_more_noise:
            print("!!! reader is even noisier than the trafilatura version, skipping it.")


        if not original_too_short_but_reader_too_noise and \
            not original_too_noise_but_reader_even_more_noise:

            choose_original_text = False

            if reader_too_noise: # still very noisy, try readability.js

                if html is not None and len(html) > 200:

                    html_filename = filename + ".html"
                    with open(html_filename, "wt") as f:
                        f.write(html)

                    abi_text = subprocess.run(
                        f"node node_readability.js '{html_filename}' '{url}'", 
                        shell=True, 
                        capture_output=True,
                    ).stdout.decode('utf-8')

                    abi_text, abi_noise_ratio, abi_max_noise = \
                        normalize_text(abi_text, get_noise_info = True)

                    if abi_max_noise < reader_max_noise:
                        print(GREEN, ">>>", abi_text, "<<<", RESET)

                        if len(abi_text) < len(reader_text) and len(text) < len(reader_text): # prefer the shorter text
                            choose_original_text = True


            if not choose_original_text:

                extract_method = "reader"
                text = reader_text
                noise_ratio = reader_noise_ratio
                max_noise = reader_max_noise


    # update meta, then save text and meta
    meta["url"] = url
    meta["get_html_method"] = get_html_method
    meta["extract_method"] = extract_method
    meta["created_at"] = str(datetime.now())
    meta["updates_count"] = 0
    meta["noise_ratio"] = noise_ratio
    meta["max_noise"] = max_noise
    meta["text_origin_len"] = len(text)

    if "hostname" in meta: meta.pop("hostname")
    if "sitename" in meta: meta.pop("sitename")

    # Prepend the title and description to the text (if available)
    norm_text = normalize_text(text)
    text = add_title_desc_to_text(norm_text, meta)

    # Truncate the text if the input is too long
    text = cut_if_too_long(text, meta)

    print(CYAN,end=""); print(meta); print(RESET,end="")

    with open(json_filename, "wt") as f:
        f.write(json.dumps(meta, ensure_ascii = False))

    with open(text_filename, "wt") as f:
        f.write(text)

    get_clear_view(filename)
    get_llm_gen_contents(url, use_cache = False)

    return text


def get_clear_view(filename):
    # run gen_clear_view.py (slow) in a separate process
    subprocess.run(f"nohup python3 gen_clear_view.py '{filename}' &", shell = True)
    time.sleep(1) # wait 1 second (time is already imported at module level)


def cut_if_too_long(text, meta, max_words = config.text_max_words):
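    """Cut text to at most max_words words, recording the cutoff in meta.

    Illustrative example (hypothetical max_words): with max_words = 3,
    cut_if_too_long("one two three four five", meta) returns "one two three"
    and sets meta["text_cutoff"] = True and meta["text_cutoff_len"] = 13.
    """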
    words = text.split()

    if len(words) > max_words:

        words = words[ : max_words]
        threshold = len(" ".join(words))

        meta["text_cutoff"] = True
        meta["text_cutoff_len"] = threshold

        return text[ : threshold ]

    else:
        return text


def add_title_desc_to_text(text, meta):
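    """Prepend "**title**: ..." and "**description**: ..." blocks to the text
    (when present and long enough), separated by blank lines."""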
    content = []
    title       = meta["title"]       if "title"       in meta else None
    description = meta["description"] if "description" in meta else None

    if title is not None and len(title) > 5:
        content.append(f"**title**: {title}")

    if description is not None and len(description) > 10:
        content.append(f"**description**: {description}")

    content.append(text)
    return "\n\n".join(content)



def normalize_text(text, get_noise_info = False):
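    """Collapse blank lines and drop very short chunks.

    Chunks are the newline-separated pieces of the input; pieces of 20
    characters or fewer are dropped from the output.  With get_noise_info,
    also return noise_ratio (the fraction of characters living in pieces
    shorter than 80 characters) and max_noise (the longest run of
    consecutive noisy pieces, measured in characters).
    """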
    text = text.strip()
    chunks = re.split(r'\s*(?:\n\s*)+', text, flags = re.MULTILINE)
    text = "\n\n".join([ x for x in chunks if len(x) > 20 ])

    if get_noise_info:
        noise_len = 1
        total_len = 1

        max_noise = 0
        continuous_noise = 0
        for x in chunks:
            n = len(x)
            total_len += n

            if n < 80:
                noise_len += n
                continuous_noise += n
                if continuous_noise > max_noise:
                    max_noise = continuous_noise
            else:
                continuous_noise = 0

        noise_ratio = noise_len / total_len
        return text, noise_ratio, max_noise
    else:
        return text


def get_clean_view(url):
    url, filename = norm_url_and_gen_filename(url)
    clean_view_filename = filename + "__clean_view.txt"

    if os.path.exists(clean_view_filename):
        return open(clean_view_filename, "rt").read()
    else:
        return None


def get_meta(url):
    url, filename = norm_url_and_gen_filename(url)
    json_filename = filename + ".json"
    return json.load(open(json_filename))


# XML-style tags expected in the LLM output; extract_xmls() pulls these fields out
TAGS = "keyphrases figures summary".split()
###
def get_llm_gen_contents(url, use_cache = True):
    url, filename = norm_url_and_gen_filename(url)
    json_filename = filename + ".json"
    text_filename = filename + ".txt"

    if os.path.exists(json_filename):
        meta = json.load(open(json_filename, "rt"))
        
        generated = ( "llm_generated" in meta )

        if not use_cache or not generated:

            text = open(text_filename, "rt").read()
            marked_text, chunks = add_chunk_markers(text, para = True)

            raw = extract_keyphrases_figures_summary(marked_text)
            result = extract_xmls(raw, TAGS)

            result["raw"] = raw
            meta["llm_generated"] = result

            with open(json_filename, "wt") as f:
                f.write(json.dumps(meta, ensure_ascii = False))

        return meta["llm_generated"]

    else:
        return {
            "summary": "Content summary ... the document is about ...",
            "keyphrases": ["keywords 1", "keywords 2", "keywords 3"]
        }


default_urls_input = """

https://thuvienphapluat.vn/phap-luat/ho-so-dien-tu-thuc-hien-thu-tuc-hanh-chinh-la-gi-huong-dan-chuan-bi-va-nop-ho-so-dien-tu-khi-thuc-h-155754-140107.html

https://video.vnexpress.net/bon-ngay-chong-choi-lu-ngap-gan-3-m-cua-nguoi-dan-thai-nguyen-4791440.html

http://danvan.vn/Home/Tin-hoat-dong/Ban-dan-van/18706/Ban-Dan-van-Trung-uong-va-Hoi-Chu-thap-do-Viet-Nam-tham-tang-qua-nhan-dan-bi-anh-huong-bao-so-3-tai-Thai-Nguyen

https://baodauthau.vn/thai-nguyen-144-ty-dong-nang-cap-duong-cach-mang-thang-8-tp-song-cong-post164486.html

https://baothainguyen.vn/chinh-tri/202409/chu-tich-quoc-hoi-tran-thanh-man-lam-viec-voi-tinh-thai-nguyen-ve-cong-tackhac-phuc-hau-qua-bao-so-3-3f9253f/

https://baothainguyen.vn/giao-duc/202409/dam-bao-dieu-kien-de-hoc-sinh-tro-lai-truong-cham-nhat-ngay-16-9-9742985/

https://baothainguyen.vn/tai-nguyen-moi-truong/202409/khu-khuan-dien-rong-nhung-vung-bi-ngap-lut-tai-tp-thai-nguyen-585273d/

https://baothainguyen.vn/thoi-su-thai-nguyen/202409/dien-luc-tp-thai-nguyen-no-luccap-dien-tro-lai-cho-tren-2000-hotrong-ngay-12-9-da21a20/

https://baothainguyen.vn/xa-hoi/202409/tao-sinh-ke-giam-ngheo-vung-dong-bao-dan-toc-thieu-so-b8f041c/

https://baotintuc.vn/xa-hoi/ngap-ung-va-thiet-hai-tren-202000-ha-lua-20240913095621343.htm

https://daidoanket.vn/thai-nguyen-hai-nguoi-tu-vong-thiet-hai-hon-600-ty-dong-do-bao-yagi-10290104.html

https://dangcongsan.vn/xay-dung-dang/thai-nguyen-cong-bo-cac-quyet-dinh-ve-cong-tac-can-bo-677747.html

https://danviet.vn/62-y-bac-si-cua-binh-dinh-den-thai-nguyen-yen-bai-quyet-tam-cung-dong-bao-vuot-qua-kho-khan-20240913101402511.htm

https://laodong.vn/thoi-su/chu-tich-quoc-hoi-kiem-tra-cong-tac-khac-phuc-hau-qua-mua-lu-o-thai-nguyen-1393445.ldo

https://nhandan.vn/anh-chu-tich-quoc-hoi-tran-thanh-man-kiem-tra-cong-tac-khac-phuc-hau-qua-bao-so-3-tai-tinh-thai-nguyen-post830447.html

https://nld.com.vn/toi-7-gio-13-9-336-nguoi-chet-va-mat-tich-hon-130-ngan-nguoi-dan-phai-di-doi-do-bao-lu-196240913101124546.htm

https://phunuvietnam.vn/thai-nguyen-hoi-vien-phu-nu-chung-tay-khac-phuc-hau-qua-ngap-lut-20240912154801867.htm

https://phunuvietnam.vn/thai-nguyen-trien-khai-cong-tac-phong-chong-dich-sau-thien-tai-20240912174641866.htm

https://thainguyen.dcs.vn/hoat-dong-cua-cac-dang-bo/dang-bo-tp-thai-nguyen/hoi-nghi-ban-thuong-vu-thanh-uy-thai-nguyen-lan-thu-102-857.html

https://thainguyen.dms.gov.vn/tin-chi-tiet/-/chi-tiet/thai-nguyen-%C4%91am-bao-nguon-hang-hoa-phuc-vu-nhan-dan-89820-1404.html

https://thuonghieucongluan.com.vn/thai-nguyen-tiep-nhan-5-tan-gao-ho-tro-nhan-dan-bi-anh-huong-ngap-lut-a235642.html

https://tienphong.vn/nam-thanh-nien-o-thai-nguyen-bi-lu-cuon-khi-di-bat-ca-post1672693.tpo

https://tienphong.vn/ngan-hang-dau-tien-cong-bo-giam-lai-suat-cho-vay-sau-bao-so-3-post1672728.tpo

https://tuoitre.vn/chu-tich-quoc-hoi-tran-thanh-man-trao-30-ti-dong-ho-tro-khac-phuc-bao-lu-tai-thai-nguyen-20240912191724375.htm

https://tuoitre.vn/sau-lu-nguoi-dan-thai-nguyen-noi-chua-bao-gio-bun-ngap-nhieu-den-vay-202409121653144.htm

https://vietnamnet.vn/muc-nuoc-song-cau-o-thai-nguyen-giam-dan-nguoi-dan-tat-bat-don-dep-sau-lu-2321461.html

https://vtcnews.vn/trieu-nu-cuoi-huong-ve-thai-nguyen-sau-con-bao-ar895714.html

""".strip()

# This second assignment supersedes the list above; the URLs below are the ones currently in use.
default_urls_input = """

https://vnexpress.net/sam-altman-ai-thong-minh-hon-con-nguoi-trong-vai-nghin-ngay-toi-4796649.html

https://vnexpress.net/may-tram-chay-ai-gia-tram-trieu-dong-tai-viet-nam-4796490.html

https://www.vngcloud.vn/blog/what-are-large-language-models

https://arxiv.org/html/2408.16737v1

https://arxiv.org/html/2409.15700v1

https://arxiv.org/html/2409.09916v1

https://arxiv.org/html/2409.06903v1

https://arxiv.org/html/2409.12558v1

https://arxiv.org/html/2409.10516v2

https://rlhflow.github.io/posts/2024-05-29-multi-objective-reward-modeling

https://arxiv.org/html/2405.07863v2

https://arxiv.org/html/2406.12845

""".strip()