sumeczech_downsampled / downloader_extractor.py

#!/usr/bin/env python3
#
# This file is part of SumeCzech corpus <http://hdl.handle.net/11234/1-2615>.
#
# Copyright 2018 Institute of Formal and Applied Linguistics, Faculty of
# Mathematics and Physics, Charles University in Prague, Czech Republic.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import hashlib
import json
import re
from urllib.parse import urlparse

from bs4 import BeautifulSoup

# Provides domain_settings_dict: per-(sub)domain settings carrying the page
# encoding and the headline/abstract/document/date extractor callables used below.
from downloader_extractor_utils import *
URL = 'url'
HEADLINE = 'headline'
ABSTRACT = 'abstract'
TEXT = 'text'
SECTION = 'section'
SUBDOMAIN = 'subdomain'
FILENAME = 'filename'
OFFSET = 'offset'
LENGTH = 'length'
PUBLISHED = 'published'
DATASET = 'dataset'
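
# The constants above name the fields of one extracted record. Illustrative
# shape only (the values below are made up, not taken from the corpus):
#   {"url": "https://www.novinky.cz/domaci/...", "headline": "...",
#    "abstract": "...", "text": "...", "section": "domaci",
#    "subdomain": "novinky.cz", "filename": "....warc.gz", "offset": 0,
#    "length": 12345, "published": "2018-01-01", "dataset": "...",
#    "md5": "<checksum added by _add_checksum>"}
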
class Extractor(object):
    @staticmethod
    def extract_document(input_document):
        parsed_content = Extractor._parse(input_document)
        Extractor._add_checksum(parsed_content)
        return parsed_content
    @staticmethod
    def _parse(entry_dict):
        # Parse the entry; exceptions propagate to the caller.
        parsed_document = Extractor._parse_entry(entry_dict)

        # Normalize spacing and quotes, and apply additional replacements.
        for section_key in [HEADLINE, ABSTRACT, TEXT]:
            # Raises UnicodeEncodeError early on invalid text (e.g., lone
            # surrogates); the encoded result itself is discarded.
            parsed_document[section_key].encode()
            parsed_document[section_key] = parsed_document[section_key].replace('\r', '').replace('&nbsp;', ' ')
            # Drop the '[celá zpráva]' ("[full story]") link text.
            parsed_document[section_key] = re.sub(r'\[celá zpráva]', ' ', parsed_document[section_key], flags=re.I)
            if section_key in [HEADLINE, ABSTRACT]:
                parsed_document[section_key] = parsed_document[section_key].replace('\n', ' ')
            # Collapse runs of spaces, tabs, no-break spaces and line separators.
            parsed_document[section_key] = re.sub(r'[ \t\xA0\u2028]+', ' ', parsed_document[section_key].strip())
            parsed_document[section_key] = re.sub(r'[ ]*\n[ ]*', '\n', parsed_document[section_key])
            # Normalize single and typographic quotes to plain double quotes.
            parsed_document[section_key] = re.sub(r"['`‚‘’]{1,2}", '"', parsed_document[section_key])
            parsed_document[section_key] = re.sub(r"[„“]", '"', parsed_document[section_key])
        return parsed_document
    @staticmethod
    def _parse_entry(contents):
        """Parse one CommonCrawl JSON entry.

        Returns a dict with the extracted fields: url, headline, abstract,
        text, section, subdomain (e.g., domaci.novinky.cz), filename,
        offset, length, published and dataset.
        """
        url = contents['url']
        parse = urlparse(url)
        domain = '.'.join(parse.netloc.rsplit('.', maxsplit=2)[-2:])
        subdomain = parse.netloc.replace('www.', '')
        if domain == subdomain:
            section = (parse.path.split('/') + [''])[1]
            if not section.isalnum():
                section = subdomain
        else:
            section = subdomain
        if 'blog' in section:
            section = 'blogs'
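        # Worked example (illustrative URLs, not necessarily in the corpus):
        #   https://www.novinky.cz/domaci/clanek -> domain == subdomain ==
        #   'novinky.cz', so section is taken from the path: 'domaci'.
        #   https://sport.ceskenoviny.cz/clanek -> subdomain
        #   'sport.ceskenoviny.cz' differs from domain 'ceskenoviny.cz',
        #   so section falls back to the subdomain.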
        # Get domain/subdomain settings: some subdomains require specific
        # settings; if there are none, use the generic settings for the domain.
        domain_settings = domain_settings_dict.get(subdomain, domain_settings_dict.get(domain, None))
        # Parse the payload. contents['content'] holds the raw WARC bytes
        # mapped through latin-1, so encoding back to latin-1 recovers them.
        # Pages declared as non-UTF-8 sometimes are in fact encoded in
        # UTF-8 -- try UTF-8 first and fall back to the declared encoding.
        try:
            warc = contents['content'].encode('latin-1').decode('utf-8')
        except UnicodeDecodeError:
            warc = contents['content'].encode('latin-1').decode(domain_settings.encoding)
        # Skip the WARC and HTTP header blocks (separated by blank lines)
        # and keep only the HTML body.
        html = warc.split('\r\n\r\n', maxsplit=2)[-1].replace('\r', '')
        soup = BeautifulSoup(html, 'html.parser')

        # Replace <br> tags by a newline.
        for br in soup('br'):
            br.replace_with('\n')

        # Extract the fields using the site-specific extractors.
        headline_text = domain_settings.headline_extractor(soup)
        abstract_text = domain_settings.abstract_extractor(soup)
        document_text = domain_settings.document_extractor(soup, domain)
        published = domain_settings.date_extractor(soup)
        if published is None:
            published = ""
        return {URL: url,
                HEADLINE: headline_text,
                ABSTRACT: abstract_text,
                TEXT: document_text,
                SECTION: section,
                SUBDOMAIN: subdomain,
                FILENAME: contents[FILENAME],
                OFFSET: contents[OFFSET],
                LENGTH: contents[LENGTH],
                PUBLISHED: published,
                DATASET: contents[DATASET]}
    @staticmethod
    def _add_checksum(json_data):
        json_data_for_checksum = {}
        for field in ["headline", "abstract", "text", "section", "subdomain", "published", "url"]:
            json_data_for_checksum[field] = json_data[field]
        # Serialize deterministically (ASCII only, sorted keys, no extra
        # whitespace) so the checksum is stable across runs and platforms.
        string_for_checksum = json.dumps(json_data_for_checksum,
                                         ensure_ascii=True,
                                         sort_keys=True,
                                         indent=None,
                                         separators=(",", ":"))
        json_data['md5'] = hashlib.md5(string_for_checksum.encode('utf-8')).hexdigest()
        return json_data