#!/usr/bin/env python3
#
# This file is part of SumeCzech corpus <http://hdl.handle.net/11234/1-2615>.
#
# Copyright 2018 Institute of Formal and Applied Linguistics, Faculty of
# Mathematics and Physics, Charles University in Prague, Czech Republic.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import hashlib
import json
import re
from urllib.parse import urlparse

from bs4 import BeautifulSoup

# The utils module provides domain_settings_dict with the per-domain
# extractor settings; re, urlparse and BeautifulSoup are also imported
# explicitly above in case the wildcard import does not re-export them.
from downloader_extractor_utils import *

URL = 'url'
HEADLINE = 'headline'
ABSTRACT = 'abstract'
TEXT = 'text'
SECTION = 'section'
SUBDOMAIN = 'subdomain'
FILENAME = 'filename'
OFFSET = 'offset'
LENGTH = 'length'
PUBLISHED = 'published'
DATASET = 'dataset'

class Extractor(object):
    @staticmethod
    def extract_document(input_document):
        parsed_content = Extractor._parse(input_document)
        Extractor._add_checksum(parsed_content)
        return parsed_content

    @staticmethod
    def _parse(entry_dict):
        # Parse the raw entry; any exceptions propagate to the caller.
        parsed_document = Extractor._parse_entry(entry_dict)

        # Normalize spacing and quotes, and apply additional replacements.
        for section_key in [HEADLINE, ABSTRACT, TEXT]:
            # Raises UnicodeEncodeError early if the text is not encodable.
            parsed_document[section_key].encode()
            # Drop carriage returns; map non-breaking spaces (U+00A0,
            # rendered as a plain space in some views) to ordinary spaces.
            parsed_document[section_key] = parsed_document[section_key].replace('\r', '').replace('\xa0', ' ')
            # Remove the "[celá zpráva]" ("[full story]") link text.
            parsed_document[section_key] = re.sub(r'\[celá zpráva]', ' ', parsed_document[section_key], 0, re.I)
            # Headlines and abstracts must be single-line.
            if section_key in [HEADLINE, ABSTRACT]:
                parsed_document[section_key] = parsed_document[section_key].replace('\n', ' ')
            # Collapse horizontal whitespace and trim spaces around newlines.
            parsed_document[section_key] = re.sub(r'[ \t\xA0\u2028]+', ' ', parsed_document[section_key].strip())
            parsed_document[section_key] = re.sub(r'[ ]*\n[ ]*', '\n', parsed_document[section_key])
            # Unify the various quotation marks into plain double quotes.
            parsed_document[section_key] = re.sub(r"['`‚‘’]{1,2}", '"', parsed_document[section_key])
            parsed_document[section_key] = re.sub(r"[„“]", '"', parsed_document[section_key])
        return parsed_document
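
    # Worked example of the normalization above (illustrative only, not
    # part of the pipeline): the input '‚Ukázka‘  textu\r' comes out as
    # '"Ukázka" textu' -- carriage return dropped, double space collapsed,
    # Czech single quotes unified into plain double quotes.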

    @staticmethod
    def _parse_entry(contents):
        """Parse one CommonCrawl JSON entry.

        Returns a dict with the keys url, headline, abstract, text,
        section, subdomain (e.g., domaci.novinky.cz), filename, offset,
        length, published and dataset.
        """
        url = contents['url']
        parse = urlparse(url)
        # Registered domain (e.g., novinky.cz) and subdomain without 'www.'.
        domain = '.'.join(parse.netloc.rsplit('.', maxsplit=2)[-2:])
        subdomain = parse.netloc.replace('www.', '')
        if domain == subdomain:
            # No real subdomain: use the first path component as the
            # section, unless it is not a plain alphanumeric token.
            section = (parse.path.split('/') + [''])[1]
            if not section.isalnum():
                section = subdomain
        else:
            section = subdomain
        if 'blog' in section:
            section = 'blogs'
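
        # Illustrative examples of the rules above (URLs hypothetical):
        # https://www.novinky.cz/domaci/clanek -> section 'domaci'
        # https://domaci.novinky.cz/clanek     -> section 'domaci.novinky.cz'
        # https://blog.idnes.cz/clanek         -> section 'blogs'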

        # Get the domain/subdomain settings: some subdomains require
        # specific settings; if there are none, use the generic settings
        # for the whole domain.
        domain_settings = domain_settings_dict.get(subdomain, domain_settings_dict.get(domain, None))

        # Decode the raw content. Pages declared as non-UTF-8 are sometimes
        # encoded in UTF-8 anyway -- try UTF-8 first and fall back to the
        # encoding configured for the domain.
        try:
            warc = contents['content'].encode('latin-1').decode('utf-8')
        except UnicodeDecodeError:
            warc = contents['content'].encode('latin-1').decode(domain_settings.encoding)

        # Drop the WARC/HTTP headers and parse the HTML payload.
        html = warc.split('\r\n\r\n', maxsplit=2)[-1].replace('\r', '')
        soup = BeautifulSoup(html, 'html.parser')

        # Replace <br> tags by newlines.
        for br in soup('br'):
            br.replace_with('\n')

        # Extract the fields using the domain-specific extractors.
        headline_text = domain_settings.headline_extractor(soup)
        abstract_text = domain_settings.abstract_extractor(soup)
        document_text = domain_settings.document_extractor(soup, domain)
        published = domain_settings.date_extractor(soup)
        if published is None:
            published = ""

        return {URL: url,
                HEADLINE: headline_text,
                ABSTRACT: abstract_text,
                TEXT: document_text,
                SECTION: section,
                SUBDOMAIN: subdomain,
                FILENAME: contents[FILENAME],
                OFFSET: contents[OFFSET],
                LENGTH: contents[LENGTH],
                PUBLISHED: published,
                DATASET: contents[DATASET]}

    @staticmethod
    def _add_checksum(json_data):
        # Compute an MD5 checksum over a canonical JSON serialization
        # (sorted keys, no whitespace, ASCII-only) of the content fields.
        json_data_for_checksum = {}
        for field in [HEADLINE, ABSTRACT, TEXT, SECTION, SUBDOMAIN, PUBLISHED, URL]:
            json_data_for_checksum[field] = json_data[field]
        string_for_checksum = json.dumps(json_data_for_checksum,
                                         ensure_ascii=True,
                                         sort_keys=True,
                                         indent=None,
                                         separators=(",", ":"))
        json_data['md5'] = hashlib.md5(string_for_checksum.encode('utf-8')).hexdigest()
        return json_data
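

# A minimal usage sketch (an addition, not part of the original file).
# It assumes JSONL input on stdin, one CommonCrawl entry per line,
# carrying the 'url', 'content', 'filename', 'offset', 'length' and
# 'dataset' fields that _parse_entry reads.
if __name__ == '__main__':
    import sys
    for line in sys.stdin:
        entry = json.loads(line)
        document = Extractor.extract_document(entry)
        # document now also carries the 'md5' field computed above; it can
        # be re-verified later by recomputing the checksum over the same
        # seven content fields.
        print(json.dumps(document, ensure_ascii=False))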