import pickle
import re
import string
import traceback
from typing import Iterator, List, Sequence, Tuple, TypeVar

# Try to load the C++ extension for fast n-gram scanning; fall back to the
# pure-Python implementation if it is unavailable.
try:
    import janitor_util

    JANITOR_CPP = True
except Exception:
    print('WARNING: C++ module could not be loaded. Janitor running in python mode')
    traceback.print_exc()
    JANITOR_CPP = False


T = TypeVar('T')


def form_ngrams(sequence: Iterator[T], n: int) -> Iterator[Tuple[T, ...]]:
    """Yield successive n-grams from sequence as tuples."""
    history = []
    # Prime the window with the first n - 1 items.
    while n > 1:
        try:
            next_item = next(sequence)
        except StopIteration:
            # Fewer than n items in total: nothing to yield.
            return
        history.append(next_item)
        n -= 1

    # Slide the window across the rest of the sequence, yielding one n-gram
    # per step.
    for item in sequence:
        history.append(item)
        yield tuple(history)
        del history[0]
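
# Example (illustrative, worked by hand): form_ngrams(iter('abcd'), 2) yields
# ('a', 'b'), ('b', 'c'), ('c', 'd').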


def word_ngrams(s: str, n: int) -> Iterator[str]:
    """Split s on whitespace and yield space-joined word n-grams."""
    tokens = s.split()
    ngram_seqs = form_ngrams(iter(tokens), n)
    return (' '.join(ngram) for ngram in ngram_seqs)


def split_indices(s: str) -> Iterator[Tuple[str, Tuple[int, int]]]:
    """Yield (token, (start, end)) pairs with inclusive character indices."""
    return ((m.group(0), (m.start(), m.end() - 1)) for m in re.finditer(r'\S+', s))


def word_ngrams_indices(s: str, n: int) -> Iterator[Tuple[str, Tuple[int, int]]]:
    """Yield (ngram, (start, end)) pairs spanning first to last token."""
    tokens_with_indices = split_indices(s)
    # Form n-grams of (token, index-pair) tuples, then split each n-gram back
    # into its token sequence and its index sequence.
    ngram_seqs_with_indices = form_ngrams(tokens_with_indices, n)
    ngram_indices_pairs = (
        zip(*ngram_with_indices) for ngram_with_indices in ngram_seqs_with_indices
    )
    return (
        (' '.join(ngram_seq), (indices[0][0], indices[-1][1]))
        for ngram_seq, indices in ngram_indices_pairs
    )
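
# Illustrative values, worked by hand:
#   list(word_ngrams('a b c d', 2))         -> ['a b', 'b c', 'c d']
#   list(word_ngrams_indices('a b c d', 2)) -> [('a b', (0, 2)), ('b c', (2, 4)), ('c d', (4, 6))]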


class Janitor:
    """Registers 'dirty' n-grams and removes/splits documents around them."""

    def __init__(
        self,
        ngram_n: int = 13,
        window_to_remove: int = 200,
        too_dirty_cutoff: int = 10,
        minimum_slice_length: int = 200,
        delete_chars: str = string.punctuation,
    ) -> None:
        self.ngram_n = ngram_n
        self.window_to_remove = window_to_remove
        self.too_dirty_cutoff = too_dirty_cutoff
        self.minimum_slice_length = minimum_slice_length
        self.delete_chars = delete_chars
        self.dirt_ngrams = set()
        # Translation table that lowercases ASCII letters and deletes
        # delete_chars (punctuation by default); used by normalize_string.
        self.translation_table = str.maketrans(
            string.ascii_lowercase + string.ascii_uppercase,
            string.ascii_lowercase * 2,
            self.delete_chars,
        )

    def save_contamination_ngrams(self, filename: str) -> None:
        with open(filename, 'wb') as fp:
            # Persist the registered contamination n-grams.
            pickle.dump(self.dirt_ngrams, fp)

    def load_contamination_ngrams(self, filename: str) -> None:
        with open(filename, 'rb') as fp:
            self.dirt_ngrams = pickle.load(fp)

    def register_contaminant(self, dirt_string: str) -> None:
        if JANITOR_CPP:
            return self.register_contaminant_cpp(dirt_string)
        else:
            print('WARNING: Janitor running in python mode')
            return self.register_contaminant_python(dirt_string)

    def clean(self, dirty_string: str) -> List[str]:
        if JANITOR_CPP:
            return self.clean_cpp(dirty_string)
        else:
            print('WARNING: Janitor running in python mode')
            return self.clean_python(dirty_string)

    def _split_chunks(
        self, dirty_string: str, dirty_parts: Sequence[Tuple]
    ) -> List[str]:
        clean_chunks = []
        splice_idx = 0
        end = -1
        for i, (ngram, start, end) in enumerate(dirty_parts):
            # Too many dirty parts: drop the whole document.
            if i >= self.too_dirty_cutoff:
                return []
            # Widen the removal window around the contaminated span.
            start = max(0, start - self.window_to_remove)
            end = min(len(dirty_string), end + self.window_to_remove)
            # Keep the text before this dirty part if it is long enough.
            if start - splice_idx > self.minimum_slice_length:
                clean_chunks.append(dirty_string[splice_idx:start])
            splice_idx = end
        # Keep the tail after the last dirty part if it is long enough.
        if end < len(dirty_string) - self.minimum_slice_length:
            clean_chunks.append(dirty_string[end + 1:])
        return clean_chunks
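
    # Illustration (hypothetical values): with window_to_remove=2 and
    # minimum_slice_length=3, a dirty part spanning characters (10, 14)
    # widens to (8, 16); the text before index 8 and after index 16 is
    # kept only if each surviving slice exceeds the minimum length.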

    def register_contaminant_cpp(self, dirt_string) -> None:
        self.dirt_ngrams.update(
            janitor_util.clean_ngram(dirt_string, self.delete_chars, self.ngram_n)
        )

    def clean_cpp(self, dirty_string: str) -> List[str]:
        contamination_indices = janitor_util.clean_ngram_with_indices(
            dirty_string, self.delete_chars, self.ngram_n
        )
        return self._split_chunks(dirty_string, contamination_indices)

    def normalize_string(self, s: str) -> str:
        # Lowercase and strip delete_chars via the translation table.
        return s.translate(self.translation_table)

    def register_contaminant_python(self, dirt_string: str) -> None:
        self.dirt_ngrams.update(
            word_ngrams(self.normalize_string(dirt_string), self.ngram_n)
        )

    def clean_python(self, dirty_string: str) -> List[str]:
        # Collect (None, start, end) triples for every n-gram whose normalized
        # form matches a registered contaminant, then split around them.
        contamination_indices = (
            (None, *idx_pair)
            for dirty_ngram, idx_pair in word_ngrams_indices(dirty_string, self.ngram_n)
            if self.normalize_string(dirty_ngram) in self.dirt_ngrams
        )
        return self._split_chunks(dirty_string, contamination_indices)
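

# A minimal usage sketch (not part of the original module; the tiny parameter
# values are for illustration only, the defaults are much larger):
if __name__ == '__main__':
    janitor = Janitor(ngram_n=3, window_to_remove=5, minimum_slice_length=5)
    janitor.register_contaminant('the quick brown fox')
    # A document containing a registered 3-gram is split around it and the
    # surviving clean chunks are returned.
    chunks = janitor.clean('padding text here the quick brown fox jumps over more padding text')
    print(chunks)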