#!/usr/bin/env python3
# Copyright 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
"""Various retriever utilities."""
import regex
import unicodedata
import numpy as np
import scipy.sparse as sp
from sklearn.utils import murmurhash3_32


# ------------------------------------------------------------------------------
# Sparse matrix saving/loading helpers.
# ------------------------------------------------------------------------------


def save_sparse_csr(filename, matrix, metadata=None):
    """Save a scipy.sparse CSR matrix, plus optional metadata, to a .npz file."""
    data = {
        'data': matrix.data,
        'indices': matrix.indices,
        'indptr': matrix.indptr,
        'shape': matrix.shape,
        'metadata': metadata,
    }
    np.savez(filename, **data)


def load_sparse_csr(filename):
    """Load a CSR matrix saved by save_sparse_csr; return (matrix, metadata)."""
    loader = np.load(filename, allow_pickle=True)
    matrix = sp.csr_matrix((loader['data'], loader['indices'],
                            loader['indptr']), shape=loader['shape'])
    return matrix, (loader['metadata'].item(0) if 'metadata' in loader else None)
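
# A minimal save/load round trip, as a sketch (the filename and metadata values
# here are illustrative; np.savez appends '.npz' to a bare filename):
#
#   matrix = sp.csr_matrix(np.eye(3))
#   save_sparse_csr('count_matrix', matrix, metadata={'hash_size': 16})
#   loaded, meta = load_sparse_csr('count_matrix.npz')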


# ------------------------------------------------------------------------------
# Token hashing.
# ------------------------------------------------------------------------------


def hash(token, num_buckets):
    """Unsigned 32-bit murmurhash for feature hashing.

    Note: shadows the built-in hash() inside this module.
    """
    return murmurhash3_32(token, positive=True) % num_buckets
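
# For example, hash('the', 2 ** 24) deterministically maps the token to a
# bucket id in [0, 2 ** 24), so hashed n-gram features can be matched without
# storing a vocabulary. (The bucket count here is illustrative.)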


# ------------------------------------------------------------------------------
# Text cleaning.
# ------------------------------------------------------------------------------


STOPWORDS = {
    'i', 'me', 'my', 'myself', 'we', 'our', 'ours', 'ourselves', 'you', 'your',
    'yours', 'yourself', 'yourselves', 'he', 'him', 'his', 'himself', 'she',
    'her', 'hers', 'herself', 'it', 'its', 'itself', 'they', 'them', 'their',
    'theirs', 'themselves', 'what', 'which', 'who', 'whom', 'this', 'that',
    'these', 'those', 'am', 'is', 'are', 'was', 'were', 'be', 'been', 'being',
    'have', 'has', 'had', 'having', 'do', 'does', 'did', 'doing', 'a', 'an',
    'the', 'and', 'but', 'if', 'or', 'because', 'as', 'until', 'while', 'of',
    'at', 'by', 'for', 'with', 'about', 'against', 'between', 'into', 'through',
    'during', 'before', 'after', 'above', 'below', 'to', 'from', 'up', 'down',
    'in', 'out', 'on', 'off', 'over', 'under', 'again', 'further', 'then',
    'once', 'here', 'there', 'when', 'where', 'why', 'how', 'all', 'any',
    'both', 'each', 'few', 'more', 'most', 'other', 'some', 'such', 'no', 'nor',
    'not', 'only', 'own', 'same', 'so', 'than', 'too', 'very', 's', 't', 'can',
    'will', 'just', 'don', 'should', 'now', 'd', 'll', 'm', 'o', 're', 've',
    'y', 'ain', 'aren', 'couldn', 'didn', 'doesn', 'hadn', 'hasn', 'haven',
    'isn', 'ma', 'mightn', 'mustn', 'needn', 'shan', 'shouldn', 'wasn', 'weren',
    'won', 'wouldn', "'ll", "'re", "'ve", "n't", "'s", "'d", "'m", "''", "``"
}


def normalize(text):
    """Resolve different types of unicode encodings (NFD normalization)."""
    return unicodedata.normalize('NFD', text)


def filter_word(text):
    """Return True for English stopwords, punctuation, and compound endings."""
    text = normalize(text)
    if regex.match(r'^\p{P}+$', text):
        return True
    if text.lower() in STOPWORDS:
        return True
    return False
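
# A few illustrative calls:
#   filter_word('the')   # True  -- stopword
#   filter_word('...')   # True  -- punctuation only
#   filter_word('fox')   # False -- kept as a content word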


def filter_ngram(gram, mode='any'):
    """Decide whether to keep or discard an n-gram.

    Args:
        gram: list of tokens (length N)
        mode: option to throw out the ngram if
            'any': any single token passes filter_word
            'all': all tokens pass filter_word
            'ends': book-ended by filterable tokens
    """
    filtered = [filter_word(w) for w in gram]
    if mode == 'any':
        return any(filtered)
    elif mode == 'all':
        return all(filtered)
    elif mode == 'ends':
        return filtered[0] or filtered[-1]
    else:
        raise ValueError('Invalid mode: %s' % mode)
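
# Illustrative calls (True means the n-gram is discarded):
#   filter_ngram(['the', 'fox'], mode='any')         # True  ('the' is filterable)
#   filter_ngram(['the', 'fox'], mode='all')         # False ('fox' is not)
#   filter_ngram(['fox', 'of', 'war'], mode='ends')  # False (both ends are kept)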


def get_field(d, field_list):
    """Get the subfield of d addressed by a list of elastic field names.

    E.g. field_list = ['file', 'filename'] returns d['file']['filename'].
    """
    if isinstance(field_list, str):
        return d[field_list]
    else:
        idx = d.copy()
        for field in field_list:
            idx = idx[field]
        return idx
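
# Illustrative call on a nested dict:
#   d = {'file': {'filename': 'doc.txt'}}
#   get_field(d, ['file', 'filename'])  # 'doc.txt'
#   get_field(d, 'file')                # {'filename': 'doc.txt'}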