desc (string, 3-26.7k chars) | decl (string, 11-7.89k chars) | bodies (string, 8-553k chars)
---|---|---|
'Recalculates the post_count and topic_count in the forum.
Returns the forum with the recounted stats.
:param last_post: If set to ``True`` it will also try to update
the last post columns in the forum.'
| def recalculate(self, last_post=False):
| topic_count = Topic.query.filter_by(forum_id=self.id).count()
post_count = Post.query.filter((Post.topic_id == Topic.id), (Topic.forum_id == self.id)).count()
self.topic_count = topic_count
self.post_count = post_count
if last_post:
self.update_last_post()
self.save()
return self
|
'Saves the forum.
:param groups: A list with group objects that are allowed to access
this forum. For a new forum, defaults to all groups if not given.'
| def save(self, groups=None):
| if self.id:
db.session.merge(self)
else:
if (groups is None):
from flaskbb.user.models import Group
self.groups = Group.query.order_by(Group.name.asc()).all()
db.session.add(self)
db.session.commit()
return self
|
'Deletes the forum. If a list with involved user objects is passed,
it will also update their post counts
:param users: A list with user objects'
| def delete(self, users=None):
| db.session.delete(self)
db.session.commit()
ForumsRead.query.filter_by(forum_id=self.id).delete()
TopicsRead.query.filter_by(forum_id=self.id).delete()
if users:
users_list = []
for user in users:
user.post_count = Post.query.filter_by(user_id=user.id).count()
users_list.append(user)
db.session.add_all(users_list)
db.session.commit()
return self
|
'Moves a bunch of topics to the forum. Returns ``True`` if all
topics were moved successfully to the forum.
:param topics: An iterable with topic objects.'
| def move_topics_to(self, topics):
| status = False
for topic in topics:
status = topic.move(self)
return status
|
'Returns the forum and forumsread object as a tuple for the user.
:param forum_id: The forum id
:param user: The user object is needed to check if we also need their
forumsread object.'
| @classmethod
def get_forum(cls, forum_id, user):
| if user.is_authenticated:
(forum, forumsread) = Forum.query.filter((Forum.id == forum_id)).options(db.joinedload('category')).outerjoin(ForumsRead, db.and_((ForumsRead.forum_id == Forum.id), (ForumsRead.user_id == user.id))).add_entity(ForumsRead).first_or_404()
else:
forum = Forum.query.filter((Forum.id == forum_id)).first_or_404()
forumsread = None
return (forum, forumsread)
|
'Get the topics for the forum. If the user is logged in,
it will perform an outerjoin for the topics with the topicsread and
forumsread relation to check if it is read or unread.
:param forum_id: The forum id
:param user: The user object
:param page: The page which should be loaded
:param per_page: How many topics per page should be shown'
| @classmethod
def get_topics(cls, forum_id, user, page=1, per_page=20):
| if user.is_authenticated:
topics = Topic.query.filter_by(forum_id=forum_id).outerjoin(TopicsRead, db.and_((TopicsRead.topic_id == Topic.id), (TopicsRead.user_id == user.id))).add_entity(TopicsRead).order_by(Topic.important.desc(), Topic.last_updated.desc()).paginate(page, per_page, True)
else:
topics = Topic.query.filter_by(forum_id=forum_id).order_by(Topic.important.desc(), Topic.last_updated.desc()).paginate(page, per_page, True)
topics.items = [(topic, None) for topic in topics.items]
return topics
|
'Returns a slugified version of the category title'
| @property
def slug(self):
| return slugify(self.title)
|
'Returns the slugified url for the category'
| @property
def url(self):
| return url_for('forum.view_category', category_id=self.id, slug=self.slug)
|
'Set to a unique key specific to the object in the database.
Required for cache.memoize() to work across requests.'
| def __repr__(self):
| return '<{} {}>'.format(self.__class__.__name__, self.id)
|
'Deletes a category. If a list with involved user objects is passed,
it will also update their post counts
:param users: A list with user objects'
| def delete(self, users=None):
| db.session.delete(self)
db.session.commit()
if users:
for user in users:
user.post_count = Post.query.filter_by(user_id=user.id).count()
db.session.commit()
return self
|
'Get all categories with all associated forums.
It returns a list of tuples, each containing a category and its
associated forums (stored in a list).
For example::
[(<Category 1>, [(<Forum 2>, <ForumsRead>), (<Forum 1>, None)]),
(<Category 2>, [(<Forum 3>, None), (<Forum 4>, None)])]
:param user: The user object is needed to check if we also need their
forumsread object.'
| @classmethod
def get_all(cls, user):
| from flaskbb.user.models import Group
if user.is_authenticated:
user_groups = [gr.id for gr in user.groups]
user_forums = Forum.query.filter(Forum.groups.any(Group.id.in_(user_groups))).subquery()
forum_alias = aliased(Forum, user_forums)
forums = cls.query.join(forum_alias, (cls.id == forum_alias.category_id)).outerjoin(ForumsRead, db.and_((ForumsRead.forum_id == forum_alias.id), (ForumsRead.user_id == user.id))).add_entity(forum_alias).add_entity(ForumsRead).order_by(Category.position, Category.id, forum_alias.position).all()
else:
guest_group = Group.get_guest_group()
guest_forums = Forum.query.filter(Forum.groups.any((Group.id == guest_group.id))).subquery()
forum_alias = aliased(Forum, guest_forums)
forums = cls.query.join(forum_alias, (cls.id == forum_alias.category_id)).add_entity(forum_alias).order_by(Category.position, Category.id, forum_alias.position).all()
return get_categories_and_forums(forums, user)
|
'Get the forums for the category.
It returns a tuple with the category and a list of the forums
together with their forumsread objects.
A return value can look like this for a category with two forums::
(<Category 1>, [(<Forum 1>, None), (<Forum 2>, None)])
:param category_id: The category id
:param user: The user object is needed to check if we also need their
forumsread object.'
| @classmethod
def get_forums(cls, category_id, user):
| from flaskbb.user.models import Group
if user.is_authenticated:
user_groups = [gr.id for gr in user.groups]
user_forums = Forum.query.filter(Forum.groups.any(Group.id.in_(user_groups))).subquery()
forum_alias = aliased(Forum, user_forums)
forums = cls.query.filter((cls.id == category_id)).join(forum_alias, (cls.id == forum_alias.category_id)).outerjoin(ForumsRead, db.and_((ForumsRead.forum_id == forum_alias.id), (ForumsRead.user_id == user.id))).add_entity(forum_alias).add_entity(ForumsRead).order_by(forum_alias.position).all()
else:
guest_group = Group.get_guest_group()
guest_forums = Forum.query.filter(Forum.groups.any((Group.id == guest_group.id))).subquery()
forum_alias = aliased(Forum, guest_forums)
forums = cls.query.filter((cls.id == category_id)).join(forum_alias, (cls.id == forum_alias.category_id)).add_entity(forum_alias).order_by(forum_alias.position).all()
if (not forums):
abort(404)
return get_forums(forums, user)
|
'Is ``True`` if the Plugin **has** settings, i.e. a settings key is set.'
| @property
def has_settings(self):
| if (self.settings_key is not None):
return True
return False
|
'Is ``True`` if the Plugin **can** be uninstalled.'
| @property
def uninstallable(self):
| warnings.warn('self.uninstallable is deprecated. Use self.installed instead.', FlaskBBPluginDeprecationWarning)
return self.installed
|
'Registers a blueprint.
:param blueprint: The blueprint which should be registered.'
| def register_blueprint(self, blueprint, **kwargs):
| current_app.register_blueprint(blueprint, **kwargs)
|
'Returns a pair of booleans:
* Is the topic locked?
* Is the forum the topic belongs to locked?
Except in the case of a topic instance being provided to the
constructor, all of these tuples are SQLA KeyedTuples.'
| def _determine_locked(self, request):
| if (self._topic is not None):
return (self._topic.locked, self._topic.forum.locked)
elif (self._post is not None):
return (self._post.topic.locked, self._post.topic.forum.locked)
elif (self._topic_id is not None):
return Topic.query.join(Forum, (Forum.id == Topic.forum_id)).filter((Topic.id == self._topic_id)).with_entities(Topic.locked, Forum.locked).first()
else:
return self._get_topic_from_request(request)
|
'Saves the object to the database.'
| def save(self):
| db.session.add(self)
db.session.commit()
return self
|
'Delete the object from the database.'
| def delete(self):
| db.session.delete(self)
db.session.commit()
return self
|
'Way into the database.'
| def process_bind_param(self, value, dialect):
| if (value is not None):
if (dialect.name in ('sqlite', 'mysql')):
return value.replace(tzinfo=None)
return value.astimezone(pytz.UTC)
|
'Way out of the database.'
| def process_result_value(self, value, dialect):
| if ((dialect.name in ('sqlite', 'mysql')) and (value is not None)):
return value.replace(tzinfo=pytz.UTC)
return value
|
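The two methods above only make sense inside a SQLAlchemy ``TypeDecorator``. Below is a minimal, self-contained sketch of how such a UTC-normalizing column type is typically assembled; the class name and the ``impl`` wiring are assumptions for illustration, not FlaskBB's exact module layout.

```python
# Hypothetical standalone wrapper around the bind/result hooks shown above.
import pytz
from sqlalchemy.types import DateTime, TypeDecorator


class UTCDateTime(TypeDecorator):
    """Normalize datetimes to UTC; store them naive on SQLite/MySQL."""

    impl = DateTime

    def process_bind_param(self, value, dialect):
        # Way into the database.
        if value is not None:
            if dialect.name in ('sqlite', 'mysql'):
                return value.replace(tzinfo=None)
            return value.astimezone(pytz.UTC)

    def process_result_value(self, value, dialect):
        # Way out of the database.
        if dialect.name in ('sqlite', 'mysql') and value is not None:
            return value.replace(tzinfo=pytz.UTC)
        return value
```

A model column would then be declared as, for example, ``created = db.Column(UTCDateTime(timezone=True))``.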
'Performs the actual validation.'
| def _validate_recaptcha(self, response, remote_addr):
| try:
private_key = flaskbb_config['RECAPTCHA_PRIVATE_KEY']
except KeyError:
raise RuntimeError('No RECAPTCHA_PRIVATE_KEY config set')
data = url_encode({'secret': private_key, 'remoteip': remote_addr, 'response': response})
http_response = http.urlopen(RECAPTCHA_VERIFY_SERVER, to_bytes(data))
if (http_response.code != 200):
return False
json_resp = json.loads(to_unicode(http_response.read()))
if json_resp['success']:
return True
for error in json_resp.get('error-codes', []):
if (error in RECAPTCHA_ERROR_CODES):
raise ValidationError(RECAPTCHA_ERROR_CODES[error])
return False
|
'Returns the recaptcha input HTML.'
| def __call__(self, field, error=None, **kwargs):
| if (not flaskbb_config['RECAPTCHA_ENABLED']):
return
try:
public_key = flaskbb_config['RECAPTCHA_PUBLIC_KEY']
except KeyError:
raise RuntimeError('RECAPTCHA_PUBLIC_KEY config not set')
return self.recaptcha_html(public_key)
|
'Initializes the widget.
:param years: The range of years that should be selectable.
Defaults to ``1930`` through the current year.'
| def __init__(self, years=range(1930, (datetime.utcnow().year + 1))):
| super(SelectBirthdayWidget, self).__init__()
self.FORMAT_CHOICES['%Y'] = [(x, str(x)) for x in years]
|
'Renders paragraph tags (``<p>``) with emoji and @mention support.'
| def paragraph(self, text):
| def emojify(match):
value = match.group(1)
if (value in EMOJIS):
filename = url_for('static', filename='emoji/{}'.format(EMOJIS[value]))
emoji = "<img class='{css}' alt='{alt}' src='{src}' />".format(css='emoji', alt=value, src=filename)
return emoji
return match.group(0)
def userify(match):
value = match.group(1)
user = "<a href='{url}'>@{user}</a>".format(url=url_for('user.profile', username=value, _external=False), user=value)
return user
text = _re_emoji.sub(emojify, text)
text = _re_user.sub(userify, text)
return ('<p>%s</p>\n' % text.strip(' '))
|
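A standalone sketch of the same regex-callback technique, with a hypothetical ``EMOJIS`` table, hand-written patterns, and static paths standing in for the module-level ``_re_emoji``/``_re_user`` and Flask's ``url_for``:

```python
import re

# Hypothetical subset of the emoji table; the real module uses EMOJIS and url_for().
EMOJIS = {'smile': 'smile.png', 'heart': 'heart.png'}
_re_emoji = re.compile(r':([a-z0-9+_-]+):')
_re_user = re.compile(r'@(\w+)')


def render_paragraph(text):
    def emojify(match):
        value = match.group(1)
        if value in EMOJIS:
            src = '/static/emoji/{}'.format(EMOJIS[value])
            return "<img class='emoji' alt='{alt}' src='{src}' />".format(alt=value, src=src)
        return match.group(0)  # unknown code: leave the original text in place

    def userify(match):
        value = match.group(1)
        return "<a href='/user/{user}'>@{user}</a>".format(user=value)

    text = _re_emoji.sub(emojify, text)   # replace :code: with <img> tags
    text = _re_user.sub(userify, text)    # replace @name with profile links
    return '<p>%s</p>\n' % text.strip(' ')


print(render_paragraph('hello @alice :smile:'))
```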
'Returns the correct gettext translations that should be used for
this request. This will never fail and return a dummy translation
object if used outside of the request or if a translation cannot be
found.'
| def get_translations(self):
| locale = get_locale()
cache = self.get_translations_cache()
translations = cache.get(str(locale))
if (translations is None):
translations = babel.support.Translations.load(dirname=self.flaskbb_translations, locales=locale, domain='messages')
if (not isinstance(translations, babel.support.Translations)):
return translations
for plugin in self.plugin_translations:
plugin_translation = babel.support.Translations.load(dirname=plugin, locales=locale, domain='messages')
translations.add(plugin_translation)
cache[str(locale)] = translations
return translations
|
'Return node list.
@rtype: list
@return: Node list.'
| @abstractmethod
def nodes(self):
| pass
|
'Return all edges in the graph.
@rtype: list
@return: List of all edges in the graph.'
| @abstractmethod
def edges(self):
| pass
|
'Return all nodes that are directly accessible from given node.
@type node: node
@param node: Node identifier
@rtype: list
@return: List of nodes directly accessible from given node.'
| @abstractmethod
def neighbors(self, node):
| pass
|
'Return whether the requested node exists.
@type node: node
@param node: Node identifier
@rtype: boolean
@return: Truth-value for node existence.'
| @abstractmethod
def has_node(self, node):
| pass
|
'Add given node to the graph.
@attention: While nodes can be of any type, it\'s strongly recommended
to use only numbers and single-line strings as node identifiers if you
intend to use write().
@type node: node
@param node: Node identifier.
@type attrs: list
@param attrs: List of node attributes specified as (attribute, value)
tuples.'
| @abstractmethod
def add_node(self, node, attrs=None):
| pass
|
'Add an edge to the graph connecting two nodes.
An edge, here, is a pair of nodes like C{(n, m)}.
@type edge: tuple
@param edge: Edge.
@type wt: number
@param wt: Edge weight.
@type label: string
@param label: Edge label.
@type attrs: list
@param attrs: List of node attributes specified as (attribute, value)
tuples.'
| @abstractmethod
def add_edge(self, edge, wt=1, label='', attrs=[]):
| pass
|
'Return whether an edge exists.
@type edge: tuple
@param edge: Edge.
@rtype: boolean
@return: Truth-value for edge existence.'
| @abstractmethod
def has_edge(self, edge):
| pass
|
'Get the weight of an edge.
@type edge: edge
@param edge: One edge.
@rtype: number
@return: Edge weight.'
| @abstractmethod
def edge_weight(self, edge):
| pass
|
'Remove a node from the graph.
@type node: node
@param node: Node identifier.'
| @abstractmethod
def del_node(self, node):
| pass
|
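For illustration, here is a minimal in-memory class that satisfies the abstract interface above, backed by a plain adjacency dict. It is a hypothetical sketch, not the library's own implementation.

```python
class DictGraph(object):
    """Tiny undirected graph satisfying nodes/edges/neighbors/add_*/has_*/del_node."""

    def __init__(self):
        self._adj = {}      # node -> set of neighbor nodes
        self._weights = {}  # (u, v) -> weight, stored in both directions

    def nodes(self):
        return list(self._adj)

    def edges(self):
        return list(self._weights)

    def neighbors(self, node):
        return list(self._adj[node])

    def has_node(self, node):
        return node in self._adj

    def add_node(self, node, attrs=None):
        self._adj.setdefault(node, set())

    def add_edge(self, edge, wt=1, label='', attrs=[]):
        u, v = edge
        self._adj.setdefault(u, set()).add(v)
        self._adj.setdefault(v, set()).add(u)
        self._weights[(u, v)] = self._weights[(v, u)] = wt

    def has_edge(self, edge):
        return tuple(edge) in self._weights

    def edge_weight(self, edge):
        return self._weights[tuple(edge)]

    def del_node(self, node):
        for neighbor in self._adj.pop(node, set()):
            self._adj[neighbor].discard(node)
            self._weights.pop((node, neighbor), None)
            self._weights.pop((neighbor, node), None)


g = DictGraph()
g.add_edge(('a', 'b'), wt=3)
assert g.has_edge(('b', 'a')) and g.edge_weight(('a', 'b')) == 3
```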
'Initialize the reader.
The `input` parameter refers to a file on the local filesystem,
which is expected to be in the UCI Bag-of-Words format.'
| def __init__(self, input):
| logger.info(('Initializing corpus reader from %s' % input))
self.input = input
with utils.smart_open(self.input) as fin:
self.num_docs = self.num_terms = self.num_nnz = 0
try:
self.num_docs = int(next(fin).strip())
self.num_terms = int(next(fin).strip())
self.num_nnz = int(next(fin).strip())
except StopIteration:
pass
logger.info(('accepted corpus with %i documents, %i features, %i non-zero entries' % (self.num_docs, self.num_terms, self.num_nnz)))
|
'Write blank header lines. Will be updated later, once corpus stats are known.'
| def write_headers(self):
| for _ in range(3):
self.fout.write(self.FAKE_HEADER)
self.last_docno = (-1)
self.headers_written = True
|
'Update headers with actual values.'
| def update_headers(self, num_docs, num_terms, num_nnz):
| offset = 0
values = [utils.to_utf8(str(n)) for n in [num_docs, num_terms, num_nnz]]
for value in values:
if (len(value) > len(self.FAKE_HEADER)):
raise ValueError('Invalid header: value too large!')
self.fout.seek(offset)
self.fout.write(value)
offset += len(self.FAKE_HEADER)
|
'Interpret a matrix in UCI bag-of-words format as a streamed gensim corpus
(yielding one document at a time).'
| def __iter__(self):
| for (docId, doc) in super(UciCorpus, self).__iter__():
(yield doc)
|
'Utility method to generate gensim-style Dictionary directly from
the corpus and vocabulary data.'
| def create_dictionary(self):
| dictionary = Dictionary()
dictionary.dfs = defaultdict(int)
dictionary.id2token = self.id2word
dictionary.token2id = dict(((v, k) for (k, v) in iteritems(self.id2word)))
dictionary.num_docs = self.num_docs
dictionary.num_nnz = self.num_nnz
for (docno, doc) in enumerate(self):
if ((docno % 10000) == 0):
logger.info(('PROGRESS: processing document %i of %i' % (docno, self.num_docs)))
for (word, count) in doc:
dictionary.dfs[word] += 1
dictionary.num_pos += count
return dictionary
|
'Save a corpus in the UCI Bag-of-Words format.
There are actually two files saved: `fname` and `fname.vocab`, where
`fname.vocab` is the vocabulary file.
This function is automatically called by `UciCorpus.serialize`; don\'t
call it directly, call `serialize` instead.'
| @staticmethod
def save_corpus(fname, corpus, id2word=None, progress_cnt=10000, metadata=False):
| if (id2word is None):
logger.info('no word id mapping provided; initializing from corpus')
id2word = utils.dict_from_corpus(corpus)
num_terms = len(id2word)
else:
num_terms = (1 + max(([(-1)] + id2word.keys())))
fname_vocab = utils.smart_extension(fname, '.vocab')
logger.info(('saving vocabulary of %i words to %s' % (num_terms, fname_vocab)))
with utils.smart_open(fname_vocab, 'wb') as fout:
for featureid in xrange(num_terms):
fout.write(utils.to_utf8(('%s\n' % id2word.get(featureid, '---'))))
logger.info(('storing corpus in UCI Bag-of-Words format: %s' % fname))
return UciWriter.write_corpus(fname, corpus, index=True, progress_cnt=progress_cnt)
|
'Interpret a matrix in Matrix Market format as a streamed gensim corpus
(yielding one document at a time).'
| def __iter__(self):
| for (doc_id, doc) in super(MmCorpus, self).__iter__():
(yield doc)
|
'Save a corpus in the Matrix Market format to disk.
This function is automatically called by `MmCorpus.serialize`; don\'t
call it directly, call `serialize` instead.'
| @staticmethod
def save_corpus(fname, corpus, id2word=None, progress_cnt=1000, metadata=False):
| logger.info(('storing corpus in Matrix Market format to %s' % fname))
num_terms = (len(id2word) if (id2word is not None) else None)
return matutils.MmWriter.write_corpus(fname, corpus, num_terms=num_terms, index=True, progress_cnt=progress_cnt, metadata=metadata)
|
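A quick round trip with a toy corpus, assuming a gensim installation and a writable ``/tmp`` (the file name is arbitrary):

```python
from gensim.corpora import MmCorpus

toy_corpus = [[(0, 1.0), (1, 2.0)], [(1, 0.5)], []]
MmCorpus.serialize('/tmp/toy.mm', toy_corpus)  # writes /tmp/toy.mm and /tmp/toy.mm.index

mm = MmCorpus('/tmp/toy.mm')   # streamed corpus with random access via the index
print(len(mm))                 # 3
print(list(mm[1]))             # [(1, 0.5)]
```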
'If `documents` are given, use them to initialize Dictionary (see `add_documents()`).'
| def __init__(self, documents=None, prune_at=2000000):
| self.token2id = {}
self.id2token = {}
self.dfs = {}
self.num_docs = 0
self.num_pos = 0
self.num_nnz = 0
if (documents is not None):
self.add_documents(documents, prune_at=prune_at)
|
'Return a list of all token ids.'
| def keys(self):
| return list(self.token2id.values())
|
'Return the number of token->id mappings in the dictionary.'
| def __len__(self):
| return len(self.token2id)
|
'Update dictionary from a collection of documents. Each document is a list
of tokens = **tokenized and normalized** strings (either utf8 or unicode).
This is a convenience wrapper for calling `doc2bow` on each document
with `allow_update=True`, which also prunes infrequent words, keeping the
total number of unique words <= `prune_at`. This is to save memory on very
large inputs. To disable this pruning, set `prune_at=None`.
>>> print(Dictionary(["máma mele maso".split(), "ema má máma".split()]))
Dictionary(5 unique tokens)'
| def add_documents(self, documents, prune_at=2000000):
| for (docno, document) in enumerate(documents):
if ((docno % 10000) == 0):
if ((prune_at is not None) and (len(self) > prune_at)):
self.filter_extremes(no_below=0, no_above=1.0, keep_n=prune_at)
logger.info('adding document #%i to %s', docno, self)
self.doc2bow(document, allow_update=True)
logger.info('built %s from %i documents (total %i corpus positions)', self, self.num_docs, self.num_pos)
|
'Convert `document` (a list of words) into the bag-of-words format = list
of `(token_id, token_count)` 2-tuples. Each word is assumed to be a
**tokenized and normalized** string (either unicode or utf8-encoded). No further preprocessing
is done on the words in `document`; apply tokenization, stemming etc. before
calling this method.
If `allow_update` is set, then also update dictionary in the process: create
ids for new words. At the same time, update document frequencies -- for
each word appearing in this document, increase its document frequency (`self.dfs`)
by one.
If `allow_update` is **not** set, this function is `const`, aka read-only.'
| def doc2bow(self, document, allow_update=False, return_missing=False):
| if isinstance(document, string_types):
raise TypeError('doc2bow expects an array of unicode tokens on input, not a single string')
counter = defaultdict(int)
for w in document:
counter[(w if isinstance(w, unicode) else unicode(w, 'utf-8'))] += 1
token2id = self.token2id
if (allow_update or return_missing):
missing = dict(((w, freq) for (w, freq) in iteritems(counter) if (w not in token2id)))
if allow_update:
for w in missing:
token2id[w] = len(token2id)
result = dict(((token2id[w], freq) for (w, freq) in iteritems(counter) if (w in token2id)))
if allow_update:
self.num_docs += 1
self.num_pos += sum(itervalues(counter))
self.num_nnz += len(result)
dfs = self.dfs
for tokenid in iterkeys(result):
dfs[tokenid] = (dfs.get(tokenid, 0) + 1)
result = sorted(iteritems(result))
if return_missing:
return (result, missing)
else:
return result
|
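A tiny ``doc2bow`` session, assuming gensim is installed; the printed ids depend on the order in which tokens were first seen:

```python
from gensim.corpora import Dictionary

texts = [['human', 'computer', 'interface'], ['graph', 'trees', 'computer']]
dct = Dictionary(texts)

print(dct.doc2bow(['computer', 'computer', 'graph']))
# e.g. [(1, 2), (3, 1)] -- (token_id, count) pairs for known tokens only
print(dct.doc2bow(['computer', 'unseen'], return_missing=True))
# e.g. ([(1, 1)], {'unseen': 1}) -- unseen tokens are reported separately
```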
'Filter out tokens that appear in
1. less than `no_below` documents (absolute number) or
2. more than `no_above` documents (fraction of total corpus size, *not*
absolute number).
3. if tokens are given in keep_tokens (list of strings), they will be kept regardless of
the `no_below` and `no_above` settings
4. after (1), (2) and (3), keep only the first `keep_n` most frequent tokens (or
keep all if `None`).
After the pruning, shrink resulting gaps in word ids.
**Note**: Due to the gap shrinking, the same word may have a different
word id before and after the call to this function!'
| def filter_extremes(self, no_below=5, no_above=0.5, keep_n=100000, keep_tokens=None):
| no_above_abs = int((no_above * self.num_docs))
if keep_tokens:
keep_ids = [self.token2id[v] for v in keep_tokens if (v in self.token2id)]
good_ids = (v for v in itervalues(self.token2id) if ((no_below <= self.dfs.get(v, 0) <= no_above_abs) or (v in keep_ids)))
else:
good_ids = (v for v in itervalues(self.token2id) if (no_below <= self.dfs.get(v, 0) <= no_above_abs))
good_ids = sorted(good_ids, key=self.dfs.get, reverse=True)
if (keep_n is not None):
good_ids = good_ids[:keep_n]
bad_words = [(self[id], self.dfs.get(id, 0)) for id in set(self).difference(good_ids)]
logger.info('discarding %i tokens: %s...', (len(self) - len(good_ids)), bad_words[:10])
logger.info('keeping %i tokens which were in no less than %i and no more than %i (=%.1f%%) documents', len(good_ids), no_below, no_above_abs, (100.0 * no_above))
self.filter_tokens(good_ids=good_ids)
logger.info('resulting dictionary: %s', self)
|
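A small worked example of the pruning rules, assuming gensim is installed; the thresholds are arbitrary:

```python
from gensim.corpora import Dictionary

docs = [['cat', 'dog'], ['cat', 'mouse'], ['cat', 'dog', 'bird']]
dct = Dictionary(docs)

# no_above_abs = int(0.9 * 3) = 2, so 'cat' (in 3 docs) is too frequent and
# 'mouse'/'bird' (1 doc each) fall below no_below=2; only 'dog' survives.
dct.filter_extremes(no_below=2, no_above=0.9, keep_n=10)
print(dct.token2id)  # {'dog': 0} -- the id was re-assigned after gap shrinking
```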
'Filter out the \'remove_n\' most frequent tokens that appear in the documents.
After the pruning, shrink resulting gaps in word ids.
**Note**: Due to the gap shrinking, the same word may have a different
word id before and after the call to this function!'
| def filter_n_most_frequent(self, remove_n):
| most_frequent_ids = (v for v in itervalues(self.token2id))
most_frequent_ids = sorted(most_frequent_ids, key=self.dfs.get, reverse=True)
most_frequent_ids = most_frequent_ids[:remove_n]
most_frequent_words = [(self[id], self.dfs.get(id, 0)) for id in most_frequent_ids]
logger.info('discarding %i tokens: %s...', len(most_frequent_ids), most_frequent_words[:10])
self.filter_tokens(bad_ids=most_frequent_ids)
logger.info(('resulting dictionary: %s' % self))
|
'Remove the selected `bad_ids` tokens from all dictionary mappings, or, keep
selected `good_ids` in the mapping and remove the rest.
`bad_ids` and `good_ids` are collections of word ids to be removed.'
| def filter_tokens(self, bad_ids=None, good_ids=None):
| if (bad_ids is not None):
bad_ids = set(bad_ids)
self.token2id = dict(((token, tokenid) for (token, tokenid) in iteritems(self.token2id) if (tokenid not in bad_ids)))
self.dfs = dict(((tokenid, freq) for (tokenid, freq) in iteritems(self.dfs) if (tokenid not in bad_ids)))
if (good_ids is not None):
good_ids = set(good_ids)
self.token2id = dict(((token, tokenid) for (token, tokenid) in iteritems(self.token2id) if (tokenid in good_ids)))
self.dfs = dict(((tokenid, freq) for (tokenid, freq) in iteritems(self.dfs) if (tokenid in good_ids)))
self.compactify()
|
'Assign new word ids to all words.
This is done to make the ids more compact, e.g. after some tokens have
been removed via :func:`filter_tokens` and there are gaps in the id series.
Calling this method will remove the gaps.'
| def compactify(self):
| logger.debug('rebuilding dictionary, shrinking gaps')
idmap = dict(izip(itervalues(self.token2id), xrange(len(self.token2id))))
self.token2id = dict(((token, idmap[tokenid]) for (token, tokenid) in iteritems(self.token2id)))
self.id2token = {}
self.dfs = dict(((idmap[tokenid], freq) for (tokenid, freq) in iteritems(self.dfs)))
|
'Save this Dictionary to a text file, in format:
`num_docs`
`id[TAB]word_utf8[TAB]document frequency[NEWLINE]`. Sorted by word,
or by decreasing word frequency.
Note: the text format should be used for corpus inspection. Use `save`/`load`
to store in binary format (pickle) for improved performance.'
| def save_as_text(self, fname, sort_by_word=True):
| logger.info('saving dictionary mapping to %s', fname)
with utils.smart_open(fname, 'wb') as fout:
numdocs_line = ('%d\n' % self.num_docs)
fout.write(utils.to_utf8(numdocs_line))
if sort_by_word:
for (token, tokenid) in sorted(iteritems(self.token2id)):
line = ('%i\t%s\t%i\n' % (tokenid, token, self.dfs.get(tokenid, 0)))
fout.write(utils.to_utf8(line))
else:
for (tokenid, freq) in sorted(iteritems(self.dfs), key=(lambda item: (- item[1]))):
line = ('%i\t%s\t%i\n' % (tokenid, self[tokenid], freq))
fout.write(utils.to_utf8(line))
|
'Merge another dictionary into this dictionary, mapping same tokens to the
same ids and new tokens to new ids. The purpose is to merge two corpora
created using two different dictionaries, one from `self` and one from `other`.
`other` can be any id=>word mapping (a dict, a Dictionary object, ...).
Return a transformation object which, when accessed as `result[doc_from_other_corpus]`,
will convert documents from a corpus built using the `other` dictionary
into a document using the new, merged dictionary (see :class:`gensim.interfaces.TransformationABC`).
Example:
>>> dict1 = Dictionary(some_documents)
>>> dict2 = Dictionary(other_documents) # ids not compatible with dict1!
>>> dict2_to_dict1 = dict1.merge_with(dict2)
>>> # now we can merge corpora from the two incompatible dictionaries into one
>>> merged_corpus = itertools.chain(some_corpus_from_dict1, dict2_to_dict1[some_corpus_from_dict2])'
| def merge_with(self, other):
| old2new = {}
for (other_id, other_token) in iteritems(other):
if (other_token in self.token2id):
new_id = self.token2id[other_token]
else:
new_id = len(self.token2id)
self.token2id[other_token] = new_id
self.dfs[new_id] = 0
old2new[other_id] = new_id
try:
self.dfs[new_id] += other.dfs[other_id]
except:
pass
try:
self.num_docs += other.num_docs
self.num_nnz += other.num_nnz
self.num_pos += other.num_pos
except:
pass
import gensim.models
return gensim.models.VocabTransform(old2new)
|
'Load a previously stored Dictionary from a text file.
Mirror function to `save_as_text`.'
| @staticmethod
def load_from_text(fname):
| result = Dictionary()
with utils.smart_open(fname) as f:
for (lineno, line) in enumerate(f):
line = utils.to_unicode(line)
if (lineno == 0):
if line.strip().isdigit():
result.num_docs = int(line.strip())
continue
else:
logging.warning('Text does not contain num_docs on the first line.')
try:
(wordid, word, docfreq) = line[:(-1)].split('\t')
except Exception:
raise ValueError(('invalid line in dictionary file %s: %s' % (fname, line.strip())))
wordid = int(wordid)
if (word in result.token2id):
raise KeyError(('token %s is defined as ID %d and as ID %d' % (word, wordid, result.token2id[word])))
result.token2id[word] = wordid
result.dfs[wordid] = int(docfreq)
return result
|
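Round-tripping the text format, assuming gensim is installed; the path is arbitrary:

```python
from gensim.corpora import Dictionary

dct = Dictionary([['alpha', 'beta'], ['beta', 'gamma']])
dct.save_as_text('/tmp/dict.txt')  # num_docs line, then id<TAB>word<TAB>df lines

same = Dictionary.load_from_text('/tmp/dict.txt')
assert same.token2id == dct.token2id
assert same.dfs == dct.dfs
```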
'Create Dictionary from an existing corpus. This can be useful if you only
have a term-document BOW matrix (represented by `corpus`), but not the
original text corpus.
This will scan the term-document count matrix for all word ids that
appear in it, then construct and return Dictionary which maps each
`word_id -> id2word[word_id]`.
`id2word` is an optional dictionary that maps the `word_id` to a token. In
case `id2word` isn\'t specified the mapping `id2word[word_id] = str(word_id)`
will be used.'
| @staticmethod
def from_corpus(corpus, id2word=None):
| result = Dictionary()
max_id = (-1)
for (docno, document) in enumerate(corpus):
if ((docno % 10000) == 0):
logger.info('adding document #%i to %s', docno, result)
result.num_docs += 1
result.num_nnz += len(document)
for (wordid, word_freq) in document:
max_id = max(wordid, max_id)
result.num_pos += word_freq
result.dfs[wordid] = (result.dfs.get(wordid, 0) + 1)
if (id2word is None):
result.token2id = dict(((unicode(i), i) for i in xrange((max_id + 1))))
else:
result.token2id = dict(((utils.to_unicode(token), id) for (id, token) in iteritems(id2word)))
for id in itervalues(result.token2id):
result.dfs[id] = result.dfs.get(id, 0)
logger.info('built %s from %i documents (total %i corpus positions)', result, result.num_docs, result.num_pos)
return result
|
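Recovering a Dictionary from a bag-of-words-only corpus, assuming gensim is installed; with no ``id2word`` the tokens default to ``str(word_id)``:

```python
from gensim.corpora import Dictionary

bow_corpus = [[(0, 2), (1, 1)], [(1, 3), (2, 1)]]
dct = Dictionary.from_corpus(bow_corpus)

print(dct.token2id)  # {'0': 0, '1': 1, '2': 2}
print(dct.dfs)       # {0: 1, 1: 2, 2: 1} -- word 1 appears in both documents
```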
'By default, keep track of debug statistics and mappings. If you find yourself
running out of memory (or are sure you don\'t need the debug info), set
`debug=False`.'
| def __init__(self, documents=None, id_range=32000, myhash=zlib.adler32, debug=True):
| self.myhash = myhash
self.id_range = id_range
self.debug = debug
self.token2id = {}
self.id2token = {}
self.dfs = {}
self.dfs_debug = {}
self.num_docs = 0
self.num_pos = 0
self.num_nnz = 0
self.allow_update = True
if (documents is not None):
self.add_documents(documents)
|
'Return all words that have mapped to the given id so far, as a set.
Only works if `self.debug` was enabled.'
| def __getitem__(self, tokenid):
| return self.id2token.get(tokenid, set())
|
'Calculate id of the given token. Also keep track of what words were mapped
to what ids, for debugging reasons.'
| def restricted_hash(self, token):
| h = (self.myhash(utils.to_utf8(token)) % self.id_range)
if self.debug:
self.token2id[token] = h
self.id2token.setdefault(h, set()).add(token)
return h
|
'Return the number of distinct ids = the entire dictionary size.'
| def __len__(self):
| return self.id_range
|
'Return a list of all token ids.'
| def keys(self):
| return range(len(self))
|
'Build dictionary from a collection of documents. Each document is a list
of tokens = **tokenized and normalized** utf-8 encoded strings.
This is only a convenience wrapper for calling `doc2bow` on each document
with `allow_update=True`.'
| def add_documents(self, documents):
| for (docno, document) in enumerate(documents):
if ((docno % 10000) == 0):
logger.info(('adding document #%i to %s' % (docno, self)))
_ = self.doc2bow(document, allow_update=True)
logger.info('built %s from %i documents (total %i corpus positions)', self, self.num_docs, self.num_pos)
|
'Convert `document` (a list of words) into the bag-of-words format = list
of `(token_id, token_count)` 2-tuples. Each word is assumed to be a
**tokenized and normalized** utf-8 encoded string. No further preprocessing
is done on the words in `document`; apply tokenization, stemming etc. before
calling this method.
If `allow_update` or `self.allow_update` is set, then also update dictionary
in the process: update overall corpus statistics and document frequencies.
For each id appearing in this document, increase its document frequency
(`self.dfs`) by one.'
| def doc2bow(self, document, allow_update=False, return_missing=False):
| result = {}
missing = {}
document = sorted(document)
for (word_norm, group) in itertools.groupby(document):
frequency = len(list(group))
tokenid = self.restricted_hash(word_norm)
result[tokenid] = (result.get(tokenid, 0) + frequency)
if self.debug:
self.dfs_debug[word_norm] = (self.dfs_debug.get(word_norm, 0) + 1)
if (allow_update or self.allow_update):
self.num_docs += 1
self.num_pos += len(document)
self.num_nnz += len(result)
if self.debug:
for tokenid in iterkeys(result):
self.dfs[tokenid] = (self.dfs.get(tokenid, 0) + 1)
result = sorted(iteritems(result))
if return_missing:
return (result, missing)
else:
return result
|
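The hashing trick in action, assuming gensim is installed; ``id_range`` is kept small here only to make the fixed bucket space obvious, and the printed bucket ids are illustrative:

```python
from gensim.corpora import HashDictionary

hd = HashDictionary(id_range=1000)   # fixed vocabulary size, never grows
bow = hd.doc2bow(['hello', 'world', 'hello'])

print(bow)            # e.g. [(281, 2), (964, 1)] -- ids are hash buckets
print(hd[bow[0][0]])  # set of tokens seen in that bucket (debug mode only)
```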
'Remove document frequency statistics for tokens that appear in
1. less than `no_below` documents (absolute number) or
2. more than `no_above` documents (fraction of total corpus size, *not*
absolute number).
3. after (1) and (2), keep only the first `keep_n` most frequent tokens (or
keep all if `None`).
**Note:** since HashDictionary\'s id range is fixed and doesn\'t depend on
the number of tokens seen, this doesn\'t really "remove" anything. It only
clears some supplementary statistics, for easier debugging and a smaller RAM
footprint.'
| def filter_extremes(self, no_below=5, no_above=0.5, keep_n=100000):
| no_above_abs = int((no_above * self.num_docs))
ok = [item for item in iteritems(self.dfs_debug) if (no_below <= item[1] <= no_above_abs)]
ok = frozenset((word for (word, freq) in sorted(ok, key=(lambda item: (- item[1])))[:keep_n]))
self.dfs_debug = dict(((word, freq) for (word, freq) in iteritems(self.dfs_debug) if (word in ok)))
self.token2id = dict(((token, tokenid) for (token, tokenid) in iteritems(self.token2id) if (token in self.dfs_debug)))
self.id2token = dict(((tokenid, set((token for token in tokens if (token in self.dfs_debug)))) for (tokenid, tokens) in iteritems(self.id2token)))
self.dfs = dict(((tokenid, freq) for (tokenid, freq) in iteritems(self.dfs) if self.id2token.get(tokenid, set())))
logger.info('kept statistics for tokens which were in no less than %i and no more than %i (=%.1f%%) documents', no_below, no_above_abs, (100.0 * no_above))
|
'Save this HashDictionary to a text file, for easier debugging.
The format is:
`id[TAB]document frequency of this id[TAB]tab-separated set of words in UTF8 that map to this id[NEWLINE]`.
Note: use `save`/`load` to store in binary format instead (pickle).'
| def save_as_text(self, fname):
| logger.info(('saving HashDictionary mapping to %s' % fname))
with utils.smart_open(fname, 'wb') as fout:
for tokenid in self.keys():
words = sorted(self[tokenid])
if words:
words_df = [(word, self.dfs_debug.get(word, 0)) for word in words]
words_df = [('%s(%i)' % item) for item in sorted(words_df, key=(lambda item: (- item[1])))]
words_df = '\t'.join(words_df)
fout.write(utils.to_utf8(('%i\t%i\t%s\n' % (tokenid, self.dfs.get(tokenid, 0), words_df))))
|
'Initialize the corpus from a file.
`labels` = are class labels present in the input file? => skip the first column'
| def __init__(self, fname, labels):
| logger.info(('loading corpus from %s' % fname))
self.fname = fname
self.length = None
self.labels = labels
head = ''.join(itertools.islice(utils.smart_open(self.fname), 5))
self.headers = csv.Sniffer().has_header(head)
self.dialect = csv.Sniffer().sniff(head)
logger.info(('sniffed CSV delimiter=%r, headers=%s' % (self.dialect.delimiter, self.headers)))
|
'Iterate over the corpus, returning one sparse vector at a time.'
| def __iter__(self):
| reader = csv.reader(utils.smart_open(self.fname), self.dialect)
if self.headers:
next(reader)
line_no = (-1)
for (line_no, line) in enumerate(reader):
if self.labels:
line.pop(0)
(yield list(enumerate(map(float, line))))
self.length = (line_no + 1)
|
'Initialize this abstract base class, by loading a previously saved index
from `index_fname` (or `fname.index` if `index_fname` is not set).
This index will allow subclasses to support the `corpus[docno]` syntax
(random access to document #`docno` in O(1)).
>>> # save corpus in SvmLightCorpus format with an index
>>> corpus = [[(1, 0.5)], [(0, 1.0), (1, 2.0)]]
>>> gensim.corpora.SvmLightCorpus.serialize(\'testfile.svmlight\', corpus)
>>> # load back as a document stream (*not* plain Python list)
>>> corpus_with_random_access = gensim.corpora.SvmLightCorpus(\'testfile.svmlight\')
>>> print(corpus_with_random_access[1])
[(0, 1.0), (1, 2.0)]'
| def __init__(self, fname, index_fname=None):
| try:
if (index_fname is None):
index_fname = utils.smart_extension(fname, '.index')
self.index = utils.unpickle(index_fname)
self.index = numpy.asarray(self.index)
logger.info(('loaded corpus index from %s' % index_fname))
except:
self.index = None
self.length = None
|
'Iterate through the document stream `corpus`, saving the documents to `fname`
and recording byte offset of each document. Save the resulting index
structure to file `index_fname` (or `fname.index` if `index_fname` is not set).
This relies on the underlying corpus class `serializer` providing (in
addition to standard iteration):
* `save_corpus` method that returns a sequence of byte offsets, one for
each saved document,
* the `docbyoffset(offset)` method, which returns a document
positioned at `offset` bytes within the persistent storage (file).
* metadata if set to true will ensure that serialize will write out article titles to a pickle file.
Example:
>>> MmCorpus.serialize(\'test.mm\', corpus)
>>> mm = MmCorpus(\'test.mm\') # `mm` document stream now has random access
>>> print(mm[42]) # retrieve document no. 42, etc.'
| @classmethod
def serialize(serializer, fname, corpus, id2word=None, index_fname=None, progress_cnt=None, labels=None, metadata=False):
| if (getattr(corpus, 'fname', None) == fname):
raise ValueError(('identical input vs. output corpus filename, refusing to serialize: %s' % fname))
if (index_fname is None):
index_fname = utils.smart_extension(fname, '.index')
if (progress_cnt is not None):
if (labels is not None):
offsets = serializer.save_corpus(fname, corpus, id2word, labels=labels, progress_cnt=progress_cnt, metadata=metadata)
else:
offsets = serializer.save_corpus(fname, corpus, id2word, progress_cnt=progress_cnt, metadata=metadata)
elif (labels is not None):
offsets = serializer.save_corpus(fname, corpus, id2word, labels=labels, metadata=metadata)
else:
offsets = serializer.save_corpus(fname, corpus, id2word, metadata=metadata)
if (offsets is None):
raise NotImplementedError(("called serialize on class %s which doesn't support indexing!" % serializer.__name__))
logger.info(('saving %s index to %s' % (serializer.__name__, index_fname)))
utils.pickle(offsets, index_fname)
|
'Return the index length if the corpus is indexed. Otherwise, make a pass
over self to calculate the corpus length and cache this number.'
| def __len__(self):
| if (self.index is not None):
return len(self.index)
if (self.length is None):
logger.info('caching corpus length')
self.length = sum((1 for doc in self))
return self.length
|
'Initializes the dataset. If `output_prefix` is not found,
builds the shards.
:type output_prefix: str
:param output_prefix: The absolute path to the file from which shard
filenames should be derived. The individual shards will be saved
as `output_prefix.0`, `output_prefix.1`, etc.
The `output_prefix` path then works as the filename to which
the ShardedCorpus object itself will be automatically saved.
Normally, gensim corpora do not do this, but ShardedCorpus needs
to remember several serialization settings: namely the shard
size and whether it was serialized in dense or sparse format. By
saving automatically, any new ShardedCorpus with the same
`output_prefix` will be able to find the information about the
data serialized with the given prefix.
If you want to *overwrite* your data serialized with some output
prefix, set the `overwrite` flag to True.
Of course, you can save your corpus separately as well using
the `save()` method.
:type corpus: gensim.interfaces.CorpusABC
:param corpus: The source corpus from which to build the dataset.
:type dim: int
:param dim: Specify beforehand what the dimension of a dataset item
should be. This is useful when initializing from a corpus that
doesn\'t advertise its dimension, or when it does and you want to
check that the corpus matches the expected dimension. **If `dim`
is left unused and `corpus` does not provide its dimension in
an expected manner, initialization will fail.**
:type shardsize: int
:param shardsize: How many data points should be in one shard. More
data per shard means less shard reloading but higher memory usage
and vice versa.
:type overwrite: bool
:param overwrite: If set, will build dataset from given corpus even
if `output_prefix` already exists.
:type sparse_serialization: bool
:param sparse_serialization: If set, will save the data in a sparse
form (as csr matrices). This is to speed up retrieval when you
know you will be using sparse matrices.
..note::
This property **should not change** during the lifetime of
the dataset. (If you find out you need to change from a sparse
to a dense representation, the best practice is to create
another ShardedCorpus object.)
:type sparse_retrieval: bool
:param sparse_retrieval: If set, will retrieve data as sparse vectors
(numpy csr matrices). If unset, will return ndarrays.
Note that retrieval speed for this option depends on how the dataset
was serialized. If `sparse_serialization` was set, then setting
`sparse_retrieval` will be faster. However, if the two settings
do not correspond, the conversion on the fly will slow the dataset
down.
:type gensim: bool
:param gensim: If set, will convert the output to gensim
sparse vectors (list of tuples (id, value)) to make it behave like
any other gensim corpus. This **will** slow the dataset down.'
| def __init__(self, output_prefix, corpus, dim=None, shardsize=4096, overwrite=False, sparse_serialization=False, sparse_retrieval=False, gensim=False):
| self.output_prefix = output_prefix
self.shardsize = shardsize
self.n_docs = 0
self.offsets = []
self.n_shards = 0
self.dim = dim
self.sparse_serialization = sparse_serialization
self.sparse_retrieval = sparse_retrieval
self.gensim = gensim
self.current_shard = None
self.current_shard_n = None
self.current_offset = None
logger.info('Initializing sharded corpus with prefix {0}'.format(output_prefix))
if ((not os.path.isfile(output_prefix)) or overwrite):
logger.info('Building from corpus...')
self.init_shards(output_prefix, corpus, shardsize)
logger.info('Saving ShardedCorpus object to {0}'.format(self.output_prefix))
self.save()
else:
logger.info('Cloning existing...')
self.init_by_clone()
|
'Initialize shards from the corpus.'
| def init_shards(self, output_prefix, corpus, shardsize=4096, dtype=_default_dtype):
| if (not gensim.utils.is_corpus(corpus)):
raise ValueError('Cannot initialize shards without a corpus to read from! (Got corpus type: {0})'.format(type(corpus)))
proposed_dim = self._guess_n_features(corpus)
if (proposed_dim != self.dim):
if (self.dim is None):
logger.info('Deriving dataset dimension from corpus: {0}'.format(proposed_dim))
else:
logger.warning('Dataset dimension derived from input corpus differs from initialization argument, using corpus. (corpus {0}, init arg {1})'.format(proposed_dim, self.dim))
self.dim = proposed_dim
self.offsets = [0]
start_time = time.clock()
logger.info('Running init from corpus.')
for (n, doc_chunk) in enumerate(gensim.utils.grouper(corpus, chunksize=shardsize)):
logger.info('Chunk no. {0} at {1} s'.format(n, (time.clock() - start_time)))
current_shard = numpy.zeros((len(doc_chunk), self.dim), dtype=dtype)
logger.debug('Current chunk dimension: {0} x {1}'.format(len(doc_chunk), self.dim))
for (i, doc) in enumerate(doc_chunk):
doc = dict(doc)
current_shard[i][list(doc)] = list(gensim.matutils.itervalues(doc))
if self.sparse_serialization:
current_shard = sparse.csr_matrix(current_shard)
self.save_shard(current_shard)
end_time = time.clock()
logger.info('Built {0} shards in {1} s.'.format(self.n_shards, (end_time - start_time)))
|
'Initialize by copying over attributes of another ShardedCorpus
instance saved to the output_prefix given at __init__().'
| def init_by_clone(self):
| temp = self.__class__.load(self.output_prefix)
self.n_shards = temp.n_shards
self.n_docs = temp.n_docs
self.offsets = temp.offsets
if (temp.dim != self.dim):
if (self.dim is None):
logger.info('Loaded dataset dimension: {0}'.format(temp.dim))
else:
logger.warning('Loaded dataset dimension differs from init arg dimension, using loaded dim. (loaded {0}, init {1})'.format(temp.dim, self.dim))
self.dim = temp.dim
|
'Pickle the given shard. If `n` is not given, will consider the shard
a new one.
If `filename` is given, will use that file name instead of generating
one.'
| def save_shard(self, shard, n=None, filename=None):
| new_shard = False
if (n is None):
n = self.n_shards
new_shard = True
if (not filename):
filename = self._shard_name(n)
gensim.utils.pickle(shard, filename)
if new_shard:
self.offsets.append((self.offsets[(-1)] + shard.shape[0]))
self.n_docs += shard.shape[0]
self.n_shards += 1
|
'Load (unpickle) the n-th shard as the "live" part of the dataset
into the Dataset object.'
| def load_shard(self, n):
| if (self.current_shard_n == n):
return
filename = self._shard_name(n)
if (not os.path.isfile(filename)):
raise ValueError('Attempting to load nonexistent shard no. {0}'.format(n))
shard = gensim.utils.unpickle(filename)
self.current_shard = shard
self.current_shard_n = n
self.current_offset = self.offsets[n]
|
'Reset to no shard at all. Used for saving.'
| def reset(self):
| self.current_shard = None
self.current_shard_n = None
self.current_offset = None
|
'Determine which shard the given offset belongs to. If the offset
is greater than the number of available documents, raises a
`ValueError`.
Assumes that all shards have the same size.'
| def shard_by_offset(self, offset):
| k = int((offset / self.shardsize))
if (offset >= self.n_docs):
raise ValueError('Too high offset specified ({0}), available docs: {1}'.format(offset, self.n_docs))
if (offset < 0):
raise ValueError('Negative offset {0} currently not supported.'.format(offset))
return k
k = (-1)
for (i, o) in enumerate(self.offsets):
if (o > offset):
k = (i - 1)
break
return k
|
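A worked example of the fixed-size shard arithmetic above (values are illustrative):

```python
shardsize, offset = 4096, 10000
print(offset // shardsize)  # 2 -> document 10000 lives in shard no. 2 (docs 8192..12287)
```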
'Determine whether the given offset falls within the current shard.'
| def in_current(self, offset):
| return ((self.current_offset <= offset) and (offset < self.offsets[(self.current_shard_n + 1)]))
|
'Determine whether the given offset falls within the next shard.
This is a very small speedup: typically, we will be iterating through
the data forward. Could save considerable time with a very large number
of smaller shards.'
| def in_next(self, offset):
| if (self.current_shard_n == self.n_shards):
return False
return ((self.offsets[(self.current_shard_n + 1)] <= offset) and (offset < self.offsets[(self.current_shard_n + 2)]))
|
'Re-process the dataset to new shard size. This may take pretty long.
Also, note that you need some space on disk for this one (we\'re
assuming there is enough disk space for double the size of the dataset
and that there is enough memory for old + new shardsize).
:type shardsize: int
:param shardsize: The new shard size.'
| def resize_shards(self, shardsize):
| n_new_shards = int(math.floor((self.n_docs / float(shardsize))))
if ((self.n_docs % shardsize) != 0):
n_new_shards += 1
new_shard_names = []
new_offsets = [0]
for new_shard_idx in xrange(n_new_shards):
new_start = (shardsize * new_shard_idx)
new_stop = (new_start + shardsize)
if (new_stop > self.n_docs):
assert (new_shard_idx == (n_new_shards - 1)), 'Shard no. {0} that ends at {1} over last document ({2}) is not the last projected shard ({3})???'.format(new_shard_idx, new_stop, self.n_docs, n_new_shards)
new_stop = self.n_docs
new_shard = self[new_start:new_stop]
new_shard_name = self._resized_shard_name(new_shard_idx)
new_shard_names.append(new_shard_name)
try:
self.save_shard(new_shard, new_shard_idx, new_shard_name)
except Exception:
for new_shard_name in new_shard_names:
os.remove(new_shard_name)
raise
new_offsets.append(new_stop)
old_shard_names = [self._shard_name(n) for n in xrange(self.n_shards)]
try:
for (old_shard_n, old_shard_name) in enumerate(old_shard_names):
os.remove(old_shard_name)
except Exception as e:
logger.error('Exception occurred during old shard no. {0} removal: {1}.\nAttempting to at least move new shards in.'.format(old_shard_n, str(e)))
finally:
try:
for (shard_n, new_shard_name) in enumerate(new_shard_names):
os.rename(new_shard_name, self._shard_name(shard_n))
except Exception as e:
print(e)
raise RuntimeError('Resizing completely failed for some reason. Sorry, dataset is probably ruined...')
finally:
self.n_shards = n_new_shards
self.offsets = new_offsets
self.shardsize = shardsize
self.reset()
|
'Generate the name for the n-th shard.'
| def _shard_name(self, n):
| return ((self.output_prefix + '.') + str(n))
|
'Generate the name for the n-th new shard temporary file when
resizing dataset. The file will then be re-named to standard shard name.'
| def _resized_shard_name(self, n):
| return ((self.output_prefix + '.resize-temp.') + str(n))
|
'Attempt to guess number of features in `corpus`.'
| def _guess_n_features(self, corpus):
| n_features = None
if hasattr(corpus, 'dim'):
n_features = corpus.dim
elif hasattr(corpus, 'dictionary'):
n_features = len(corpus.dictionary)
elif hasattr(corpus, 'n_out'):
n_features = corpus.n_out
elif hasattr(corpus, 'num_terms'):
n_features = corpus.num_terms
elif isinstance(corpus, TransformedCorpus):
try:
return self._guess_n_features(corpus.obj)
except TypeError:
return self._guess_n_features(corpus.corpus)
elif (not self.dim):
raise TypeError("Couldn't find number of features, refusing to guess (dimension set to {0},type of corpus: {1}).".format(self.dim, type(corpus)))
else:
logger.warning("Couldn't find number of features, trusting supplied dimension ({0})".format(self.dim))
n_features = self.dim
if (self.dim and (n_features != self.dim)):
logger.warning('Discovered inconsistent dataset dim ({0}) and feature count from corpus ({1}). Coercing to dimension given by argument.'.format(self.dim, n_features))
return n_features
|
'As opposed to getitem, this one only accepts ints as offsets.'
| def get_by_offset(self, offset):
| self._ensure_shard(offset)
result = self.current_shard[(offset - self.current_offset)]
return result
|
'Retrieve the given row of the dataset. Supports slice notation.'
| def __getitem__(self, offset):
| if isinstance(offset, list):
if self.sparse_serialization:
l_result = sparse.vstack([self.get_by_offset(i) for i in offset])
if self.gensim:
l_result = self._getitem_sparse2gensim(l_result)
elif (not self.sparse_retrieval):
l_result = numpy.array(l_result.todense())
else:
l_result = numpy.array([self.get_by_offset(i) for i in offset])
if self.gensim:
l_result = self._getitem_dense2gensim(l_result)
elif self.sparse_retrieval:
l_result = sparse.csr_matrix(l_result)
return l_result
elif isinstance(offset, slice):
start = offset.start
stop = offset.stop
if (stop > self.n_docs):
raise IndexError('Requested slice offset {0} out of range ({1} docs)'.format(stop, self.n_docs))
first_shard = self.shard_by_offset(start)
last_shard = (self.n_shards - 1)
if (not (stop == self.n_docs)):
last_shard = self.shard_by_offset(stop)
self.load_shard(first_shard)
if (first_shard == last_shard):
s_result = self.current_shard[(start - self.current_offset):(stop - self.current_offset)]
s_result = self._getitem_format(s_result)
return s_result
s_result = numpy.zeros(((stop - start), self.dim), dtype=self.current_shard.dtype)
if self.sparse_serialization:
s_result = sparse.csr_matrix((0, self.dim), dtype=self.current_shard.dtype)
result_start = 0
result_stop = (self.offsets[(self.current_shard_n + 1)] - start)
shard_start = (start - self.current_offset)
shard_stop = (self.offsets[(self.current_shard_n + 1)] - self.current_offset)
s_result = self.__add_to_slice(s_result, result_start, result_stop, shard_start, shard_stop)
for shard_n in xrange((first_shard + 1), last_shard):
self.load_shard(shard_n)
result_start = result_stop
result_stop += self.shardsize
shard_start = 0
shard_stop = self.shardsize
s_result = self.__add_to_slice(s_result, result_start, result_stop, shard_start, shard_stop)
self.load_shard(last_shard)
result_start = result_stop
result_stop += (stop - self.current_offset)
shard_start = 0
shard_stop = (stop - self.current_offset)
s_result = self.__add_to_slice(s_result, result_start, result_stop, shard_start, shard_stop)
s_result = self._getitem_format(s_result)
return s_result
else:
s_result = self.get_by_offset(offset)
s_result = self._getitem_format(s_result)
return s_result
|
'Add the rows of the current shard from `start` to `stop`
into rows `result_start` to `result_stop` of `s_result`.
Operation is based on the self.sparse_serialization setting. If the shard
contents are dense, then s_result is assumed to be an ndarray that
already supports row indices `result_start:result_stop`. If the shard
contents are sparse, assumes that s_result has `result_start` rows
and we should add them up to `result_stop`.
Returns the resulting s_result.'
| def __add_to_slice(self, s_result, result_start, result_stop, start, stop):
| if ((result_stop - result_start) != (stop - start)):
raise ValueError('Result start/stop range different than stop/start range ({0} - {1} vs. {2} - {3})'.format(result_start, result_stop, start, stop))
if (not self.sparse_serialization):
s_result[result_start:result_stop] = self.current_shard[start:stop]
return s_result
else:
if (s_result.shape != (result_start, self.dim)):
raise ValueError('Assumption about sparse s_result shape invalid: {0} expected rows, {1} real rows.'.format(result_start, s_result.shape[0]))
tmp_matrix = self.current_shard[start:stop]
s_result = sparse.vstack([s_result, tmp_matrix])
return s_result
|
'Change given sparse result matrix to gensim sparse vectors.
Uses the internals of the sparse matrix to make this fast.'
| def _getitem_sparse2gensim(self, result):
| def row_sparse2gensim(row_idx, csr_matrix):
indices = csr_matrix.indices[csr_matrix.indptr[row_idx]:csr_matrix.indptr[(row_idx + 1)]]
g_row = [(col_idx, csr_matrix[(row_idx, col_idx)]) for col_idx in indices]
return g_row
output = (row_sparse2gensim(i, result) for i in xrange(result.shape[0]))
return output
|
'Change given dense result matrix to gensim sparse vectors.'
| def _getitem_dense2gensim(self, result):
| if (len(result.shape) == 1):
output = gensim.matutils.full2sparse(result)
else:
output = (gensim.matutils.full2sparse(result[i]) for i in xrange(result.shape[0]))
return output
|
'Yield dataset items one by one (generator).'
| def __iter__(self):
| for i in xrange(len(self)):
(yield self[i])
|
'Save itself (the wrapper) in clean state (after calling `reset()`)
to the output_prefix file. If you wish to save to a different file,
use the `fname` argument as the first positional arg.'
| def save(self, *args, **kwargs):
| if (len(args) == 0):
args = tuple([self.output_prefix])
attrs_to_ignore = ['current_shard', 'current_shard_n', 'current_offset']
if ('ignore' not in kwargs):
kwargs['ignore'] = frozenset(attrs_to_ignore)
else:
kwargs['ignore'] = frozenset(([v for v in kwargs['ignore']] + attrs_to_ignore))
super(ShardedCorpus, self).save(*args, **kwargs)
|
'Load itself in clean state. `mmap` has no effect here.'
| @classmethod
def load(cls, fname, mmap=None):
| return super(ShardedCorpus, cls).load(fname, mmap)
|
'Implement a serialization interface. Do not call directly;
use the `serialize` method instead.
Note that you might need some ShardedCorpus init parameters, most
likely the dimension (`dim`). Again, pass these as `kwargs` to the
`serialize` method.
All this thing does is initialize a ShardedCorpus from a corpus
with the `output_prefix` argument set to the `fname` parameter
of this method. The initialization of a ShardedCorpus takes care of
serializing the data (in dense form) to shards.
Ignore the parameters id2word, progress_cnt and metadata. They
currently do nothing and are here only to provide a compatible
method signature with superclass.'
| @staticmethod
def save_corpus(fname, corpus, id2word=None, progress_cnt=1000, metadata=False, **kwargs):
| ShardedCorpus(fname, corpus, **kwargs)
|
'Iterate through the document stream `corpus`, saving the documents
as a ShardedCorpus to `fname`.
Use this method instead of calling `save_corpus` directly.
You may need to supply some kwargs that are used upon dataset creation
(namely: `dim`, unless the dataset can infer the dimension from the
given corpus).
Ignore the parameters id2word, index_fname, progress_cnt, labels
and metadata. They currently do nothing and are here only to
provide a compatible method signature with superclass.'
| @classmethod
def serialize(serializer, fname, corpus, id2word=None, index_fname=None, progress_cnt=None, labels=None, metadata=False, **kwargs):
| serializer.save_corpus(fname, corpus, id2word=id2word, progress_cnt=progress_cnt, metadata=metadata, **kwargs)
|
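A usage sketch, assuming an older gensim (3.x) that still ships ``ShardedCorpus``; the output prefix is arbitrary, and ``dim`` is passed because a plain list of documents does not advertise its dimensionality:

```python
from gensim.corpora.sharded_corpus import ShardedCorpus

corpus = [[(0, 1.0), (2, 3.0)], [(1, 2.0)], [(0, 1.0), (1, 1.0), (2, 1.0)]]
ShardedCorpus.serialize('/tmp/shards', corpus, dim=3, shardsize=2)

sc = ShardedCorpus.load('/tmp/shards')
print(sc[0])    # dense row by default, roughly [1. 0. 3.]
print(sc[1:3])  # a slice spanning both shards comes back as one 2 x 3 array
```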
'Initialize the corpus from a file.
`id2word` and `line2words` are optional parameters.
If provided, `id2word` is a dictionary mapping between word_ids (integers)
and words (strings). If not provided, the mapping is constructed from
the documents.
`line2words` is a function which converts lines into tokens. Defaults to
simple splitting on spaces.'
| def __init__(self, fname, id2word=None, line2words=split_on_space):
| IndexedCorpus.__init__(self, fname)
logger.info(('loading corpus from %s' % fname))
self.fname = fname
self.line2words = line2words
self.num_docs = self._calculate_num_docs()
if (not id2word):
logger.info('extracting vocabulary from the corpus')
all_terms = set()
self.use_wordids = False
for doc in self:
all_terms.update((word for (word, wordCnt) in doc))
all_terms = sorted(all_terms)
self.id2word = dict(izip(xrange(len(all_terms)), all_terms))
else:
logger.info(('using provided word mapping (%i ids)' % len(id2word)))
self.id2word = id2word
self.num_terms = len(self.word2id)
self.use_wordids = True
logger.info(('loaded corpus with %i documents and %i terms from %s' % (self.num_docs, self.num_terms, fname)))
|
'Iterate over the corpus, returning one bag-of-words vector at a time.'
| def __iter__(self):
| with utils.smart_open(self.fname) as fin:
for (lineno, line) in enumerate(fin):
if (lineno > 0):
(yield self.line2doc(line))
|
'Save a corpus in the List-of-words format.
This function is automatically called by `LowCorpus.serialize`; don\'t
call it directly, call `serialize` instead.'
| @staticmethod
def save_corpus(fname, corpus, id2word=None, metadata=False):
| if (id2word is None):
logger.info('no word id mapping provided; initializing from corpus')
id2word = utils.dict_from_corpus(corpus)
logger.info(('storing corpus in List-Of-Words format into %s' % fname))
truncated = 0
offsets = []
with utils.smart_open(fname, 'wb') as fout:
fout.write(utils.to_utf8(('%i\n' % len(corpus))))
for doc in corpus:
words = []
for (wordid, value) in doc:
if (abs((int(value) - value)) > 1e-06):
truncated += 1
words.extend(([utils.to_unicode(id2word[wordid])] * int(value)))
offsets.append(fout.tell())
fout.write(utils.to_utf8(('%s\n' % ' '.join(words))))
if truncated:
logger.warning(('List-of-words format can only save vectors with integer elements; %i float entries were truncated to integer value' % truncated))
return offsets
|
'Return the document stored at file position `offset`.'
| def docbyoffset(self, offset):
| with utils.smart_open(self.fname) as f:
f.seek(offset)
return self.line2doc(f.readline())
|
'Args:
input (str): path to top-level directory to traverse for corpus documents.
dictionary (Dictionary): if a dictionary is provided, it will not be updated
with the given corpus on initialization. If none is provided, a new dictionary
will be built for the given corpus. If no corpus is given, the dictionary will
remain uninitialized.
metadata (bool): True to yield metadata with each document, else False (default).
character_filters (iterable of callable): each will be applied to the text of each
document in order, and should return a single string with the modified text.
For Python 2, the original text will not be unicode, so it may be useful to
convert to unicode as the first character filter. The default character filters
lowercase, convert to unicode (strict utf8), perform ASCII-folding, then collapse
multiple whitespaces.
tokenizer (callable): takes as input the document text, preprocessed by all filters
in `character_filters`; should return an iterable of tokens (strings).
token_filters (iterable of callable): each will be applied to the iterable of tokens
in order, and should return another iterable of tokens. These filters can add,
remove, or replace tokens, or do nothing at all. The default token filters
remove tokens less than 3 characters long and remove stopwords using the list
in `gensim.parsing.preprocessing.STOPWORDS`.'
| def __init__(self, input=None, dictionary=None, metadata=False, character_filters=None, tokenizer=None, token_filters=None):
| self.input = input
self.metadata = metadata
self.character_filters = character_filters
if (self.character_filters is None):
self.character_filters = [lower_to_unicode, deaccent, strip_multiple_whitespaces]
self.tokenizer = tokenizer
if (self.tokenizer is None):
self.tokenizer = simple_tokenize
self.token_filters = token_filters
if (self.token_filters is None):
self.token_filters = [remove_short, remove_stopwords]
self.length = None
self.dictionary = None
self.init_dictionary(dictionary)
|
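A minimal, hypothetical subclass showing how ``get_texts`` is usually wired to this pipeline: one document per line of the ``input`` file, pushed through the character filters, tokenizer, and token filters described above (assumes gensim is installed and the path exists):

```python
from gensim.corpora.textcorpus import TextCorpus


class LineCorpus(TextCorpus):
    def get_texts(self):
        for line in self.getstream():         # raw lines from `input`
            yield self.preprocess_text(line)  # character filters -> tokenizer -> token filters


corpus = LineCorpus('/tmp/docs.txt')          # dictionary is built during __init__
for bow in corpus:                            # __iter__ -> dictionary.doc2bow(tokens)
    print(bow)
```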
'If `dictionary` is None, initialize to an empty Dictionary, and then if there
is an `input` for the corpus, add all documents from that `input`. If the
`dictionary` is already initialized, simply set it as the corpus\'s `dictionary`.'
| def init_dictionary(self, dictionary):
| self.dictionary = (dictionary if (dictionary is not None) else Dictionary())
if (self.input is not None):
if (dictionary is None):
logger.info('Initializing dictionary')
metadata_setting = self.metadata
self.metadata = False
self.dictionary.add_documents(self.get_texts())
self.metadata = metadata_setting
else:
logger.info('Input stream provided but dictionary already initialized')
else:
logger.warning('No input document stream provided; assuming dictionary will be initialized some other way.')
|
'The function that defines a corpus.
Iterating over the corpus must yield sparse vectors, one for each document.'
| def __iter__(self):
| if self.metadata:
for (text, metadata) in self.get_texts():
(yield (self.dictionary.doc2bow(text, allow_update=False), metadata))
else:
for text in self.get_texts():
(yield self.dictionary.doc2bow(text, allow_update=False))
|
'Yield documents from the underlying plain text collection (of one or more files).
Each item yielded from this method will be considered a document by subsequent
preprocessing methods.'
| def getstream(self):
| num_texts = 0
with utils.file_or_filename(self.input) as f:
for line in f:
(yield line)
num_texts += 1
self.length = num_texts
|