<SYSTEM_TASK:> Wrap a callable and return None if ES_DISABLED is True. <END_TASK> <USER_TASK:> Description: def es_required(fun): """Wrap a callable and return None if ES_DISABLED is True. This also adds an additional `es` argument to the callable giving you an ElasticSearch instance to use. """
@wraps(fun)
def wrapper(*args, **kw):
    if getattr(settings, 'ES_DISABLED', False):
        log.debug('Search disabled for %s.' % fun)
        return

    return fun(*args, es=get_es(), **kw)
return wrapper
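A quick usage sketch of the decorator above; ``reindex_all`` is a hypothetical function, and the import path assumes elasticutils' Django contrib module:

from elasticutils.contrib.django import es_required

@es_required
def reindex_all(es=None):
    # `es` is injected by the wrapper; if ES_DISABLED is True the body
    # never runs and the call returns None.
    return es.info()

reindex_all()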
<SYSTEM_TASK:> Returns the elasticsearch Elasticsearch object to use. <END_TASK> <USER_TASK:> Description: def get_es(self, default_builder=get_es): """Returns the elasticsearch Elasticsearch object to use. This uses the django get_es builder by default which takes into account settings in ``settings.py``. """
return super(S, self).get_es(default_builder=default_builder)
<SYSTEM_TASK:> Returns the list of indexes to act on based on ES_INDEXES setting <END_TASK> <USER_TASK:> Description: def get_indexes(self, default_indexes=None): """Returns the list of indexes to act on based on ES_INDEXES setting """
doctype = self.type.get_mapping_type_name()
indexes = (settings.ES_INDEXES.get(doctype) or
           settings.ES_INDEXES['default'])

if isinstance(indexes, six.string_types):
    indexes = [indexes]

return super(S, self).get_indexes(default_indexes=indexes)
<SYSTEM_TASK:> Gets the index for this model. <END_TASK> <USER_TASK:> Description: def get_index(cls): """Gets the index for this model. The index for this model is specified in `settings.ES_INDEXES` which is a dict of mapping type -> index name. By default, this uses `.get_mapping_type()` to determine the mapping and returns the value in `settings.ES_INDEXES` for that or ``settings.ES_INDEXES['default']``. Override this to compute it differently. :returns: index name to use """
indexes = settings.ES_INDEXES
index = indexes.get(cls.get_mapping_type_name()) or indexes['default']

if not isinstance(index, six.string_types):
    # FIXME - not sure what to do here, but we only want one
    # index and somehow this isn't one index.
    index = index[0]

return index
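A sketch of the setting this reads; the mapping type name and index names here are illustrative:

# settings.py
ES_INDEXES = {
    'default': 'main_index',     # fallback for unlisted mapping types
    'blogentry': 'blog_index',   # keyed by get_mapping_type_name()
}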
<SYSTEM_TASK:> Returns the queryset of ids of all things to be indexed. <END_TASK> <USER_TASK:> Description: def get_indexable(cls): """Returns the queryset of ids of all things to be indexed. Defaults to:: cls.get_model().objects.order_by('id').values_list( 'id', flat=True) :returns: iterable of ids of objects to be indexed """
model = cls.get_model()
return model.objects.order_by('id').values_list('id', flat=True)
<SYSTEM_TASK:> Create an elasticsearch `Elasticsearch` object and return it. <END_TASK> <USER_TASK:> Description:
def get_es(urls=None, timeout=DEFAULT_TIMEOUT, force_new=False, **settings):
    """Create an elasticsearch `Elasticsearch` object and return it.

    This will aggressively re-use `Elasticsearch` objects with the
    following rules:

    1. if you pass the same argument values to `get_es()`, then it
       will return the same `Elasticsearch` object
    2. if you pass different argument values to `get_es()`, then it
       will return different `Elasticsearch` objects
    3. it caches each `Elasticsearch` object that gets created
    4. if you pass in `force_new=True`, then you are guaranteed to get
       a fresh `Elasticsearch` object AND that object will not be
       cached

    :arg urls: list of uris; Elasticsearch hosts to connect to,
        defaults to ``['http://localhost:9200']``
    :arg timeout: int; the timeout in seconds, defaults to 5
    :arg force_new: Forces get_es() to generate a new Elasticsearch
        object rather than pulling it from cache.
    :arg settings: other settings to pass into Elasticsearch
        constructor; See `<http://elasticsearch-py.readthedocs.org/>`_
        for more details.

    Examples::

        # Returns cached Elasticsearch object
        es = get_es()

        # Returns a new Elasticsearch object
        es = get_es(force_new=True)

        es = get_es(urls=['localhost'])

        es = get_es(urls=['localhost:9200'], timeout=10, max_retries=3)
    """
# Cheap way of de-None-ifying things
urls = urls or DEFAULT_URLS

# v0.7: Check for 'hosts' instead of 'urls'. Take this out in v1.0.
if 'hosts' in settings:
    raise DeprecationWarning('"hosts" is deprecated in favor of "urls".')

if not force_new:
    key = _build_key(urls, timeout, **settings)
    if key in _cached_elasticsearch:
        return _cached_elasticsearch[key]

es = Elasticsearch(urls, timeout=timeout, **settings)

if not force_new:
    # We don't need to rebuild the key here since we built it in
    # the previous if block, so it's in the namespace. Having said
    # that, this is a little ew.
    _cached_elasticsearch[key] = es

return es
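A sketch of the caching rules above (constructing an Elasticsearch client does not connect, so no running server is needed):

from elasticutils import get_es

es1 = get_es(urls=['http://localhost:9200'], timeout=10)
es2 = get_es(urls=['http://localhost:9200'], timeout=10)
assert es1 is es2  # same arguments -> same cached object (rule 1)

es3 = get_es(urls=['http://localhost:9200'], timeout=10, force_new=True)
assert es3 is not es1  # force_new bypasses and skips the cache (rule 4)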
<SYSTEM_TASK:> Returns facet counts as dict. <END_TASK> <USER_TASK:> Description: def _facet_counts(items): """Returns facet counts as dict. Given the `items()` on the raw dictionary from Elasticsearch this processes it and returns the counts keyed on the facet name provided in the original query. """
facets = {}
for name, data in items:
    facets[name] = FacetResult(name, data)
return facets
<SYSTEM_TASK:> Boost a value if we should in _process_queries <END_TASK> <USER_TASK:> Description: def _boosted_value(name, action, key, value, boost): """Boost a value if we should in _process_queries"""
if boost is not None:
    # Note: Most queries use 'value' for the key name except
    # Match queries which use 'query'. So we have to do some
    # switcheroo for that.
    value_key = 'query' if action in MATCH_ACTIONS else 'value'
    return {name: {'boost': boost, value_key: value}}
return {name: value}
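A few illustrative calls, assuming 'match' is listed in MATCH_ACTIONS and 'term' is not:

>>> _boosted_value('title', 'match', 'title__match', 'trucks', 2.0)
{'title': {'boost': 2.0, 'query': 'trucks'}}
>>> _boosted_value('title', 'term', 'title__term', 'trucks', 2.0)
{'title': {'boost': 2.0, 'value': 'trucks'}}
>>> _boosted_value('title', 'term', 'title__term', 'trucks', None)
{'title': 'trucks'}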
<SYSTEM_TASK:> OR and AND will create a new F, with the filters from both F <END_TASK> <USER_TASK:> Description: def _combine(self, other, conn='and'): """ OR and AND will create a new F, with the filters from both F objects combined with the connector `conn`. """
f = F()

self_filters = copy.deepcopy(self.filters)
other_filters = copy.deepcopy(other.filters)

if not self.filters:
    f.filters = other_filters
elif not other.filters:
    f.filters = self_filters
elif conn in self.filters[0]:
    f.filters = self_filters
    f.filters[0][conn].extend(other_filters)
elif conn in other.filters[0]:
    f.filters = other_filters
    f.filters[0][conn].extend(self_filters)
else:
    f.filters = [{conn: self_filters + other_filters}]

return f
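The operator overloads delegate to _combine; a small sketch using elasticutils' F:

from elasticutils import F

anded = F(status='active') & F(category='news')  # _combine(..., conn='and')
ored = F(status='active') | F(category='news')   # _combine(..., conn='or')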
<SYSTEM_TASK:> Converts strings in a data structure to Python types <END_TASK> <USER_TASK:> Description: def to_python(self, obj): """Converts strings in a data structure to Python types It converts datetime-ish things to Python datetimes. Override if you want something different. :arg obj: Python datastructure :returns: Python datastructure with strings converted to Python types .. Note:: This does the conversion in-place! """
if isinstance(obj, string_types):
    if len(obj) == 26:
        try:
            return datetime.strptime(obj, '%Y-%m-%dT%H:%M:%S.%f')
        except (TypeError, ValueError):
            pass
    elif len(obj) == 19:
        try:
            return datetime.strptime(obj, '%Y-%m-%dT%H:%M:%S')
        except (TypeError, ValueError):
            pass
    elif len(obj) == 10:
        try:
            return datetime.strptime(obj, '%Y-%m-%d')
        except (TypeError, ValueError):
            pass

elif isinstance(obj, dict):
    for key, val in obj.items():
        obj[key] = self.to_python(val)

elif isinstance(obj, list):
    return [self.to_python(item) for item in obj]

return obj
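An illustrative round-trip, where ``mt`` stands in for any concrete MappingType instance:

>>> doc = {'created': '2014-04-09T12:30:00', 'tags': ['a', 'b'], 'n': 5}
>>> mt.to_python(doc)
{'created': datetime.datetime(2014, 4, 9, 12, 30), 'tags': ['a', 'b'], 'n': 5}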
<SYSTEM_TASK:> Return a new S instance with query args combined with existing <END_TASK> <USER_TASK:> Description:
def query(self, *queries, **kw):
    """
    Return a new S instance with query args combined with existing
    set in a must boolean query.

    :arg queries: instances of Q
    :arg kw: queries in the form of ``field__action=value``

    There are three special flags you can use:

    * ``must=True``: Specifies that the queries and kw queries
      **must match** in order for a document to be in the result.
      If you don't specify a special flag, this is the default.
    * ``should=True``: Specifies that the queries and kw queries
      **should match** in order for a document to be in the result.
    * ``must_not=True``: Specifies the queries and kw queries
      **must not match** in order for a document to be in the result.

    These flags work by putting those queries in the appropriate
    clause of an Elasticsearch boolean query.

    Examples:

    >>> s = S().query(foo='bar')
    >>> s = S().query(Q(foo='bar'))
    >>> s = S().query(foo='bar', bat__match='baz')
    >>> s = S().query(foo='bar', should=True)
    >>> s = S().query(foo='bar', should=True).query(baz='bat', must=True)

    Notes:

    1. Don't specify multiple special flags, but if you did, `should`
       takes precedence.
    2. If you don't specify any, it defaults to `must`.
    3. You can specify special flags in the :py:class:`elasticutils.Q`,
       too. If you're building your query incrementally, using
       :py:class:`elasticutils.Q` helps a lot.

    See the documentation on :py:class:`elasticutils.Q` for more
    details on composing queries with Q.

    See the documentation on :py:class:`elasticutils.S` for more
    details on adding support for more query types.
    """
q = Q()
for query in queries:
    q += query

if 'or_' in kw:
    # Backwards compatible with pre-0.7 version.
    or_query = kw.pop('or_')

    # or_query here is a dict of key/val pairs. or_ indicates
    # they're in a should clause, so we generate the
    # equivalent Q and then add it in.
    or_query['should'] = True
    q += Q(**or_query)

q += Q(**kw)

return self._clone(next_step=('query', q))
<SYSTEM_TASK:> Return a new S instance with filter args combined with <END_TASK> <USER_TASK:> Description:
def filter(self, *filters, **kw):
    """
    Return a new S instance with filter args combined with existing
    set with AND.

    :arg filters: this will be instances of F
    :arg kw: this will be in the form of ``field__action=value``

    Examples:

    >>> s = S().filter(foo='bar')
    >>> s = S().filter(F(foo='bar'))
    >>> s = S().filter(foo='bar', bat='baz')
    >>> s = S().filter(foo='bar').filter(bat='baz')

    By default, everything is combined using AND. If you provide
    multiple filters in a single filter call, those are ANDed
    together. If you provide multiple filters in multiple filter
    calls, those are ANDed together.

    If you want something different, use the F class which supports
    ``&`` (and), ``|`` (or) and ``~`` (not) operators. Then call
    filter once with the resulting F instance.

    See the documentation on :py:class:`elasticutils.F` for more
    details on composing filters with F.

    See the documentation on :py:class:`elasticutils.S` for more
    details on adding support for new filter types.
    """
items = kw.items()
if six.PY3:
    items = list(items)

return self._clone(
    next_step=('filter', list(filters) + items))
<SYSTEM_TASK:> Return a new S instance with field boosts. <END_TASK> <USER_TASK:> Description:
def boost(self, **kw):
    """
    Return a new S instance with field boosts.

    ElasticUtils allows you to specify query-time field boosts
    with ``.boost()``. It takes a set of arguments where the keys
    are either field names or field name + ``__`` + field action.

    Examples::

        q = (S().query(title='taco trucks',
                       description__match='awesome')
                .boost(title=4.0, description__match=2.0))

    If the key is a field name, then the boost will apply to all
    query bits that have that field name. For example::

        q = (S().query(title='trucks',
                       title__prefix='trucks',
                       title__fuzzy='trucks')
                .boost(title=4.0))

    applies a 4.0 boost to all three query bits because all three
    query bits are for the title field name.

    If the key is a field name and field action, then the boost
    will apply only to that field name and field action. For example::

        q = (S().query(title='trucks',
                       title__prefix='trucks',
                       title__fuzzy='trucks')
                .boost(title__prefix=4.0))

    will only apply the 4.0 boost to title__prefix.

    Boosts are relative to one another and all boosts default to 1.0.

    For example, if you had::

        qs = (S().boost(title=4.0, summary=2.0)
                 .query(title__match=value,
                        summary__match=value,
                        content__match=value,
                        should=True))

    ``title__match`` would be boosted twice as much as
    ``summary__match`` and ``summary__match`` twice as much as
    ``content__match``.
    """
new = self._clone()
new.field_boosts.update(kw)
return new
<SYSTEM_TASK:> Returns a new S instance with boosting query and demotion. <END_TASK> <USER_TASK:> Description:
def demote(self, amount_, *queries, **kw):
    """
    Returns a new S instance with boosting query and demotion.

    You can demote documents that match query criteria::

        q = (S().query(title='trucks')
                .demote(0.5, description__match='gross'))

        q = (S().query(title='trucks')
                .demote(0.5, Q(description__match='gross')))

    This is implemented using the boosting query in Elasticsearch.
    Anything you specify with ``.query()`` goes into the positive
    section. The negative query and negative boost portions are
    specified as the first and second arguments to ``.demote()``.

    .. Note:: Calling this again will overwrite previous ``.demote()``
       calls.
    """
q = Q()
for query in queries:
    q += query
q += Q(**kw)

return self._clone(next_step=('demote', (amount_, q)))
<SYSTEM_TASK:> Return a new S instance with raw facet args combined with <END_TASK> <USER_TASK:> Description: def facet_raw(self, **kw): """ Return a new S instance with raw facet args combined with existing set. """
items = kw.items()
if six.PY3:
    items = list(items)

return self._clone(next_step=('facet_raw', items))
<SYSTEM_TASK:> Set suggestion options. <END_TASK> <USER_TASK:> Description: def suggest(self, name, term, **kwargs): """Set suggestion options. :arg name: The name to use for the suggestions. :arg term: The term to suggest similar looking terms for. Additional keyword options: * ``field`` -- The field to base suggestions upon, defaults to _all Results will have a ``_suggestions`` property containing the suggestions for all terms. .. Note:: Suggestions are only supported since Elasticsearch 0.90. Calling this multiple times will add multiple suggest clauses to the query. """
return self._clone(next_step=('suggest', (name, term, kwargs)))
<SYSTEM_TASK:> Return a new S instance with extra args combined with existing <END_TASK> <USER_TASK:> Description: def extra(self, **kw): """ Return a new S instance with extra args combined with existing set. """
new = self._clone()
actions = ['values_list', 'values_dict', 'order_by', 'query',
           'filter', 'facet']
for key, vals in kw.items():
    assert key in actions
    if hasattr(vals, 'items'):
        new.steps.append((key, vals.items()))
    else:
        new.steps.append((key, vals))
return new
<SYSTEM_TASK:> Return the portion of the query that controls highlighting. <END_TASK> <USER_TASK:> Description: def _build_highlight(self, fields, options): """Return the portion of the query that controls highlighting."""
ret = {'fields': dict((f, {}) for f in fields),
       'order': 'score'}
ret.update(options)
return ret
<SYSTEM_TASK:> Takes a list of queries and returns query clause value <END_TASK> <USER_TASK:> Description: def _process_queries(self, queries): """Takes a list of queries and returns query clause value :arg queries: list of Q instances :returns: dict which is the query clause value """
# First, let's mush everything into a single Q. Then we can
# parse that into bits.
new_q = Q()
for query in queries:
    new_q += query

# Now we have a single Q that needs to be processed.
should_q = [self._process_query(query) for query in new_q.should_q]
must_q = [self._process_query(query) for query in new_q.must_q]
must_not_q = [self._process_query(query)
              for query in new_q.must_not_q]

if len(must_q) > 1 or (len(should_q) + len(must_not_q) > 0):
    # If there's more than one must_q or there are must_not_q
    # or should_q, then we need to wrap the whole thing in a
    # boolean query.
    bool_query = {}
    if must_q:
        bool_query['must'] = must_q
    if should_q:
        bool_query['should'] = should_q
    if must_not_q:
        bool_query['must_not'] = must_not_q
    return {'bool': bool_query}

if must_q:
    # There's only one must_q query and that's it, so we hoist
    # that.
    return must_q[0]

return {}
<SYSTEM_TASK:> Perform the search, then convert that raw format into a <END_TASK> <USER_TASK:> Description: def _do_search(self): """ Perform the search, then convert that raw format into a SearchResults instance and return it. """
if self._results_cache is None:
    response = self.raw()
    ResultsClass = self.get_results_class()
    results = self.to_python(response.get('hits', {}).get('hits', []))
    self._results_cache = ResultsClass(
        self.type, response, results, self.fields)
return self._results_cache
<SYSTEM_TASK:> Returns the Elasticsearch object to use. <END_TASK> <USER_TASK:> Description: def get_es(self, default_builder=get_es): """Returns the Elasticsearch object to use. :arg default_builder: The function that takes a bunch of arguments and generates an elasticsearch Elasticsearch object. .. Note:: If you desire special behavior regarding building the Elasticsearch object for this S, subclass S and override this method. """
# .es() calls are incremental, so we go through them all and
# update bits that are specified.
args = {}
for action, value in self.steps:
    if action == 'es':
        args.update(**value)

# TODO: store the Elasticsearch on the S if we've already
# created one since we don't need to do it multiple times.
return default_builder(**args)
<SYSTEM_TASK:> Returns the list of doctypes to use. <END_TASK> <USER_TASK:> Description: def get_doctypes(self, default_doctypes=DEFAULT_DOCTYPES): """Returns the list of doctypes to use."""
for action, value in reversed(self.steps):
    if action == 'doctypes':
        return list(value)

if self.type is not None:
    return [self.type.get_mapping_type_name()]

return default_doctypes
<SYSTEM_TASK:> Returns an `Elasticsearch`. <END_TASK> <USER_TASK:> Description: def get_es(self): """Returns an `Elasticsearch`. * If there's an s, then it returns that `Elasticsearch`. * If the es was provided in the constructor, then it returns that `Elasticsearch`. * Otherwise, it creates a new `Elasticsearch` and returns that. Override this if that behavior isn't correct for you. """
if self.s:
    return self.s.get_es()

return self.es or get_es()
<SYSTEM_TASK:> Perform the mlt call, then convert that raw format into a <END_TASK> <USER_TASK:> Description: def _do_search(self): """ Perform the mlt call, then convert that raw format into a SearchResults instance and return it. """
if self._results_cache is None:
    response = self.raw()
    results = self.to_python(response.get('hits', {}).get('hits', []))
    self._results_cache = DictSearchResults(
        self.type, response, results, None)
return self._results_cache
<SYSTEM_TASK:> Adds or updates a document to the index <END_TASK> <USER_TASK:> Description:
def index(cls, document, id_=None, overwrite_existing=True, es=None,
          index=None):
    """Adds or updates a document to the index

    :arg document: Python dict of key/value pairs representing
        the document

        .. Note:: This must be serializable into JSON.

    :arg id_: the id of the document

        .. Note:: If you don't provide an ``id_``, then Elasticsearch
           will make up an id for your document and it'll look like
           a character name from a Lovecraft novel.

    :arg overwrite_existing: if ``True`` overwrites existing documents
        of the same ID and doctype

    :arg es: The `Elasticsearch` to use. If you don't specify an
        `Elasticsearch`, it'll use `cls.get_es()`.

    :arg index: The name of the index to use. If you don't specify one
        it'll use `cls.get_index()`.

    .. Note:: If you need the documents available for searches
       immediately, make sure to refresh the index by calling
       ``refresh_index()``.
    """
if es is None:
    es = cls.get_es()
if index is None:
    index = cls.get_index()

kw = {}
if not overwrite_existing:
    kw['op_type'] = 'create'

es.index(index=index,
         doc_type=cls.get_mapping_type_name(),
         body=document,
         id=id_,
         **kw)
<SYSTEM_TASK:> Adds or updates a batch of documents. <END_TASK> <USER_TASK:> Description:
def bulk_index(cls, documents, id_field='id', es=None, index=None):
    """Adds or updates a batch of documents.

    :arg documents: List of Python dicts representing individual
        documents to be added to the index

        .. Note:: This must be serializable into JSON.

    :arg id_field: The name of the field to use as the document id.
        This defaults to 'id'.

    :arg es: The `Elasticsearch` to use. If you don't specify an
        `Elasticsearch`, it'll use `cls.get_es()`.

    :arg index: The name of the index to use. If you don't specify one
        it'll use `cls.get_index()`.

    .. Note:: If you need the documents available for searches
       immediately, make sure to refresh the index by calling
       ``refresh_index()``.
    """
if es is None:
    es = cls.get_es()
if index is None:
    index = cls.get_index()

documents = (dict(d, _id=d[id_field]) for d in documents)

bulk_index(
    es,
    documents,
    index=index,
    doc_type=cls.get_mapping_type_name(),
    raise_on_error=True)
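A usage sketch; ``MyMappingType`` stands in for any Indexable subclass:

documents = [
    {'id': 1, 'title': 'First post'},
    {'id': 2, 'title': 'Second post'},
]
MyMappingType.bulk_index(documents, id_field='id')
MyMappingType.refresh_index()  # make them searchable immediately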
<SYSTEM_TASK:> Removes a particular item from the search index. <END_TASK> <USER_TASK:> Description: def unindex(cls, id_, es=None, index=None): """Removes a particular item from the search index. :arg id_: The Elasticsearch id for the document to remove from the index. :arg es: The `Elasticsearch` to use. If you don't specify an `Elasticsearch`, it'll use `cls.get_es()`. :arg index: The name of the index to use. If you don't specify one it'll use `cls.get_index()`. """
if es is None:
    es = cls.get_es()
if index is None:
    index = cls.get_index()

es.delete(index=index, doc_type=cls.get_mapping_type_name(), id=id_)
<SYSTEM_TASK:> Refreshes the index. <END_TASK> <USER_TASK:> Description: def refresh_index(cls, es=None, index=None): """Refreshes the index. Elasticsearch will update the index periodically automatically. If you need to see the documents you just indexed in your search results right now, you should call `refresh_index` as soon as you're done indexing. This is particularly helpful for unit tests. :arg es: The `Elasticsearch` to use. If you don't specify an `Elasticsearch`, it'll use `cls.get_es()`. :arg index: The name of the index to use. If you don't specify one it'll use `cls.get_index()`. """
if es is None:
    es = cls.get_es()
if index is None:
    index = cls.get_index()

es.indices.refresh(index=index)
<SYSTEM_TASK:> Monkey patch for elasticsearch-py 1.0+ to make it work with ES 0.90 <END_TASK> <USER_TASK:> Description: def monkeypatch_es(): """Monkey patch for elasticsearch-py 1.0+ to make it work with ES 0.90 1. tweaks elasticsearch.client.bulk to normalize return status codes .. Note:: We can nix this when we drop support for ES 0.90. """
if _monkeypatched_es:
    return

def normalize_bulk_return(fun):
    """Sets 'status' based on 'ok' if 'ok' exists"""
    @wraps(fun)
    def _fixed_bulk(self, *args, **kwargs):
        def fix_item(item):
            # Go through all the possible sections of item looking
            # for 'ok' and adding an additional 'status'.
            for key, val in item.items():
                if 'ok' in val:
                    val['status'] = 201
            return item

        ret = fun(self, *args, **kwargs)
        if 'items' in ret:
            ret['items'] = [fix_item(item) for item in ret['items']]
        return ret
    return _fixed_bulk

Elasticsearch.bulk = normalize_bulk_return(Elasticsearch.bulk)
<SYSTEM_TASK:> Ensure the storage path exists. <END_TASK> <USER_TASK:> Description: def _ensure_file_path(self): """ Ensure the storage path exists. If it doesn't, create it with "go-rwx" permissions. """
storage_root = os.path.dirname(self.file_path)
needs_storage_root = storage_root and not os.path.isdir(storage_root)
if needs_storage_root:  # pragma: no cover
    os.makedirs(storage_root)
if not os.path.isfile(self.file_path):
    # create the file without group/world permissions
    with open(self.file_path, 'w'):
        pass
    user_read_write = 0o600
    os.chmod(self.file_path, user_read_write)
<SYSTEM_TASK:> Add edit handler that includes "related" panels to applicable <END_TASK> <USER_TASK:> Description: def add_relationship_panels(self): """ Add edit handler that includes "related" panels to applicable model classes that don't explicitly define their own edit handler. """
from wagtailplus.utils.edit_handlers import add_panel_to_edit_handler
from wagtailplus.wagtailrelations.edit_handlers import RelatedPanel

for model in self.applicable_models:
    add_panel_to_edit_handler(model, RelatedPanel, _(u'Related'))
<SYSTEM_TASK:> Adds relationship methods to applicable model classes. <END_TASK> <USER_TASK:> Description: def add_relationship_methods(self): """ Adds relationship methods to applicable model classes. """
Entry = apps.get_model('wagtailrelations', 'Entry')

@cached_property
def related(instance):
    return instance.get_related()

@cached_property
def related_live(instance):
    return instance.get_related_live()

@cached_property
def related_with_scores(instance):
    return instance.get_related_with_scores()

def get_related(instance):
    entry = Entry.objects.get_for_model(instance)[0]
    return entry.get_related()

def get_related_live(instance):
    entry = Entry.objects.get_for_model(instance)[0]
    return entry.get_related_live()

def get_related_with_scores(instance):
    try:
        entry = Entry.objects.get_for_model(instance)[0]
        return entry.get_related_with_scores()
    except IntegrityError:
        return []

for model in self.applicable_models:
    model.add_to_class('get_related', get_related)
    model.add_to_class('get_related_live', get_related_live)
    model.add_to_class('get_related_with_scores', get_related_with_scores)
    model.add_to_class('related', related)
    model.add_to_class('related_live', related_live)
    model.add_to_class('related_with_scores', related_with_scores)
<SYSTEM_TASK:> Adds rollback panel to applicable model class's edit handlers. <END_TASK> <USER_TASK:> Description: def add_rollback_panels(self): """ Adds rollback panel to applicable model class's edit handlers. """
from wagtailplus.utils.edit_handlers import add_panel_to_edit_handler
from wagtailplus.wagtailrollbacks.edit_handlers import HistoryPanel

for model in self.applicable_models:
    add_panel_to_edit_handler(model, HistoryPanel, _(u'History'))
<SYSTEM_TASK:> Adds rollback methods to applicable model classes. <END_TASK> <USER_TASK:> Description: def add_rollback_methods(): """ Adds rollback methods to applicable model classes. """
# Modified Page.save_revision method.
def page_rollback(instance, revision_id, user=None,
                  submitted_for_moderation=False,
                  approved_go_live_at=None, changed=True):
    old_revision = instance.revisions.get(pk=revision_id)
    new_revision = instance.revisions.create(
        content_json=old_revision.content_json,
        user=user,
        submitted_for_moderation=submitted_for_moderation,
        approved_go_live_at=approved_go_live_at
    )

    update_fields = []

    instance.latest_revision_created_at = new_revision.created_at
    update_fields.append('latest_revision_created_at')

    if changed:
        instance.has_unpublished_changes = True
        update_fields.append('has_unpublished_changes')

    if update_fields:
        instance.save(update_fields=update_fields)

    logger.info(
        "Page edited: \"%s\" id=%d revision_id=%d",
        instance.title,
        instance.id,
        new_revision.id
    )

    if submitted_for_moderation:
        logger.info(
            "Page submitted for moderation: \"%s\" id=%d revision_id=%d",
            instance.title,
            instance.id,
            new_revision.id
        )

    return new_revision

Page = apps.get_model('wagtailcore', 'Page')
Page.add_to_class('rollback', page_rollback)
<SYSTEM_TASK:> Returns list of related Entry instances for specified page. <END_TASK> <USER_TASK:> Description: def get_related(page): """ Returns list of related Entry instances for specified page. :param page: the page instance. :rtype: list. """
related = []
entry = Entry.get_for_model(page)

if entry:
    related = entry.related

return related
<SYSTEM_TASK:> Returns admin URL for specified entry instance. <END_TASK> <USER_TASK:> Description: def get_related_entry_admin_url(entry): """ Returns admin URL for specified entry instance. :param entry: the entry instance. :return: str. """
namespaces = {
    Document: 'wagtaildocs:edit',
    Link: 'wagtaillinks:edit',
    Page: 'wagtailadmin_pages:edit',
}

# NOTE: the original called namespaces.iteritems(), which only exists
# on Python 2; items() works on both.
for cls, url in namespaces.items():
    if issubclass(entry.content_type.model_class(), cls):
        return urlresolvers.reverse(url, args=(entry.object_id,))

return ''
<SYSTEM_TASK:> Given a dictionary of attributes, find the corresponding link instance and <END_TASK> <USER_TASK:> Description: def expand_db_attributes(attrs, for_editor): """ Given a dictionary of attributes, find the corresponding link instance and return its HTML representation. :param attrs: dictionary of link attributes. :param for_editor: whether or not HTML is for editor. :rtype: str. """
try:
    editor_attrs = ''
    link = Link.objects.get(id=attrs['id'])

    if for_editor:
        editor_attrs = 'data-linktype="link" data-id="{0}" '.format(
            link.id)

    return '<a {0}href="{1}" title="{2}">'.format(
        editor_attrs,
        escape(link.get_absolute_url()),
        link.title
    )
except Link.DoesNotExist:
    return '<a>'
<SYSTEM_TASK:> Index documents of a specified mapping type. <END_TASK> <USER_TASK:> Description:
def index_objects(mapping_type, ids, chunk_size=100, es=None, index=None):
    """Index documents of a specified mapping type.

    This allows for asynchronous indexing.

    If a mapping_type extends Indexable, you can add a ``post_save``
    hook for the model that it's based on like this::

        @receiver(dbsignals.post_save, sender=MyModel)
        def update_in_index(sender, instance, **kw):
            from elasticutils.contrib.django import tasks
            tasks.index_objects.delay(MyMappingType, [instance.id])

    :arg mapping_type: the mapping type for these ids
    :arg ids: the list of ids of things to index
    :arg chunk_size: the size of the chunk for bulk indexing

    .. Note:: The default chunk_size is 100. The number of documents
       you can bulk index at once depends on the size of the
       documents.

    :arg es: The `Elasticsearch` to use. If you don't specify an
        `Elasticsearch`, it'll use `mapping_type.get_es()`.

    :arg index: The name of the index to use. If you don't specify one
        it'll use `mapping_type.get_index()`.
    """
if settings.ES_DISABLED:
    return

log.debug('Indexing objects {0}-{1}. [{2}]'.format(
    ids[0], ids[-1], len(ids)))

# Get the model this mapping type is based on.
model = mapping_type.get_model()

# Retrieve all the objects that we're going to index and do it in
# bulk.
for id_list in chunked(ids, chunk_size):
    documents = []

    for obj in model.objects.filter(id__in=id_list):
        try:
            documents.append(mapping_type.extract_document(obj.id, obj))
        except Exception as exc:
            log.exception('Unable to extract document {0}: {1}'.format(
                obj, repr(exc)))

    if documents:
        mapping_type.bulk_index(documents, id_field='id', es=es,
                                index=index)
<SYSTEM_TASK:> Remove documents of a specified mapping_type from the index. <END_TASK> <USER_TASK:> Description:
def unindex_objects(mapping_type, ids, es=None, index=None):
    """Remove documents of a specified mapping_type from the index.

    This allows for asynchronous deleting.

    If a mapping_type extends Indexable, you can add a ``pre_delete``
    hook for the model that it's based on like this::

        @receiver(dbsignals.pre_delete, sender=MyModel)
        def remove_from_index(sender, instance, **kw):
            from elasticutils.contrib.django import tasks
            tasks.unindex_objects.delay(MyMappingType, [instance.id])

    :arg mapping_type: the mapping type for these ids
    :arg ids: the list of ids of things to remove
    :arg es: The `Elasticsearch` to use. If you don't specify an
        `Elasticsearch`, it'll use `mapping_type.get_es()`.
    :arg index: The name of the index to use. If you don't specify one
        it'll use `mapping_type.get_index()`.
    """
if settings.ES_DISABLED:
    return

for id_ in ids:
    mapping_type.unindex(id_, es=es, index=index)
<SYSTEM_TASK:> Create the cipher object to encrypt or decrypt a payload. <END_TASK> <USER_TASK:> Description: def _create_cipher(self, password, salt, IV): """ Create the cipher object to encrypt or decrypt a payload. """
from Crypto.Protocol.KDF import PBKDF2
from Crypto.Cipher import AES

pw = PBKDF2(password, salt, dkLen=self.block_size)
return AES.new(pw[:self.block_size], AES.MODE_CFB, IV)
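A standalone sketch of the same KDF-plus-cipher construction with PyCrypto, assuming ``block_size`` is 32 (an AES-256 key length):

from Crypto import Random
from Crypto.Cipher import AES
from Crypto.Protocol.KDF import PBKDF2

block_size = 32
salt = Random.new().read(16)
IV = Random.new().read(AES.block_size)  # CFB mode wants a 16-byte IV
pw = PBKDF2(b'hunter2', salt, dkLen=block_size)
cipher = AES.new(pw[:block_size], AES.MODE_CFB, IV)
ciphertext = cipher.encrypt(b'secret payload')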
<SYSTEM_TASK:> Initialize a new password file and set the reference password. <END_TASK> <USER_TASK:> Description: def _init_file(self): """ Initialize a new password file and set the reference password. """
self.keyring_key = self._get_new_password()

# set a reference password, used to check that the password provided
# matches for subsequent checks.
self.set_password('keyring-setting',
                  'password reference',
                  'password reference value')
self._write_config_value('keyring-setting', 'scheme', self.scheme)
self._write_config_value('keyring-setting', 'version', self.version)
<SYSTEM_TASK:> Check if the file exists and has the expected password reference. <END_TASK> <USER_TASK:> Description: def _check_file(self): """ Check if the file exists and has the expected password reference. """
if not os.path.exists(self.file_path):
    return False

self._migrate()

config = configparser.RawConfigParser()
config.read(self.file_path)
try:
    config.get(
        escape_for_ini('keyring-setting'),
        escape_for_ini('password reference'),
    )
except (configparser.NoSectionError, configparser.NoOptionError):
    return False

try:
    self._check_scheme(config)
except AttributeError:
    # accept a missing scheme
    return True

return self._check_version(config)
<SYSTEM_TASK:> check for a valid version <END_TASK> <USER_TASK:> Description: def _check_version(self, config): """ check for a valid version an existing scheme implies an existing version as well return True, if version is valid, and False otherwise """
try:
    self.file_version = config.get(
        escape_for_ini('keyring-setting'),
        escape_for_ini('version'),
    )
except (configparser.NoSectionError, configparser.NoOptionError):
    return False

return True
<SYSTEM_TASK:> Unlock this keyring by getting the password for the keyring from the <END_TASK> <USER_TASK:> Description: def _unlock(self): """ Unlock this keyring by getting the password for the keyring from the user. """
self.keyring_key = getpass.getpass(
    'Please enter password for encrypted keyring: ')

try:
    ref_pw = self.get_password('keyring-setting', 'password reference')
    assert ref_pw == 'password reference value'
except AssertionError:
    self._lock()
    raise ValueError("Incorrect Password")
<SYSTEM_TASK:> Returns form class to use in the view. <END_TASK> <USER_TASK:> Description: def get_form_class(self): """ Returns form class to use in the view. :rtype: django.forms.ModelForm. """
if self.object.link_type == Link.LINK_TYPE_EMAIL:
    return EmailLinkForm
elif self.object.link_type == Link.LINK_TYPE_EXTERNAL:
    return ExternalLinkForm

return None
<SYSTEM_TASK:> Write the password to the registry <END_TASK> <USER_TASK:> Description: def set_password(self, service, username, password): """Write the password to the registry """
# encrypt the password
password_encrypted = _win_crypto.encrypt(password.encode('utf-8'))

# encode with base64
password_base64 = base64.encodestring(password_encrypted)

# encode again to unicode
password_saved = password_base64.decode('ascii')

# store the password
key_name = self._key_for_service(service)
hkey = winreg.CreateKey(winreg.HKEY_CURRENT_USER, key_name)
winreg.SetValueEx(hkey, username, 0, winreg.REG_SZ, password_saved)
<SYSTEM_TASK:> Encrypt the password. <END_TASK> <USER_TASK:> Description: def encrypt(self, password): """Encrypt the password. """
if not password or not self._crypter:
    return password or b''
return self._crypter.encrypt(password)
<SYSTEM_TASK:> Open the password file in the specified mode <END_TASK> <USER_TASK:> Description: def _open(self, mode='r'): """Open the password file in the specified mode """
open_file = None
writeable = 'w' in mode or 'a' in mode or '+' in mode
try:
    # NOTE: currently the MemOpener does not split off any filename
    # which causes errors on close()
    # so we add a dummy name and open it separately
    if (self.filename.startswith('mem://') or
            self.filename.startswith('ram://')):
        open_file = fs.opener.fsopendir(self.filename).open('kr.cfg',
                                                            mode)
    else:
        if not hasattr(self, '_pyfs'):
            # reuse the pyfilesystem and path
            self._pyfs, self._path = fs.opener.opener.parse(
                self.filename, writeable=writeable)
            # cache if permitted
            if self._cache_timeout is not None:
                self._pyfs = fs.remote.CacheFS(
                    self._pyfs, cache_timeout=self._cache_timeout)
        open_file = self._pyfs.open(self._path, mode)
except fs.errors.ResourceNotFoundError:
    if self._can_create:
        segments = fs.opener.opener.split_segments(self.filename)
        if segments:
            # this seems broken, but pyfilesystem uses it, so we must
            fs_name, credentials, url1, url2, path = segments.groups()
            assert fs_name, 'Should be a remote filesystem'
            host = ''
            # allow for domain:port
            if ':' in url2:
                split_url2 = url2.split('/', 1)
                if len(split_url2) > 1:
                    url2 = split_url2[1]
                else:
                    url2 = ''
                host = split_url2[0]
            pyfs = fs.opener.opener.opendir('%s://%s' % (fs_name, host))
            # cache if permitted
            if self._cache_timeout is not None:
                pyfs = fs.remote.CacheFS(
                    pyfs, cache_timeout=self._cache_timeout)
            # NOTE: fs.path.split does not function in the same
            # way as os.path.split... at least under windows
            url2_path, url2_filename = os.path.split(url2)
            if url2_path and not pyfs.exists(url2_path):
                pyfs.makedir(url2_path, recursive=True)
        else:
            # assume local filesystem
            full_url = fs.opener._expand_syspath(self.filename)
            # NOTE: fs.path.split does not function in the same
            # way as os.path.split... at least under windows
            url2_path, url2 = os.path.split(full_url)
            pyfs = fs.osfs.OSFS(url2_path)

        try:
            # reuse the pyfilesystem and path
            self._pyfs = pyfs
            self._path = url2
            return pyfs.open(url2, mode)
        except fs.errors.ResourceNotFoundError:
            if writeable:
                raise
            else:
                pass
    # NOTE: ignore read errors as the underlying caller can fail safely
    if writeable:
        raise
    else:
        pass
return open_file
<SYSTEM_TASK:> load the passwords from the config file <END_TASK> <USER_TASK:> Description: def config(self): """load the passwords from the config file """
if not hasattr(self, '_config'):
    raw_config = configparser.RawConfigParser()
    f = self._open()
    if f:
        raw_config.readfp(f)
        f.close()
    self._config = raw_config
return self._config
<SYSTEM_TASK:> Returns queryset limited to categories with live Entry instances. <END_TASK> <USER_TASK:> Description: def get_queryset(self): """ Returns queryset limited to categories with live Entry instances. :rtype: django.db.models.query.QuerySet. """
queryset = super(LiveEntryCategoryManager, self).get_queryset()
return queryset.filter(tag__in=[
    entry_tag.tag
    for entry_tag in EntryTag.objects.filter(entry__live=True)
])
<SYSTEM_TASK:> Returns queryset of Entry instances assigned to specified <END_TASK> <USER_TASK:> Description: def get_for_tag(self, tag): """ Returns queryset of Entry instances assigned to specified tag, which can be a PK value, a slug value, or a Tag instance. :param tag: tag PK, slug, or instance. :rtype: django.db.models.query.QuerySet. """
tag_filter = {'tag': tag}

if isinstance(tag, six.integer_types):
    tag_filter = {'tag_id': tag}
elif isinstance(tag, str):
    tag_filter = {'tag__slug': tag}

return self.filter(id__in=[
    entry_tag.entry_id
    for entry_tag in EntryTag.objects.filter(**tag_filter)
])
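All three call styles reach the same tag; ``tag_instance`` is a hypothetical Tag object:

>>> Entry.objects.get_for_tag(3)             # int -> tag_id filter
>>> Entry.objects.get_for_tag('news')        # str -> tag__slug filter
>>> Entry.objects.get_for_tag(tag_instance)  # Tag -> tag filter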
<SYSTEM_TASK:> Returns queryset of EntryTag instances for specified category. <END_TASK> <USER_TASK:> Description: def for_category(self, category, live_only=False): """ Returns queryset of EntryTag instances for specified category. :param category: the Category instance. :param live_only: flag to include only "live" entries. :rtype: django.db.models.query.QuerySet. """
filters = {'tag': category.tag}

if live_only:
    filters.update({'entry__live': True})

return self.filter(**filters)
<SYSTEM_TASK:> Returns queryset of Entry instances related to specified <END_TASK> <USER_TASK:> Description: def related_to(self, entry, live_only=False): """ Returns queryset of Entry instances related to specified Entry instance. :param entry: the Entry instance. :param live_only: flag to include only "live" entries. :rtype: django.db.models.query.QuerySet. """
filters = {'tag__in': entry.tags}

if live_only:
    filters.update({'entry__live': True})

return self.filter(**filters).exclude(entry=entry)
<SYSTEM_TASK:> Returns a ChosenView class that extends specified chooser class. <END_TASK> <USER_TASK:> Description: def chosen_view_factory(chooser_cls): """ Returns a ChosenView class that extends specified chooser class. :param chooser_cls: the class to extend. :rtype: class. """
class ChosenView(chooser_cls):
    # noinspection PyUnusedLocal
    def get(self, request, *args, **kwargs):
        """
        Returns GET response.

        :param request: the request instance.
        :rtype: django.http.HttpResponse.
        """
        # noinspection PyAttributeOutsideInit
        self.object = self.get_object()

        return render_modal_workflow(
            self.request,
            None,
            '{0}/chosen.js'.format(self.template_dir),
            {'obj': self.get_json(self.object)}
        )

    def get_object(self, queryset=None):
        """
        Returns chosen object instance.

        :param queryset: the queryset instance.
        :rtype: django.db.models.Model.
        """
        if queryset is None:
            queryset = self.get_queryset()

        pk = self.kwargs.get('pk', None)

        try:
            return queryset.get(pk=pk)
        # NOTE: the original caught self.models.DoesNotExist, which
        # would raise AttributeError; the view's model class is
        # self.model.
        except self.model.DoesNotExist:
            raise Http404()

    def post(self, request, *args, **kwargs):
        """
        Returns POST response.

        :param request: the request instance.
        :rtype: django.http.HttpResponse.
        """
        return self.get(request, *args, **kwargs)

return ChosenView
<SYSTEM_TASK:> Creates EntryTag for Entry corresponding to specified <END_TASK> <USER_TASK:> Description: def create_entry_tag(sender, instance, created, **kwargs): """ Creates EntryTag for Entry corresponding to specified ItemBase instance. :param sender: the sending ItemBase class. :param instance: the ItemBase instance. """
from ..models import (
    Entry,
    EntryTag
)

entry = Entry.objects.get_for_model(instance.content_object)[0]
tag = instance.tag

if not EntryTag.objects.filter(tag=tag, entry=entry).exists():
    EntryTag.objects.create(tag=tag, entry=entry)
<SYSTEM_TASK:> Deletes EntryTag for Entry corresponding to specified <END_TASK> <USER_TASK:> Description: def delete_entry_tag(sender, instance, **kwargs): """ Deletes EntryTag for Entry corresponding to specified TaggedItemBase instance. :param sender: the sending TaggedItemBase class. :param instance: the TaggedItemBase instance. """
from ..models import (
    Entry,
    EntryTag
)

entry = Entry.objects.get_for_model(instance.content_object)[0]
tag = instance.tag

EntryTag.objects.filter(tag=tag, entry=entry).delete()
<SYSTEM_TASK:> Deletes Entry instance corresponding to specified instance. <END_TASK> <USER_TASK:> Description: def delete_entry(sender, instance, **kwargs): """ Deletes Entry instance corresponding to specified instance. :param sender: the sending class. :param instance: the instance being deleted. """
from ..models import Entry
Entry.objects.get_for_model(instance)[0].delete()
<SYSTEM_TASK:> Updates attributes for Entry instance corresponding to <END_TASK> <USER_TASK:> Description: def update_entry_attributes(sender, instance, **kwargs): """ Updates attributes for Entry instance corresponding to specified instance. :param sender: the sending class. :param instance: the instance being saved. """
from ..models import Entry

entry = Entry.objects.get_for_model(instance)[0]
default_url = getattr(instance, 'get_absolute_url', '')

entry.title = getattr(instance, 'title', str(instance))
entry.url = getattr(instance, 'url', default_url)
entry.live = bool(getattr(instance, 'live', True))

entry.save()
<SYSTEM_TASK:> Returns paginated queryset of PageRevision instances for <END_TASK> <USER_TASK:> Description: def get_revisions(page, page_num=1): """ Returns paginated queryset of PageRevision instances for specified Page instance. :param page: the page instance. :param page_num: the pagination page number. :rtype: django.db.models.query.QuerySet. """
revisions = page.revisions.order_by('-created_at')
current = page.get_latest_revision()

if current:
    # exclude() returns a new queryset; the original dropped the
    # result, so the current revision was never actually filtered out.
    revisions = revisions.exclude(id=current.id)

paginator = Paginator(revisions, 5)

try:
    revisions = paginator.page(page_num)
except PageNotAnInteger:
    revisions = paginator.page(1)
except EmptyPage:
    revisions = paginator.page(paginator.num_pages)

return revisions
<SYSTEM_TASK:> Returns GET response for specified page revisions. <END_TASK> <USER_TASK:> Description: def page_revisions(request, page_id, template_name='wagtailrollbacks/edit_handlers/revisions.html'): """ Returns GET response for specified page revisions. :param request: the request instance. :param page_id: the page ID. :param template_name: the template name. :rtype: django.http.HttpResponse. """
page = get_object_or_404(Page, pk=page_id)
page_perms = page.permissions_for_user(request.user)

if not page_perms.can_edit():
    raise PermissionDenied

page_num = request.GET.get('p', 1)
revisions = get_revisions(page, page_num)

return render(
    request,
    template_name,
    {
        'page': page,
        'revisions': revisions,
        'p': page_num,
    }
)
<SYSTEM_TASK:> Returns GET response for specified page preview. <END_TASK> <USER_TASK:> Description: def preview_page_version(request, revision_id): """ Returns GET response for specified page preview. :param request: the request instance. :param revision_id: the page revision ID. :rtype: django.http.HttpResponse. """
revision = get_object_or_404(PageRevision, pk=revision_id)

if not revision.page.permissions_for_user(request.user).can_publish():
    raise PermissionDenied

page = revision.as_page_object()
request.revision_id = revision_id

return page.serve_preview(request, page.default_preview_mode)
<SYSTEM_TASK:> Helper to actually write the keyring to Google <END_TASK> <USER_TASK:> Description: def _save_keyring(self, keyring_dict): """Helper to actually write the keyring to Google"""
import gdata
result = self.OK
file_contents = base64.urlsafe_b64encode(pickle.dumps(keyring_dict))
try:
    if self.docs_entry:
        extra_headers = {'Content-Type': 'text/plain',
                         'Content-Length': len(file_contents)}
        self.docs_entry = self.client.Put(
            file_contents,
            self.docs_entry.GetEditMediaLink().href,
            extra_headers=extra_headers
        )
    else:
        from gdata.docs.service import DocumentQuery
        # check for existence of folder, create if required
        folder_query = DocumentQuery(categories=['folder'])
        folder_query['title'] = self.collection
        folder_query['title-exact'] = 'true'
        docs = self.client.QueryDocumentListFeed(folder_query.ToUri())
        if docs.entry:
            folder_entry = docs.entry[0]
        else:
            folder_entry = self.client.CreateFolder(self.collection)
        file_handle = io.BytesIO(file_contents)
        media_source = gdata.MediaSource(
            file_handle=file_handle,
            content_type='text/plain',
            content_length=len(file_contents),
            file_name='temp')
        self.docs_entry = self.client.Upload(
            media_source,
            self._get_doc_title(),
            folder_or_uri=folder_entry
        )
except gdata.service.RequestError as ex:
    try:
        if ex.message['reason'].lower().find('conflict') != -1:
            result = self.CONFLICT
        else:
            # Google docs has a bug when updating a shared document
            # using PUT from any account other than the owner.
            # It returns an error 400 "Sorry, there was an error saving
            # the file. Please try again"
            # *despite* actually updating the document!
            # Workaround by re-reading to see if it actually updated
            msg = 'Sorry, there was an error saving the file'
            if ex.message['body'].find(msg) != -1:
                new_docs_entry, new_keyring_dict = self._read()
                if new_keyring_dict == keyring_dict:
                    result = self.OK
                else:
                    result = self.FAIL
            else:
                result = self.FAIL
    except Exception:
        result = self.FAIL
return result
<SYSTEM_TASK:> Make HTTP request, raising an exception if it fails. <END_TASK> <USER_TASK:> Description: def _request(method, url, session=None, **kwargs): """Make HTTP request, raising an exception if it fails. """
url = BASE_URL + url

if session:
    request_func = getattr(session, method)
else:
    request_func = getattr(requests, method)

response = request_func(url, **kwargs)

# raise an exception if request is not successful
if response.status_code != requests.codes.ok:
    raise DweepyError('HTTP {0} response'.format(response.status_code))

response_json = response.json()
if response_json['this'] == 'failed':
    raise DweepyError(response_json['because'])

return response_json['with']
<SYSTEM_TASK:> Send a dweet to dweet.io for a thing with a known name <END_TASK> <USER_TASK:> Description: def dweet_for(thing_name, payload, key=None, session=None): """Send a dweet to dweet.io for a thing with a known name """
if key is not None:
    params = {'key': key}
else:
    params = None

return _send_dweet(payload, '/dweet/for/{0}'.format(thing_name),
                   params=params, session=session)
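A minimal usage sketch, assuming this module is dweepy (whose public API these functions match):

import dweepy

dweepy.dweet_for('my-thing', {'temperature': 22.5})
latest = dweepy.get_latest_dweet_for('my-thing')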
<SYSTEM_TASK:> Read all the dweets for a dweeter <END_TASK> <USER_TASK:> Description: def get_dweets_for(thing_name, key=None, session=None): """Read all the dweets for a dweeter """
if key is not None:
    params = {'key': key}
else:
    params = None

# NOTE: the original passed session=None here, silently dropping the
# caller's session; pass it through instead.
return _request('get', '/get/dweets/for/{0}'.format(thing_name),
                params=params, session=session)
<SYSTEM_TASK:> Remove an alert for the given thing <END_TASK> <USER_TASK:> Description: def remove_alert(thing_name, key, session=None): """Remove an alert for the given thing """
return _request('get', '/remove/alert/for/{0}'.format(thing_name), params={'key': key}, session=session)
<SYSTEM_TASK:> list all product sets for current user <END_TASK> <USER_TASK:> Description: def get_product_sets(self): """ list all product sets for current user """
# ensure we are using api url without a specific product set id
api_url = super(ProductSetAPI, self).base_url
return self.client.get(api_url)
<SYSTEM_TASK:> Check if the timeout has been reached and raise a `StopIteration` if so. <END_TASK> <USER_TASK:> Description: def _check_stream_timeout(started, timeout): """Check if the timeout has been reached and raise a `StopIteration` if so. """
if timeout:
    elapsed = datetime.datetime.utcnow() - started
    if elapsed.seconds > timeout:
        # NOTE: raising StopIteration inside a generator body becomes
        # a RuntimeError under PEP 479 (Python 3.7+).
        raise StopIteration
<SYSTEM_TASK:> Yields dweets as received from dweet.io's streaming API <END_TASK> <USER_TASK:> Description: def _listen_for_dweets_from_response(response): """Yields dweets as received from dweet.io's streaming API """
streambuffer = ''
for byte in response.iter_content():
    if byte:
        streambuffer += byte.decode('ascii')
        try:
            dweet = json.loads(streambuffer.splitlines()[1])
        except (IndexError, ValueError):
            continue
        if isstr(dweet):
            yield json.loads(dweet)
        streambuffer = ''
<SYSTEM_TASK:> Executes a `packer build` <END_TASK> <USER_TASK:> Description: def build(self, parallel=True, debug=False, force=False, machine_readable=False): """Executes a `packer build` :param bool parallel: Run builders in parallel :param bool debug: Run in debug mode :param bool force: Force artifact output even if exists :param bool machine_readable: Make output machine-readable """
self.packer_cmd = self.packer.build

self._add_opt('-parallel=true' if parallel else None)
self._add_opt('-debug' if debug else None)
self._add_opt('-force' if force else None)
self._add_opt('-machine-readable' if machine_readable else None)
self._append_base_arguments()
self._add_opt(self.packerfile)

return self.packer_cmd()
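A usage sketch, assuming this class is python-packer's Packer and a template.json exists locally:

import packer

p = packer.Packer('template.json', vars={'region': 'us-east-1'})
result = p.build(parallel=True, debug=False, force=False)
print(result.stdout)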
<SYSTEM_TASK:> Implements the `packer fix` function <END_TASK> <USER_TASK:> Description: def fix(self, to_file=None): """Implements the `packer fix` function :param string to_file: File to output fixed template to """
self.packer_cmd = self.packer.fix

self._add_opt(self.packerfile)

result = self.packer_cmd()
if to_file:
    with open(to_file, 'w') as f:
        f.write(result.stdout.decode())

result.fixed = json.loads(result.stdout.decode())
return result
<SYSTEM_TASK:> Implements the `packer push` function <END_TASK> <USER_TASK:> Description: def push(self, create=True, token=False): """Implements the `packer push` function UNTESTED! Must be used alongside an Atlas account """
self.packer_cmd = self.packer.push

self._add_opt('-create=true' if create else None)
# NOTE: the original built '-tokn={0}', a typo for packer's -token flag.
self._add_opt('-token={0}'.format(token) if token else None)
self._add_opt(self.packerfile)

return self.packer_cmd()
<SYSTEM_TASK:> Appends base arguments to packer commands. <END_TASK> <USER_TASK:> Description: def _append_base_arguments(self): """Appends base arguments to packer commands. -except, -only, -var and -var-file are appended to almost all subcommands in packer. As such this can be called to add these flags to the subcommand. """
if self.exc and self.only:
    raise PackerException('Cannot provide both "except" and "only"')
elif self.exc:
    self._add_opt('-except={0}'.format(self._join_comma(self.exc)))
elif self.only:
    self._add_opt('-only={0}'.format(self._join_comma(self.only)))

for var, value in self.vars.items():
    self._add_opt("-var")
    self._add_opt("{0}={1}".format(var, value))

if self.var_file:
    self._add_opt('-var-file={0}'.format(self.var_file))
<SYSTEM_TASK:> Parses the machine-readable output `packer inspect` provides. <END_TASK> <USER_TASK:> Description: def _parse_inspection_output(self, output): """Parses the machine-readable output `packer inspect` provides. See the inspect method for more info. This has been tested vs. Packer v0.7.5 """
parts = {'variables': [], 'builders': [], 'provisioners': []}
for line in output.splitlines():
    line = line.split(',')
    if line[2].startswith('template'):
        del line[0:2]
        component = line[0]
        if component == 'template-variable':
            variable = {"name": line[1], "value": line[2]}
            parts['variables'].append(variable)
        elif component == 'template-builder':
            builder = {"name": line[1], "type": line[2]}
            parts['builders'].append(builder)
        elif component == 'template-provisioner':
            provisioner = {"type": line[1]}
            parts['provisioners'].append(provisioner)
return parts
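Feeding the parser a few illustrative machine-readable lines (with ``p`` from the build sketch above; the template values are made up):

sample = '\n'.join([
    '1425,,ui,say,Optional variables and their defaults:',
    '1425,,template-variable,aws_access_key,{{env `AWS_ACCESS_KEY`}},0',
    '1425,,template-builder,amazon-ebs,amazon-ebs',
    '1425,,template-provisioner,shell',
])
parts = p._parse_inspection_output(sample)
# parts == {'variables': [{'name': 'aws_access_key',
#                          'value': '{{env `AWS_ACCESS_KEY`}}'}],
#           'builders': [{'name': 'amazon-ebs', 'type': 'amazon-ebs'}],
#           'provisioners': [{'type': 'shell'}]}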
<SYSTEM_TASK:> Perform an HTTP POST request for a given url. <END_TASK> <USER_TASK:> Description: def post(self, url, data, headers=None): """ Perform an HTTP POST request for a given url. Returns the response object. """
return self._request('POST', url, data, headers=headers)
<SYSTEM_TASK:> Perform an HTTP PUT request for a given url. <END_TASK> <USER_TASK:> Description: def put(self, url, data, headers=None): """ Perform an HTTP PUT request for a given url. Returns the response object. """
return self._request('PUT', url, data, headers=headers)
<SYSTEM_TASK:> Query a fulltext index by key and query or just a plain Lucene query, <END_TASK> <USER_TASK:> Description: def query(self, *args): """ Query a fulltext index by key and query or just a plain Lucene query, i1 = gdb.nodes.indexes.get('people',type='fulltext', provider='lucene') i1.query('name','do*') i1.query('name:do*') In this example, the last two line are equivalent. """
if not args or len(args) > 2:
    raise TypeError('query() takes 2 or 3 arguments (a query or a key '
                    'and a query) (%d given)' % (len(args) + 1))
elif len(args) == 1:
    query, = args
    return self.get('text').query(text_type(query))
else:
    key, query = args
    index_key = self.get(key)
    if isinstance(query, string_types):
        return index_key.query(query)
    else:
        if query.fielded:
            raise ValueError('Queries with an included key should '
                             'not include a field.')
        return index_key.query(text_type(query))
<SYSTEM_TASK:> Send an HTTP request to the REST API. <END_TASK> <USER_TASK:> Description: def do_call(self, path, method, body=None, headers=None): """ Send an HTTP request to the REST API. :param string path: A URL :param string method: The HTTP method (GET, POST, etc.) to use in the request. :param string body: A string representing any data to be sent in the body of the HTTP request. :param dictionary headers: "{header-name: header-value}" dictionary. """
url = urljoin(self.base_url, path)
try:
    resp = requests.request(method, url, data=body, headers=headers,
                            auth=self.auth, timeout=self.timeout)
except requests.exceptions.Timeout as out:
    raise NetworkError("Timeout while trying to connect to RabbitMQ")
except requests.exceptions.RequestException as err:
    # All other requests exceptions inherit from RequestException
    raise NetworkError("Error during request %s %s" % (type(err), err))

try:
    content = resp.json()
except ValueError as out:
    content = None

# 'success' HTTP status codes are 200-206
if resp.status_code < 200 or resp.status_code > 206:
    raise HTTPError(content, resp.status_code, resp.text, path, body)
else:
    if content:
        return content
    else:
        return None
<SYSTEM_TASK:> Wrapper around http.do_call that transforms some HTTPError into <END_TASK> <USER_TASK:> Description: def _call(self, path, method, body=None, headers=None): """ Wrapper around http.do_call that transforms some HTTPError into our own exceptions """
try:
    resp = self.http.do_call(path, method, body, headers)
except http.HTTPError as err:
    if err.status == 401:
        raise PermissionError('Insufficient permissions to query ' +
                              '%s with user %s :%s' %
                              (path, self.user, err))
    raise
return resp
<SYSTEM_TASK:> A convenience function used in the event that you need to confirm that <END_TASK> <USER_TASK:> Description: def get_whoami(self): """ A convenience function used in the event that you need to confirm that the broker thinks you are who you think you are. :returns dict whoami: Dict structure contains: * administrator: whether the user has admin privileges * name: user name * auth_backend: backend used to determine admin rights """
path = Client.urls['whoami']
whoami = self._call(path, 'GET')
return whoami
<SYSTEM_TASK:> A convenience function for getting back only the vhost names instead of <END_TASK> <USER_TASK:> Description: def get_vhost_names(self): """ A convenience function for getting back only the vhost names instead of the larger vhost dicts. :returns list vhost_names: A list of just the vhost names. """
vhosts = self.get_all_vhosts()
vhost_names = [i['name'] for i in vhosts]
return vhost_names
<SYSTEM_TASK:> Returns the attributes of a single named vhost in a dict. <END_TASK> <USER_TASK:> Description: def get_vhost(self, vname): """ Returns the attributes of a single named vhost in a dict. :param string vname: Name of the vhost to get. :returns dict vhost: Attribute dict for the named vhost """
vname = quote(vname, '') path = Client.urls['vhosts_by_name'] % vname vhost = self._call(path, 'GET', headers=Client.json_headers) return vhost
<SYSTEM_TASK:> Creates a vhost on the server to house exchanges. <END_TASK> <USER_TASK:> Description: def create_vhost(self, vname): """ Creates a vhost on the server to house exchanges. :param string vname: The name to give to the vhost on the server :returns: boolean """
vname = quote(vname, '') path = Client.urls['vhosts_by_name'] % vname return self._call(path, 'PUT', headers=Client.json_headers)
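A create-and-verify round trip, sketched with a placeholder vhost name:

client.create_vhost('staging')
assert 'staging' in client.get_vhost_names()
client.delete_vhost('staging')  # also drops its exchanges and queues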
<SYSTEM_TASK:> Deletes a vhost from the server. Note that this also deletes any <END_TASK> <USER_TASK:> Description: def delete_vhost(self, vname): """ Deletes a vhost from the server. Note that this also deletes any exchanges or queues that belong to this vhost. :param string vname: Name of the vhost to delete from the server. """
vname = quote(vname, '') path = Client.urls['vhosts_by_name'] % vname return self._call(path, 'DELETE')
<SYSTEM_TASK:> Set permissions for a given username on a given vhost. Both <END_TASK> <USER_TASK:> Description: def set_vhost_permissions(self, vname, username, config, rd, wr): """ Set permissions for a given username on a given vhost. Both must already exist. :param string vname: Name of the vhost to set perms on. :param string username: User to set permissions for. :param string config: Permission pattern for configuration operations for this user in this vhost. :param string rd: Permission pattern for read operations for this user in this vhost :param string wr: Permission pattern for write operations for this user in this vhost. Permission patterns are regex strings. If you're unfamiliar with this, you should definitely check out this section of the RabbitMQ docs: http://www.rabbitmq.com/admin-guide.html#access-control """
vname = quote(vname, '') body = json.dumps({"configure": config, "read": rd, "write": wr}) path = Client.urls['vhost_permissions'] % (vname, username) return self._call(path, 'PUT', body, headers=Client.json_headers)
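Because the patterns are regexes, a full grant is the usual '.*' triple, while a narrower grant restricts by resource name; a sketch with placeholder vhost and user names:

# configure / read / write patterns, each matched against resource
# names inside the vhost
client.set_vhost_permissions('staging', 'svc_worker', '.*', '.*', '.*')

# Configure nothing; read/write only resources named "jobs.*"
client.set_vhost_permissions('staging', 'svc_jobs', '^$',
                             '^jobs\\..*', '^jobs\\..*')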
<SYSTEM_TASK:> Delete permission for a given username on a given vhost. Both <END_TASK> <USER_TASK:> Description: def delete_permission(self, vname, username): """ Delete permission for a given username on a given vhost. Both must already exist. :param string vname: Name of the vhost to remove perms from. :param string username: User whose permissions will be removed. """
vname = quote(vname, '') path = Client.urls['vhost_permissions'] % (vname, username) return self._call(path, 'DELETE')
<SYSTEM_TASK:> Gets a single exchange which requires a vhost and name. <END_TASK> <USER_TASK:> Description: def get_exchange(self, vhost, name): """ Gets a single exchange which requires a vhost and name. :param string vhost: The vhost containing the target exchange :param string name: The name of the exchange :returns: dict """
vhost = quote(vhost, '') name = quote(name, '') path = Client.urls['exchange_by_name'] % (vhost, name) exch = self._call(path, 'GET') return exch
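Usage sketch; the default vhost '/' can be passed literally since it is URL-quoted to '%2F' internally (amq.direct is a built-in exchange):

exch = client.get_exchange('/', 'amq.direct')
print(exch['type'])     # 'direct'
print(exch['durable'])  # True for the built-in exchanges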
<SYSTEM_TASK:> Delete the named exchange from the named vhost. The API returns a 204 <END_TASK> <USER_TASK:> Description: def delete_exchange(self, vhost, name): """ Delete the named exchange from the named vhost. The API returns a 204 on success, in which case this method returns True, otherwise the error is raised. :param string vhost: Vhost where target exchange was created :param string name: The name of the exchange to delete. :returns bool: True on success. """
vhost = quote(vhost, '') name = quote(name, '') path = Client.urls['exchange_by_name'] % (vhost, name) self._call(path, 'DELETE') return True
<SYSTEM_TASK:> Get all queues, or all queues in a vhost if vhost is not None. <END_TASK> <USER_TASK:> Description: def get_queues(self, vhost=None): """ Get all queues, or all queues in a vhost if vhost is not None. Returns a list. :param string vhost: The virtual host to list queues for. If this is None (the default), all queues for the broker instance are returned. :returns: A list of dicts, each representing a queue. :rtype: list of dicts """
if vhost: vhost = quote(vhost, '') path = Client.urls['queues_by_vhost'] % vhost else: path = Client.urls['all_queues'] queues = self._call(path, 'GET') return queues or list()
<SYSTEM_TASK:> Get a single queue, which requires both vhost and name. <END_TASK> <USER_TASK:> Description: def get_queue(self, vhost, name): """ Get a single queue, which requires both vhost and name. :param string vhost: The virtual host for the queue being requested. If the vhost is '/', note that it will be translated to '%2F' to conform to URL encoding requirements. :param string name: The name of the queue being requested. :returns: A dictionary of queue properties. :rtype: dict """
vhost = quote(vhost, '') name = quote(name, '') path = Client.urls['queues_by_name'] % (vhost, name) queue = self._call(path, 'GET') return queue
<SYSTEM_TASK:> Get the number of messages currently sitting in either the queue <END_TASK> <USER_TASK:> Description: def get_queue_depths(self, vhost, names=None): """ Get the number of messages currently sitting in either the queue names listed in 'names', or all queues in 'vhost' if no 'names' are given. :param str vhost: Vhost where queues in 'names' live. :param list names: OPTIONAL - Specific queues to show depths for. If None, show depths for all queues in 'vhost'. """
vhost = quote(vhost, '')
if not names:
    # get all queues in vhost
    path = Client.urls['queues_by_vhost'] % vhost
    queues = self._call(path, 'GET')
    for queue in queues:
        depth = queue['messages']
        print("\t%s: %s" % (queue['name'], depth))
else:
    # get the named queues only.
    for name in names:
        depth = self.get_queue_depth(vhost, name)
        print("\t%s: %s" % (name, depth))
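Both call forms, sketched with placeholder names:

# Depths for every queue in the vhost...
client.get_queue_depths('staging')

# ...or only for specific queues.
client.get_queue_depths('staging', names=['orders', 'emails'])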
<SYSTEM_TASK:> Purge all messages from one or more queues. <END_TASK> <USER_TASK:> Description: def purge_queues(self, queues): """ Purge all messages from one or more queues. :param list queues: A list of ('qname', 'vhost') tuples. :returns: True on success """
for name, vhost in queues: vhost = quote(vhost, '') name = quote(name, '') path = Client.urls['purge_queue'] % (vhost, name) self._call(path, 'DELETE') return True
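Note the tuple order is (queue name, vhost); a sketch with placeholder names:

client.purge_queues([('orders', 'staging'),
                     ('emails', '/')])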
<SYSTEM_TASK:> Purge all messages from a single queue. This is a convenience method <END_TASK> <USER_TASK:> Description: def purge_queue(self, vhost, name): """ Purge all messages from a single queue. This is a convenience method so you aren't forced to supply a list containing a single tuple to the purge_queues method. :param string vhost: The vhost of the queue being purged. :param string name: The name of the queue being purged. :rtype: None """
vhost = quote(vhost, '') name = quote(name, '') path = Client.urls['purge_queue'] % (vhost, name) return self._call(path, 'DELETE')
<SYSTEM_TASK:> Create a queue. The API documentation specifies that all of the body <END_TASK> <USER_TASK:> Description: def create_queue(self, vhost, name, **kwargs): """ Create a queue. The API documentation specifies that all of the body elements are optional, so this method only requires arguments needed to form the URI :param string vhost: The vhost to create the queue in. :param string name: The name of the queue More on these operations can be found at: http://www.rabbitmq.com/amqp-0-9-1-reference.html """
vhost = quote(vhost, '') name = quote(name, '') path = Client.urls['queues_by_name'] % (vhost, name) body = json.dumps(kwargs) return self._call(path, 'PUT', body, headers=Client.json_headers)
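The keyword arguments are serialized directly into the request body, so optional queue properties from the AMQP reference pass straight through; a sketch with illustrative properties:

client.create_queue('staging', 'orders',
                    durable=True,
                    auto_delete=False,
                    arguments={'x-message-ttl': 60000})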
<SYSTEM_TASK:> Deletes the named queue from the named vhost. <END_TASK> <USER_TASK:> Description: def delete_queue(self, vhost, qname): """ Deletes the named queue from the named vhost. :param string vhost: Vhost housing the queue to be deleted. :param string qname: Name of the queue to delete. Note that if you just want to delete the messages from a queue, you should use purge_queue instead of deleting/recreating a queue. """
vhost = quote(vhost, '') qname = quote(qname, '') path = Client.urls['queues_by_name'] % (vhost, qname) return self._call(path, 'DELETE', headers=Client.json_headers)
<SYSTEM_TASK:> Get a connection by name. To get the names, use get_connections. <END_TASK> <USER_TASK:> Description: def get_connection(self, name): """ Get a connection by name. To get the names, use get_connections. :param string name: Name of connection to get :returns dict conn: A connection attribute dictionary. """
name = quote(name, '') path = Client.urls['connections_by_name'] % name conn = self._call(path, 'GET') return conn
<SYSTEM_TASK:> Close the named connection. The API returns a 204 on success, <END_TASK> <USER_TASK:> Description: def delete_connection(self, name): """ Close the named connection. The API returns a 204 on success, in which case this method returns True, otherwise the error is raised. :param string name: The name of the connection to delete. :returns bool: True on success. """
name = quote(name, '') path = Client.urls['connections_by_name'] % name self._call(path, 'DELETE') return True
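A sweep that closes every client connection, sketched on the assumption that the get_connections() listing method referenced above is available:

for conn in client.get_connections():
    client.delete_connection(conn['name'])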
<SYSTEM_TASK:> Get a channel by name. To get the names, use get_channels. <END_TASK> <USER_TASK:> Description: def get_channel(self, name): """ Get a channel by name. To get the names, use get_channels. :param string name: Name of channel to get :returns dict chan: A channel attribute dictionary. """
name = quote(name, '') path = Client.urls['channels_by_name'] % name chan = self._call(path, 'GET') return chan
<SYSTEM_TASK:> Creates a binding between an exchange and a queue on a given vhost. <END_TASK> <USER_TASK:> Description: def create_binding(self, vhost, exchange, queue, rt_key=None, args=None): """ Creates a binding between an exchange and a queue on a given vhost. :param string vhost: vhost housing the exchange/queue to bind :param string exchange: the target exchange of the binding :param string queue: the queue to bind to the exchange :param string rt_key: the routing key to use for the binding :param list args: extra arguments to associate w/ the binding. :returns: boolean """
vhost = quote(vhost, '') exchange = quote(exchange, '') queue = quote(queue, '') body = json.dumps({'routing_key': rt_key, 'arguments': args or []}) path = Client.urls['bindings_between_exch_queue'] % (vhost, exchange, queue) binding = self._call(path, 'POST', body=body, headers=Client.json_headers) return binding
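A sketch tying queue creation and binding together; vhost, exchange, queue, and routing key are placeholders:

client.create_queue('staging', 'orders')
client.create_binding('staging', 'orders-exchange', 'orders',
                      rt_key='order.created')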
<SYSTEM_TASK:> Deletes a binding between an exchange and a queue on a given vhost. <END_TASK> <USER_TASK:> Description: def delete_binding(self, vhost, exchange, queue, rt_key): """ Deletes a binding between an exchange and a queue on a given vhost. :param string vhost: vhost housing the exchange/queue binding :param string exchange: the target exchange of the binding :param string queue: the queue bound to the exchange :param string rt_key: the routing key of the binding to delete """
vhost = quote(vhost, '') exchange = quote(exchange, '') queue = quote(queue, '') body = '' path = Client.urls['rt_bindings_between_exch_queue'] % (vhost, exchange, queue, rt_key) return self._call(path, 'DELETE', headers=Client.json_headers)