text_prompt: string (lengths 100 to 17.7k)
code_prompt: string (lengths 7 to 9.86k)
<SYSTEM_TASK:> Key used for storing search docs in local cache. <END_TASK> <USER_TASK:> Description: def search_document_cache_key(self): """Key used for storing search docs in local cache."""
return "elasticsearch_django:{}.{}.{}".format( self._meta.app_label, self._meta.model_name, self.pk )
<SYSTEM_TASK:> Return True if the field can be serialized into a JSON doc. <END_TASK> <USER_TASK:> Description: def _is_field_serializable(self, field_name): """Return True if the field can be serialized into a JSON doc."""
return ( self._meta.get_field(field_name).get_internal_type() in self.SIMPLE_UPDATE_FIELD_TYPES )
<SYSTEM_TASK:> Clean the list of update_fields based on the index being updated. <END_TASK> <USER_TASK:> Description: def clean_update_fields(self, index, update_fields): """ Clean the list of update_fields based on the index being updated. If any field in the update_fields list is not in the set of properties defined by the index mapping for this model, then we ignore it. If a field _is_ in the mapping, but the underlying model field is a related object, and thereby not directly serializable, then this method will raise a ValueError. """
search_fields = get_model_index_properties(self, index)
clean_fields = [f for f in update_fields if f in search_fields]
ignore = [f for f in update_fields if f not in search_fields]
if ignore:
    logger.debug("Ignoring fields from partial update: %s", ignore)
for f in clean_fields:
    if not self._is_field_serializable(f):
        raise ValueError(
            "'%s' cannot be automatically serialized into a search document "
            "property. Please override as_search_document_update." % f
        )
return clean_fields
<SYSTEM_TASK:> Return a partial update document based on which fields have been updated. <END_TASK> <USER_TASK:> Description: def as_search_document_update(self, *, index, update_fields): """ Return a partial update document based on which fields have been updated. If an object is saved with the `update_fields` argument passed through, then it is assumed that this is a 'partial update'. In this scenario we need a {property: value} dictionary containing just the fields we want to update. This method handles two possible update strategies - 'full' or 'partial'. The default 'full' strategy simply returns the value of `as_search_document` - thereby replacing the entire document each time. The 'partial' strategy is more intelligent - it will determine whether the fields passed are in the search document mapping, and return a partial update document that contains only those that are. In addition, if any field that _is_ included cannot be automatically serialized (e.g. a RelatedField object), then this method will raise a ValueError. In this scenario, you should override this method in your subclass. >>> def as_search_document_update(self, index, update_fields): ... if 'user' in update_fields: ... update_fields.remove('user') ... doc = super().as_search_document_update(index, update_fields) ... doc['user'] = self.user.get_full_name() ... return doc ... return super().as_search_document_update(index, update_fields) You may also wish to subclass this method to perform field-specific logic - in this example if only the timestamp is being saved, then ignore the update if the timestamp is later than a certain time. >>> def as_search_document_update(self, index, update_fields): ... if update_fields == ['timestamp']: ... if self.timestamp > today(): ... return {} ... return super().as_search_document_update(index, update_fields) """
if UPDATE_STRATEGY == UPDATE_STRATEGY_FULL: return self.as_search_document(index=index) if UPDATE_STRATEGY == UPDATE_STRATEGY_PARTIAL: # in partial mode we update the intersection of update_fields and # properties found in the mapping file. return { k: getattr(self, k) for k in self.clean_update_fields( index=index, update_fields=update_fields ) }
<SYSTEM_TASK:> Return an object as represented in a bulk api operation. <END_TASK> <USER_TASK:> Description: def as_search_action(self, *, index, action): """ Return an object as represented in a bulk api operation. Bulk API operations have a very specific format. This function will call the standard `as_search_document` method on the object and then wrap that up in the correct format for the action specified. https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-bulk.html Args: index: string, the name of the index in which the action is to be taken. Bulk operations are only ever carried out on a single index at a time. action: string ['index' | 'update' | 'delete'] - this decides how the final document is formatted. Returns a dictionary. """
if action not in ("index", "update", "delete"): raise ValueError("Action must be 'index', 'update' or 'delete'.") document = { "_index": index, "_type": self.search_doc_type, "_op_type": action, "_id": self.pk, } if action == "index": document["_source"] = self.as_search_document(index=index) elif action == "update": document["doc"] = self.as_search_document(index=index) return document
<SYSTEM_TASK:> Fetch the object's document from a search index by id. <END_TASK> <USER_TASK:> Description: def fetch_search_document(self, *, index): """Fetch the object's document from a search index by id."""
assert self.pk, "Object must have a primary key before being indexed." client = get_client() return client.get(index=index, doc_type=self.search_doc_type, id=self.pk)
<SYSTEM_TASK:> Create or replace search document in named index. <END_TASK> <USER_TASK:> Description: def index_search_document(self, *, index): """ Create or replace search document in named index. Checks the local cache to see if the document has changed, and if not aborts the update, else pushes to ES, and then resets the local cache. Cache timeout is set as "cache_expiry" in the settings, and defaults to 60s. """
cache_key = self.search_document_cache_key new_doc = self.as_search_document(index=index) cached_doc = cache.get(cache_key) if new_doc == cached_doc: logger.debug("Search document for %r is unchanged, ignoring update.", self) return [] cache.set(cache_key, new_doc, timeout=get_setting("cache_expiry", 60)) get_client().index( index=index, doc_type=self.search_doc_type, body=new_doc, id=self.pk )
<SYSTEM_TASK:> Partial update of a document in named index. <END_TASK> <USER_TASK:> Description: def update_search_document(self, *, index, update_fields): """ Partial update of a document in named index. Partial updates are invoked via a call to save the document with 'update_fields'. These fields are passed to the as_search_document method so that it can build a partial document. NB we don't just call as_search_document and then strip the fields _not_ in update_fields as we are trying to avoid possibly expensive operations in building the source document. The canonical example for this method is updating a single timestamp on a model - we don't want to have to walk the model relations and build a document in this case - we just want to push the timestamp. When POSTing a partial update the `as_search_document` doc must be passed to the `client.update` wrapped in a "doc" node, see: https://www.elastic.co/guide/en/elasticsearch/reference/current/docs-update.html """
doc = self.as_search_document_update(index=index, update_fields=update_fields) if not doc: logger.debug("Ignoring object update as document is empty.") return get_client().update( index=index, doc_type=self.search_doc_type, body={"doc": doc}, id=self.pk )
<SYSTEM_TASK:> Delete document from named index. <END_TASK> <USER_TASK:> Description: def delete_search_document(self, *, index): """Delete document from named index."""
cache.delete(self.search_document_cache_key) get_client().delete(index=index, doc_type=self.search_doc_type, id=self.pk)
<SYSTEM_TASK:> Returns the value of a plugin setting. <END_TASK> <USER_TASK:> Description: def pluginSetting(name, namespace=None, typ=None): ''' Returns the value of a plugin setting. :param name: the name of the setting. It is not the full path, but just the last name of it :param namespace: The namespace. If not passed or None, the namespace will be inferred from the caller method. Normally, this should not be passed, since it suffices to let this function find out the plugin from where it is being called, and it will automatically use the corresponding plugin namespace ''' def _find_in_cache(name, key): for setting in _settings[namespace]: if setting["name"] == name: return setting[key] return None def _type_map(t): """Return setting python type"""
if t == BOOL: return bool elif t == NUMBER: return float else: return unicode namespace = namespace or _callerName().split(".")[0] full_name = namespace + "/" + name if settings.contains(full_name): if typ is None: typ = _type_map(_find_in_cache(name, 'type')) v = settings.value(full_name, None, type=typ) try: if isinstance(v, QPyNullVariant): v = None except: pass return v else: return _find_in_cache(name, 'default')
<SYSTEM_TASK:> Create an index and apply mapping if appropriate. <END_TASK> <USER_TASK:> Description: def create_index(index): """Create an index and apply mapping if appropriate."""
logger.info("Creating search index: '%s'", index) client = get_client() return client.indices.create(index=index, body=get_index_mapping(index))
<SYSTEM_TASK:> Remove all orphaned documents from an index. <END_TASK> <USER_TASK:> Description: def prune_index(index): """Remove all orphaned documents from an index. This function works by scanning the remote index, and in each returned batch of documents looking up whether they appear in the default index queryset. If they don't (they've been deleted, or no longer fit the qs filters) then they are deleted from the index. The deletion is done in one hit after the entire remote index has been scanned. The elasticsearch.helpers.scan function returns each document one at a time, so this function can swamp the database with SELECT requests. Please use sparingly. Returns a list of ids of all the objects deleted. """
logger.info("Pruning missing objects from index '%s'", index) prunes = [] responses = [] client = get_client() for model in get_index_models(index): for hit in scan_index(index, model): obj = _prune_hit(hit, model) if obj: prunes.append(obj) logger.info( "Found %s objects of type '%s' for deletion from '%s'.", len(prunes), model, index, ) if len(prunes) > 0: actions = bulk_actions(prunes, index, "delete") response = helpers.bulk( client, actions, chunk_size=get_setting("chunk_size") ) responses.append(response) return responses
<SYSTEM_TASK:> Check whether a document should be pruned. <END_TASK> <USER_TASK:> Description: def _prune_hit(hit, model): """ Check whether a document should be pruned. This method uses the SearchDocumentManagerMixin.in_search_queryset method to determine whether a 'hit' (search document) should be pruned from an index, and if so it returns the hit as a Django object (id=hit_id). Args: hit: dict object that represents a document as returned from the scan_index function. (Contains object id and index.) model: the Django model (not object) from which the document was derived. Used to get the correct model manager and bulk action. Returns: an object of type model, with id=hit_id. NB this is not the object itself, which by definition may not exist in the underlying database, but a temporary object with the document id - which is enough to create a 'delete' action. """
hit_id = hit["_id"] hit_index = hit["_index"] if model.objects.in_search_queryset(hit_id, index=hit_index): logger.debug( "%s with id=%s exists in the '%s' index queryset.", model, hit_id, hit_index ) return None else: logger.debug( "%s with id=%s does not exist in the '%s' index queryset and will be pruned.", model, hit_id, hit_index, ) # we don't need the full obj for a delete action, just the id. # (the object itself may not even exist.) return model(pk=hit_id)
<SYSTEM_TASK:> Yield all documents of model type in an index. <END_TASK> <USER_TASK:> Description: def scan_index(index, model): """ Yield all documents of model type in an index. This function calls the elasticsearch.helpers.scan function, and yields all the documents in the index that match the doc_type produced by a specific Django model. Args: index: string, the name of the index to scan, must be a configured index as returned from settings.get_index_names. model: a Django model type, used to filter the documents that are scanned. Yields each document of type model in index, one at a time. """
# see https://www.elastic.co/guide/en/elasticsearch/reference/current/query-dsl-type-query.html query = {"query": {"type": {"value": model._meta.model_name}}} client = get_client() for hit in helpers.scan(client, index=index, query=query): yield hit
<SYSTEM_TASK:> Yield bulk api 'actions' from a collection of objects. <END_TASK> <USER_TASK:> Description: def bulk_actions(objects, index, action): """ Yield bulk api 'actions' from a collection of objects. The output from this method can be fed in to the bulk api helpers - each document returned by get_documents is decorated with the appropriate bulk api op_type. Args: objects: iterable (queryset, list, ...) of SearchDocumentMixin objects. If the objects argument is a generator, then this function will yield the results rather than returning them. index: string, the name of the index to target - the index name is embedded into the return value and is used by the bulk api. action: string ['index' | 'update' | 'delete'] - this decides how the final document is formatted. """
assert ( index != "_all" ), "index arg must be a valid index name. '_all' is a reserved term." logger.info("Creating bulk '%s' actions for '%s'", action, index) for obj in objects: try: logger.debug("Appending '%s' action for '%r'", action, obj) yield obj.as_search_action(index=index, action=action) except Exception: logger.exception("Unable to create search action for %s", obj)
<SYSTEM_TASK:> Check that an index mapping JSON file exists. <END_TASK> <USER_TASK:> Description: def _validate_mapping(index, strict=False): """Check that an index mapping JSON file exists."""
try: settings.get_index_mapping(index) except IOError: if strict: raise ImproperlyConfigured("Index '%s' has no mapping file." % index) else: logger.warning("Index '%s' has no mapping, relying on ES instead.", index)
<SYSTEM_TASK:> Check that a model configured for an index subclasses the required classes. <END_TASK> <USER_TASK:> Description: def _validate_model(model): """Check that a model configured for an index subclasses the required classes."""
if not hasattr(model, "as_search_document"): raise ImproperlyConfigured("'%s' must implement `as_search_document`." % model) if not hasattr(model.objects, "get_search_queryset"): raise ImproperlyConfigured( "'%s.objects must implement `get_search_queryset`." % model )
<SYSTEM_TASK:> Connect up post_save, post_delete signals for models. <END_TASK> <USER_TASK:> Description: def _connect_signals(): """Connect up post_save, post_delete signals for models."""
for index in settings.get_index_names(): for model in settings.get_index_models(index): _connect_model_signals(model)
<SYSTEM_TASK:> Connect signals for a single model. <END_TASK> <USER_TASK:> Description: def _connect_model_signals(model): """Connect signals for a single model."""
dispatch_uid = "%s.post_save" % model._meta.model_name logger.debug("Connecting search index model post_save signal: %s", dispatch_uid) signals.post_save.connect(_on_model_save, sender=model, dispatch_uid=dispatch_uid) dispatch_uid = "%s.post_delete" % model._meta.model_name logger.debug("Connecting search index model post_delete signal: %s", dispatch_uid) signals.post_delete.connect( _on_model_delete, sender=model, dispatch_uid=dispatch_uid )
<SYSTEM_TASK:> Wrapper around the instance manager method. <END_TASK> <USER_TASK:> Description: def _in_search_queryset(*, instance, index) -> bool: """Wrapper around the instance manager method."""
try: return instance.__class__.objects.in_search_queryset(instance.id, index=index) except Exception: logger.exception("Error checking object in_search_queryset.") return False
<SYSTEM_TASK:> Validate config and connect signals. <END_TASK> <USER_TASK:> Description: def ready(self): """Validate config and connect signals."""
super(ElasticAppConfig, self).ready() _validate_config(settings.get_setting("strict_validation")) _connect_signals()
<SYSTEM_TASK:> Return the JSON mapping file for an index. <END_TASK> <USER_TASK:> Description: def get_index_mapping(index): """Return the JSON mapping file for an index. Mappings are stored as JSON files in the mappings subdirectory of this app. They must be saved as {{index}}.json. Args: index: string, the name of the index to look for. """
# app_path = apps.get_app_config('elasticsearch_django').path mappings_dir = get_setting("mappings_dir") filename = "%s.json" % index path = os.path.join(mappings_dir, filename) with open(path, "r") as f: return json.load(f)
<SYSTEM_TASK:> Return the list of properties specified for a model in an index. <END_TASK> <USER_TASK:> Description: def get_model_index_properties(instance, index): """Return the list of properties specified for a model in an index."""
mapping = get_index_mapping(index) doc_type = instance._meta.model_name.lower() return list(mapping["mappings"][doc_type]["properties"].keys())
<SYSTEM_TASK:> Return list of models configured for a named index. <END_TASK> <USER_TASK:> Description: def get_index_models(index): """Return list of models configured for a named index. Args: index: string, the name of the index to look up. """
models = [] for app_model in get_index_config(index).get("models"): app, model = app_model.split(".") models.append(apps.get_model(app, model)) return models
<SYSTEM_TASK:> Return list of all indexes in which a model is configured. <END_TASK> <USER_TASK:> Description: def get_model_indexes(model): """Return list of all indexes in which a model is configured. A model may be configured to appear in multiple indexes. This function will return the names of the indexes as a list of strings. This is useful if you want to know which indexes need updating when a model is saved. Args: model: a Django model class. """
indexes = [] for index in get_index_names(): for app_model in get_index_models(index): if app_model == model: indexes.append(index) return indexes
<SYSTEM_TASK:> Returns an indented HTML pretty-print version of JSON. <END_TASK> <USER_TASK:> Description: def pprint(data): """ Returns an indented HTML pretty-print version of JSON. Take the event_payload JSON, indent it, order the keys and then present it as a <code> block. That's about as good as we can get until someone builds a custom syntax function. """
pretty = json.dumps(data, sort_keys=True, indent=4, separators=(",", ": ")) html = pretty.replace(" ", "&nbsp;").replace("\n", "<br>") return mark_safe("<code>%s</code>" % html)
<SYSTEM_TASK:> Context manager used to temporarily disable auto_sync. <END_TASK> <USER_TASK:> Description: def disable_search_updates(): """ Context manager used to temporarily disable auto_sync. This is useful when performing bulk updates on objects - when you may not want to flood the indexing process. >>> with disable_search_updates(): ... for obj in model.objects.all(): ... obj.save() The function works by temporarily removing the apps._on_model_save signal handler from the model.post_save signal receivers, and then restoring them after. """
_receivers = signals.post_save.receivers.copy() signals.post_save.receivers = _strip_on_model_save() yield signals.post_save.receivers = _receivers
<SYSTEM_TASK:> Trap the timeout. In Async mode requestTimedOut is called after replyFinished <END_TASK> <USER_TASK:> Description: def requestTimedOut(self, reply): """Trap the timeout. In Async mode requestTimedOut is called after replyFinished"""
# adapt http_call_result based on receiving the QGIS timer timeout signal
self.exception_class = RequestsExceptionTimeout
self.http_call_result.exception = RequestsExceptionTimeout("Timeout error")
<SYSTEM_TASK:> Handle SSL errors, logging them if debug is on and ignoring them <END_TASK> <USER_TASK:> Description: def sslErrors(self, ssl_errors): """ Handle SSL errors, logging them if debug is on and ignoring them if disable_ssl_certificate_validation is set. """
if ssl_errors: for v in ssl_errors: self.msg_log("SSL Error: %s" % v.errorString()) if self.disable_ssl_certificate_validation: self.reply.ignoreSslErrors()
<SYSTEM_TASK:> Run do_index_command on each specified index and log the output. <END_TASK> <USER_TASK:> Description: def handle(self, *args, **options): """Run do_index_command on each specified index and log the output."""
for index in options.pop("indexes"): data = {} try: data = self.do_index_command(index, **options) except TransportError as ex: logger.warning("ElasticSearch threw an error: %s", ex) data = {"index": index, "status": ex.status_code, "reason": ex.error} finally: logger.info(data)
<SYSTEM_TASK:> Creates a template. <END_TASK> <USER_TASK:> Description: def create(self, Name, Subject, HtmlBody=None, TextBody=None, Alias=None): """ Creates a template. :param Name: Name of template :param Subject: The content to use for the Subject when this template is used to send email. :param HtmlBody: The content to use for the HtmlBody when this template is used to send email. :param TextBody: The content to use for the TextBody when this template is used to send email. :return: """
assert TextBody or HtmlBody, "Provide either email TextBody or HtmlBody or both" data = {"Name": Name, "Subject": Subject, "HtmlBody": HtmlBody, "TextBody": TextBody, "Alias": Alias} return self._init_instance(self.call("POST", "/templates", data=data))
<SYSTEM_TASK:> Helper method for instantiating PostmarkClient from dict-like objects. <END_TASK> <USER_TASK:> Description: def from_config(cls, config, prefix="postmark_", is_uppercase=False): """ Helper method for instantiating PostmarkClient from dict-like objects. """
kwargs = {} for arg in get_args(cls): key = prefix + arg if is_uppercase: key = key.upper() else: key = key.lower() if key in config: kwargs[arg] = config[key] return cls(**kwargs)
<SYSTEM_TASK:> Split a container into n-sized chunks. <END_TASK> <USER_TASK:> Description: def chunks(container, n): """ Split a container into n-sized chunks. """
for i in range(0, len(container), n): yield container[i : i + n]
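A minimal usage sketch of the `chunks` helper above, in doctest style; the input list and chunk size are arbitrary illustrative values:

>>> list(chunks([1, 2, 3, 4, 5], 2))
[[1, 2], [3, 4], [5]]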
<SYSTEM_TASK:> Helper to iterate over remote data via count & offset pagination. <END_TASK> <USER_TASK:> Description: def sizes(count, offset=0, max_chunk=500): """ Helper to iterate over remote data via count & offset pagination. """
if count is None: chunk = max_chunk while True: yield chunk, offset offset += chunk else: while count: chunk = min(count, max_chunk) count = max(0, count - max_chunk) yield chunk, offset offset += chunk
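An illustrative doctest-style sketch of how `sizes` paginates a fixed count into (chunk, offset) pairs; the count of 1200 is an arbitrary example value:

>>> list(sizes(1200, max_chunk=500))
[(500, 0), (500, 500), (200, 1000)]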
<SYSTEM_TASK:> Constructs appropriate exception from list of responses and raises it. <END_TASK> <USER_TASK:> Description: def raise_for_response(self, responses): """ Constructs appropriate exception from list of responses and raises it. """
exception_messages = [self.client.format_exception_message(response) for response in responses] if len(exception_messages) == 1: message = exception_messages[0] else: message = "[%s]" % ", ".join(exception_messages) raise PostmarkerException(message)
<SYSTEM_TASK:> Converts list to string with comma separated values. For string is no-op. <END_TASK> <USER_TASK:> Description: def list_to_csv(value): """ Converts list to string with comma separated values. For string is no-op. """
if isinstance(value, (list, tuple, set)): value = ",".join(value) return value
<SYSTEM_TASK:> Converts incoming attachment into dictionary. <END_TASK> <USER_TASK:> Description: def prepare_attachments(attachment): """ Converts incoming attachment into dictionary. """
if isinstance(attachment, tuple): result = {"Name": attachment[0], "Content": attachment[1], "ContentType": attachment[2]} if len(attachment) == 4: result["ContentID"] = attachment[3] elif isinstance(attachment, MIMEBase): payload = attachment.get_payload() content_type = attachment.get_content_type() # Special case for message/rfc822 # Even if RFC implies such attachments being not base64-encoded, # Postmark requires all attachments to be encoded in this way if content_type == "message/rfc822" and not isinstance(payload, str): payload = b64encode(payload[0].get_payload(decode=True)).decode() result = { "Name": attachment.get_filename() or "attachment.txt", "Content": payload, "ContentType": content_type, } content_id = attachment.get("Content-ID") if content_id: if content_id.startswith("<") and content_id.endswith(">"): content_id = content_id[1:-1] if (attachment.get("Content-Disposition") or "").startswith("inline"): content_id = "cid:%s" % content_id result["ContentID"] = content_id elif isinstance(attachment, str): content_type = guess_content_type(attachment) filename = os.path.basename(attachment) with open(attachment, "rb") as fd: data = fd.read() result = {"Name": filename, "Content": b64encode(data).decode("utf-8"), "ContentType": content_type} else: result = attachment return result
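A hedged usage sketch of the tuple form accepted by `prepare_attachments`; the filename, base64 payload and content type below are made-up placeholder values:

>>> prepare_attachments(("report.csv", "Zm9vLGJhcg==", "text/csv"))
{'Name': 'report.csv', 'Content': 'Zm9vLGJhcg==', 'ContentType': 'text/csv'}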
<SYSTEM_TASK:> Additionally encodes headers. <END_TASK> <USER_TASK:> Description: def as_dict(self): """ Additionally encodes headers. :return: """
data = super(BaseEmail, self).as_dict() data["Headers"] = [{"Name": name, "Value": value} for name, value in data["Headers"].items()] for field in ("To", "Cc", "Bcc"): if field in data: data[field] = list_to_csv(data[field]) data["Attachments"] = [prepare_attachments(attachment) for attachment in data["Attachments"]] return data
<SYSTEM_TASK:> Attaches given binary data. <END_TASK> <USER_TASK:> Description: def attach_binary(self, content, filename): """ Attaches given binary data. :param bytes content: Binary data to be attached. :param str filename: :return: None. """
content_type = guess_content_type(filename) payload = {"Name": filename, "Content": b64encode(content).decode("utf-8"), "ContentType": content_type} self.attach(payload)
<SYSTEM_TASK:> Instantiates ``Email`` instance from ``MIMEText`` instance. <END_TASK> <USER_TASK:> Description: def from_mime(cls, message, manager): """ Instantiates ``Email`` instance from ``MIMEText`` instance. :param message: ``email.mime.text.MIMEText`` instance. :param manager: :py:class:`EmailManager` instance. :return: :py:class:`Email` """
text, html, attachments = deconstruct_multipart(message) subject = prepare_header(message["Subject"]) sender = prepare_header(message["From"]) to = prepare_header(message["To"]) cc = prepare_header(message["Cc"]) bcc = prepare_header(message["Bcc"]) reply_to = prepare_header(message["Reply-To"]) tag = getattr(message, "tag", None) return cls( manager=manager, From=sender, To=to, TextBody=text, HtmlBody=html, Subject=subject, Cc=cc, Bcc=bcc, ReplyTo=reply_to, Attachments=attachments, Tag=tag, )
<SYSTEM_TASK:> Converts all available emails to dictionaries. <END_TASK> <USER_TASK:> Description: def as_dict(self, **extra): """ Converts all available emails to dictionaries. :return: List of dictionaries. """
return [self._construct_email(email, **extra) for email in self.emails]
<SYSTEM_TASK:> Converts incoming data to properly structured dictionary. <END_TASK> <USER_TASK:> Description: def _construct_email(self, email, **extra): """ Converts incoming data to properly structured dictionary. """
if isinstance(email, dict): email = Email(manager=self._manager, **email) elif isinstance(email, (MIMEText, MIMEMultipart)): email = Email.from_mime(email, self._manager) elif not isinstance(email, Email): raise ValueError email._update(extra) return email.as_dict()
<SYSTEM_TASK:> Sends a single email. <END_TASK> <USER_TASK:> Description: def send( self, message=None, From=None, To=None, Cc=None, Bcc=None, Subject=None, Tag=None, HtmlBody=None, TextBody=None, Metadata=None, ReplyTo=None, Headers=None, TrackOpens=None, TrackLinks="None", Attachments=None, ): """ Sends a single email. :param message: :py:class:`Email` or ``email.mime.text.MIMEText`` instance. :param str From: The sender email address. :param To: Recipient's email address. Multiple recipients could be specified as a list or string with comma separated values. :type To: str or list :param Cc: Cc recipient's email address. Multiple Cc recipients could be specified as a list or string with comma separated values. :type Cc: str or list :param Bcc: Bcc recipient's email address. Multiple Bcc recipients could be specified as a list or string with comma separated values. :type Bcc: str or list :param str Subject: Email subject. :param str Tag: Email tag. :param str HtmlBody: HTML email message. :param str TextBody: Plain text email message. :param str ReplyTo: Reply To override email address. :param dict Headers: Dictionary of custom headers to include. :param bool TrackOpens: Activate open tracking for this email. :param str TrackLinks: Activate link tracking for links in the HTML or Text bodies of this email. :param list Attachments: List of attachments. :return: Information about sent email. :rtype: `dict` """
assert not (message and (From or To)), "You should specify either message or From and To parameters" assert TrackLinks in ("None", "HtmlAndText", "HtmlOnly", "TextOnly") if message is None: message = self.Email( From=From, To=To, Cc=Cc, Bcc=Bcc, Subject=Subject, Tag=Tag, HtmlBody=HtmlBody, TextBody=TextBody, Metadata=Metadata, ReplyTo=ReplyTo, Headers=Headers, TrackOpens=TrackOpens, TrackLinks=TrackLinks, Attachments=Attachments, ) elif isinstance(message, (MIMEText, MIMEMultipart)): message = Email.from_mime(message, self) elif not isinstance(message, Email): raise TypeError("message should be either Email or MIMEText or MIMEMultipart instance") return message.send()
<SYSTEM_TASK:> Activates the bounce instance and updates it with the latest data. <END_TASK> <USER_TASK:> Description: def activate(self): """ Activates the bounce instance and updates it with the latest data. :return: Activation status. :rtype: `str` """
response = self._manager.activate(self.ID) self._update(response["Bounce"]) return response["Message"]
<SYSTEM_TASK:> Returns many bounces. <END_TASK> <USER_TASK:> Description: def all( self, count=500, offset=0, type=None, inactive=None, emailFilter=None, tag=None, messageID=None, fromdate=None, todate=None, ): """ Returns many bounces. :param int count: Number of bounces to return per request. :param int offset: Number of bounces to skip. :param str type: Filter by type of bounce. :param bool inactive: Filter by emails that were deactivated by Postmark due to the bounce. :param str emailFilter: Filter by email address. :param str tag: Filter by tag. :param str messageID: Filter by messageID. :param date fromdate: Filter messages starting from the date specified (inclusive). :param date todate: Filter messages up to the date specified (inclusive). :return: A list of :py:class:`Bounce` instances. :rtype: `list` """
responses = self.call_many( "GET", "/bounces/", count=count, offset=offset, type=type, inactive=inactive, emailFilter=emailFilter, tag=tag, messageID=messageID, fromdate=fromdate, todate=todate, ) return self.expand_responses(responses, "Bounces")
<SYSTEM_TASK:> Helper to support handy dictionaries merging on all Python versions. <END_TASK> <USER_TASK:> Description: def update_kwargs(self, kwargs, count, offset): """ Helper to support handy dictionaries merging on all Python versions. """
kwargs.update({self.count_key: count, self.offset_key: offset}) return kwargs
<SYSTEM_TASK:> Gets a brief overview of statistics for all of your outbound email. <END_TASK> <USER_TASK:> Description: def overview(self, tag=None, fromdate=None, todate=None): """ Gets a brief overview of statistics for all of your outbound email. """
return self.call("GET", "/stats/outbound", tag=tag, fromdate=fromdate, todate=todate)
<SYSTEM_TASK:> Gets a total count of recipients who have marked your email as spam. <END_TASK> <USER_TASK:> Description: def spam(self, tag=None, fromdate=None, todate=None): """ Gets a total count of recipients who have marked your email as spam. """
return self.call("GET", "/stats/outbound/spam", tag=tag, fromdate=fromdate, todate=todate)
<SYSTEM_TASK:> Gets total counts of recipients who opened your emails. <END_TASK> <USER_TASK:> Description: def opens(self, tag=None, fromdate=None, todate=None): """ Gets total counts of recipients who opened your emails. This is only recorded when open tracking is enabled for that email. """
return self.call("GET", "/stats/outbound/opens", tag=tag, fromdate=fromdate, todate=todate)
<SYSTEM_TASK:> Gets an overview of the platforms used to open your emails. <END_TASK> <USER_TASK:> Description: def opens_platforms(self, tag=None, fromdate=None, todate=None): """ Gets an overview of the platforms used to open your emails. This is only recorded when open tracking is enabled for that email. """
return self.call("GET", "/stats/outbound/opens/platforms", tag=tag, fromdate=fromdate, todate=todate)
<SYSTEM_TASK:> Gets an overview of the email clients used to open your emails. <END_TASK> <USER_TASK:> Description: def emailclients(self, tag=None, fromdate=None, todate=None): """ Gets an overview of the email clients used to open your emails. This is only recorded when open tracking is enabled for that email. """
return self.call("GET", "/stats/outbound/opens/emailclients", tag=tag, fromdate=fromdate, todate=todate)
<SYSTEM_TASK:> Gets the length of time that recipients read emails along with counts for each time. <END_TASK> <USER_TASK:> Description: def readtimes(self, tag=None, fromdate=None, todate=None): """ Gets the length of time that recipients read emails along with counts for each time. This is only recorded when open tracking is enabled for that email. Read time tracking stops at 20 seconds, so any read times above that will appear in the 20s+ field. """
return self.call("GET", "/stats/outbound/opens/readtimes", tag=tag, fromdate=fromdate, todate=todate)
<SYSTEM_TASK:> Gets total counts of unique links that were clicked. <END_TASK> <USER_TASK:> Description: def clicks(self, tag=None, fromdate=None, todate=None): """ Gets total counts of unique links that were clicked. """
return self.call("GET", "/stats/outbound/clicks", tag=tag, fromdate=fromdate, todate=todate)
<SYSTEM_TASK:> Gets an overview of the browsers used to open links in your emails. <END_TASK> <USER_TASK:> Description: def browserfamilies(self, tag=None, fromdate=None, todate=None): """ Gets an overview of the browsers used to open links in your emails. This is only recorded when Link Tracking is enabled for that email. """
return self.call("GET", "/stats/outbound/clicks/browserfamilies", tag=tag, fromdate=fromdate, todate=todate)
<SYSTEM_TASK:> Gets an overview of the browser platforms used to open your emails. <END_TASK> <USER_TASK:> Description: def clicks_platforms(self, tag=None, fromdate=None, todate=None): """ Gets an overview of the browser platforms used to open your emails. This is only recorded when Link Tracking is enabled for that email. """
return self.call("GET", "/stats/outbound/clicks/platforms", tag=tag, fromdate=fromdate, todate=todate)
<SYSTEM_TASK:> Return the global line rate of the coverage report. If the <END_TASK> <USER_TASK:> Description: def line_rate(self, filename=None): """ Return the global line rate of the coverage report. If the `filename` file is given, return the line rate of the file. """
if filename is None: el = self.xml else: el = self._get_class_element_by_filename(filename) return float(el.attrib['line-rate'])
<SYSTEM_TASK:> Return the global branch rate of the coverage report. If the <END_TASK> <USER_TASK:> Description: def branch_rate(self, filename=None): """ Return the global branch rate of the coverage report. If the `filename` file is given, return the branch rate of the file. """
if filename is None: el = self.xml else: el = self._get_class_element_by_filename(filename) return float(el.attrib['branch-rate'])
<SYSTEM_TASK:> Return a list of uncovered line numbers for each of the missed <END_TASK> <USER_TASK:> Description: def missed_statements(self, filename): """ Return a list of uncovered line numbers for each of the missed statements found for the file `filename`. """
el = self._get_class_element_by_filename(filename) lines = el.xpath('./lines/line[@hits=0]') return [int(l.attrib['number']) for l in lines]
<SYSTEM_TASK:> Return a list of extrapolated uncovered line numbers for the <END_TASK> <USER_TASK:> Description: def missed_lines(self, filename): """ Return a list of extrapolated uncovered line numbers for the file `filename` according to `Cobertura.line_statuses`. """
statuses = self.line_statuses(filename) statuses = extrapolate_coverage(statuses) return [lno for lno, status in statuses if status is False]
<SYSTEM_TASK:> Return a list of namedtuple `Line` for each line of code found in the <END_TASK> <USER_TASK:> Description: def file_source(self, filename): """ Return a list of namedtuple `Line` for each line of code found in the source file with the given `filename`. """
lines = [] try: with self.filesystem.open(filename) as f: line_statuses = dict(self.line_statuses(filename)) for lineno, source in enumerate(f, start=1): line_status = line_statuses.get(lineno) line = Line(lineno, source, line_status, None) lines.append(line) except self.filesystem.FileNotFound as file_not_found: lines.append( Line(0, '%s not found' % file_not_found.path, None, None) ) return lines
<SYSTEM_TASK:> Return the total number of uncovered statements for the file <END_TASK> <USER_TASK:> Description: def total_misses(self, filename=None): """ Return the total number of uncovered statements for the file `filename`. If `filename` is not given, return the total number of uncovered statements for all files. """
if filename is not None: return len(self.missed_statements(filename)) total = 0 for filename in self.files(): total += len(self.missed_statements(filename)) return total
<SYSTEM_TASK:> Return the total number of covered statements for the file <END_TASK> <USER_TASK:> Description: def total_hits(self, filename=None): """ Return the total number of covered statements for the file `filename`. If `filename` is not given, return the total number of covered statements for all files. """
if filename is not None: return len(self.hit_statements(filename)) total = 0 for filename in self.files(): total += len(self.hit_statements(filename)) return total
<SYSTEM_TASK:> Return the total number of statements for the file <END_TASK> <USER_TASK:> Description: def total_statements(self, filename=None): """ Return the total number of statements for the file `filename`. If `filename` is not given, return the total number of statements for all files. """
if filename is not None: statements = self._get_lines_by_filename(filename) return len(statements) total = 0 for filename in self.files(): statements = self._get_lines_by_filename(filename) total += len(statements) return total
<SYSTEM_TASK:> Return the list of available files in the coverage report. <END_TASK> <USER_TASK:> Description: def files(self): """ Return the list of available files in the coverage report. """
# maybe replace with a trie at some point? see has_file FIXME already_seen = set() filenames = [] for el in self.xml.xpath("//class"): filename = el.attrib['filename'] if filename in already_seen: continue already_seen.add(filename) filenames.append(filename) return filenames
<SYSTEM_TASK:> Return `True` if coverage has improved, `False` otherwise. <END_TASK> <USER_TASK:> Description: def has_better_coverage(self): """ Return `True` if coverage has improved, `False` otherwise. This does not ensure that all changes have been covered. If this is what you want, use `CoberturaDiff.has_all_changes_covered()` instead. """
for filename in self.files(): if self.diff_total_misses(filename) > 0: return False return True
<SYSTEM_TASK:> Return `True` if all changes have been covered, `False` otherwise. <END_TASK> <USER_TASK:> Description: def has_all_changes_covered(self): """ Return `True` if all changes have been covered, `False` otherwise. """
for filename in self.files(): for hunk in self.file_source_hunks(filename): for line in hunk: if line.reason is None: continue # line untouched if line.status is False: return False # line not covered return True
<SYSTEM_TASK:> Return a list of namedtuple `Line` for each line of code found in the <END_TASK> <USER_TASK:> Description: def file_source(self, filename): """ Return a list of namedtuple `Line` for each line of code found in the given file `filename`. """
if self.cobertura1.has_file(filename) and \ self.cobertura1.filesystem.has_file(filename): lines1 = self.cobertura1.source_lines(filename) line_statuses1 = dict(self.cobertura1.line_statuses( filename)) else: lines1 = [] line_statuses1 = {} lines2 = self.cobertura2.source_lines(filename) line_statuses2 = dict(self.cobertura2.line_statuses(filename)) # Build a dict of lineno2 -> lineno1 lineno_map = reconcile_lines(lines2, lines1) lines = [] for lineno, source in enumerate(lines2, start=1): status = None reason = None if lineno not in lineno_map: # line was added or removed, just use whatever coverage status # is available as there is nothing to compare against. status = line_statuses2.get(lineno) reason = 'line-edit' else: other_lineno = lineno_map[lineno] line_status1 = line_statuses1.get(other_lineno) line_status2 = line_statuses2.get(lineno) if line_status1 is line_status2: status = None # unchanged reason = None elif line_status1 is True and line_status2 is False: status = False # decreased reason = 'cov-down' elif line_status1 is False and line_status2 is True: status = True # increased reason = 'cov-up' line = Line(lineno, source, status, reason) lines.append(line) return lines
<SYSTEM_TASK:> Like `CoberturaDiff.file_source`, but returns a list of line hunks of <END_TASK> <USER_TASK:> Description: def file_source_hunks(self, filename): """ Like `CoberturaDiff.file_source`, but returns a list of line hunks of the lines that have changed for the given file `filename`. An empty list means that the file has no lines that have a change in coverage status. """
lines = self.file_source(filename) hunks = hunkify_lines(lines) return hunks
<SYSTEM_TASK:> Flushes the queue periodically. <END_TASK> <USER_TASK:> Description: def monitor(self): """Flushes the queue periodically."""
while self.monitor_running.is_set(): if time.time() - self.last_flush > self.batch_time: if not self.queue.empty(): logger.info("Queue Flush: time without flush exceeded") self.flush_queue() time.sleep(self.batch_time)
<SYSTEM_TASK:> Add a list of data records to the record queue in the proper format. <END_TASK> <USER_TASK:> Description: def put_records(self, records, partition_key=None): """Add a list of data records to the record queue in the proper format. Convenience method that calls self.put_record for each element. Parameters ---------- records : list List of records to send. partition_key: str Hash that determines which shard a given data record belongs to. """
for record in records: self.put_record(record, partition_key)
<SYSTEM_TASK:> Add data to the record queue in the proper format. <END_TASK> <USER_TASK:> Description: def put_record(self, data, partition_key=None): """Add data to the record queue in the proper format. Parameters ---------- data : str Data to send. partition_key: str Hash that determines which shard a given data record belongs to. """
# Byte encode the data data = encode_data(data) # Create a random partition key if not provided if not partition_key: partition_key = uuid.uuid4().hex # Build the record record = { 'Data': data, 'PartitionKey': partition_key } # Flush the queue if it reaches the batch size if self.queue.qsize() >= self.batch_size: logger.info("Queue Flush: batch size reached") self.pool.submit(self.flush_queue) # Append the record logger.debug('Putting record "{}"'.format(record['Data'][:100])) self.queue.put(record)
<SYSTEM_TASK:> Flushes the queue and waits for the executor to finish. <END_TASK> <USER_TASK:> Description: def close(self): """Flushes the queue and waits for the executor to finish."""
logger.info('Closing producer') self.flush_queue() self.monitor_running.clear() self.pool.shutdown() logger.info('Producer closed')
<SYSTEM_TASK:> Grab all the current records in the queue and send them. <END_TASK> <USER_TASK:> Description: def flush_queue(self): """Grab all the current records in the queue and send them."""
records = [] while not self.queue.empty() and len(records) < self.batch_size: records.append(self.queue.get()) if records: self.send_records(records) self.last_flush = time.time()
<SYSTEM_TASK:> Send records to the Kinesis stream. <END_TASK> <USER_TASK:> Description: def send_records(self, records, attempt=0): """Send records to the Kinesis stream. Failed records are sent again with an exponential backoff decay. Parameters ---------- records : array Array of formatted records to send. attempt: int Number of times the records have been sent without success. """
# If we already tried more times than we wanted, save to a file if attempt > self.max_retries: logger.warning('Writing {} records to file'.format(len(records))) with open('failed_records.dlq', 'ab') as f: for r in records: f.write(r.get('Data')) return # Sleep before retrying if attempt: time.sleep(2 ** attempt * .1) response = self.kinesis_client.put_records(StreamName=self.stream_name, Records=records) failed_record_count = response['FailedRecordCount'] # Grab failed records if failed_record_count: logger.warning('Retrying failed records') failed_records = [] for i, record in enumerate(response['Records']): if record.get('ErrorCode'): failed_records.append(records[i]) # Recursive call attempt += 1 self.send_records(failed_records, attempt=attempt)
<SYSTEM_TASK:> Assumes the list is sorted. <END_TASK> <USER_TASK:> Description: def rangify(number_list): """Assumes the list is sorted."""
if not number_list: return number_list ranges = [] range_start = prev_num = number_list[0] for num in number_list[1:]: if num != (prev_num + 1): ranges.append((range_start, prev_num)) range_start = num prev_num = num ranges.append((range_start, prev_num)) return ranges
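A small doctest-style sketch of `rangify` on an arbitrary sorted list, showing how contiguous runs collapse into (start, stop) tuples:

>>> rangify([1, 2, 3, 7, 8, 10])
[(1, 3), (7, 8), (10, 10)]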
<SYSTEM_TASK:> Return a list of line hunks given a list of lines `lines`. The number of <END_TASK> <USER_TASK:> Description: def hunkify_lines(lines, context=3): """ Return a list of line hunks given a list of lines `lines`. The number of context lines can be controlled with `context`, which will return line hunks surrounded with `context` lines before and after the code change. """
# Find contiguous line changes ranges = [] range_start = None for i, line in enumerate(lines): if line.status is not None: if range_start is None: range_start = i continue elif range_start is not None: range_stop = i ranges.append((range_start, range_stop)) range_start = None else: # Append the last range if range_start is not None: range_stop = i ranges.append((range_start, range_stop)) # add context ranges_w_context = [] for range_start, range_stop in ranges: range_start = range_start - context range_start = range_start if range_start >= 0 else 0 range_stop = range_stop + context ranges_w_context.append((range_start, range_stop)) # merge overlapping hunks merged_ranges = ranges_w_context[:1] for range_start, range_stop in ranges_w_context[1:]: prev_start, prev_stop = merged_ranges[-1] if range_start <= prev_stop: range_start = prev_start merged_ranges[-1] = (range_start, range_stop) else: merged_ranges.append((range_start, range_stop)) # build final hunks hunks = [] for range_start, range_stop in merged_ranges: hunk = lines[range_start:range_stop] hunks.append(hunk) return hunks
<SYSTEM_TASK:> show coverage summary of a Cobertura report <END_TASK> <USER_TASK:> Description: def show(cobertura_file, format, output, source, source_prefix): """show coverage summary of a Cobertura report"""
cobertura = Cobertura(cobertura_file, source=source) Reporter = reporters[format] reporter = Reporter(cobertura) report = reporter.generate() if not isinstance(report, bytes): report = report.encode('utf-8') isatty = True if output is None else output.isatty() click.echo(report, file=output, nl=isatty)
<SYSTEM_TASK:> compare coverage of two Cobertura reports <END_TASK> <USER_TASK:> Description: def diff( cobertura_file1, cobertura_file2, color, format, output, source1, source2, source_prefix1, source_prefix2, source): """compare coverage of two Cobertura reports"""
cobertura1 = Cobertura( cobertura_file1, source=source1, source_prefix=source_prefix1 ) cobertura2 = Cobertura( cobertura_file2, source=source2, source_prefix=source_prefix2 ) Reporter = delta_reporters[format] reporter_args = [cobertura1, cobertura2] reporter_kwargs = {'show_source': source} isatty = True if output is None else output.isatty() if format == 'text': color = isatty if color is None else color is True reporter_kwargs['color'] = color reporter = Reporter(*reporter_args, **reporter_kwargs) report = reporter.generate() if not isinstance(report, bytes): report = report.encode('utf-8') click.echo(report, file=output, nl=isatty, color=color) exit_code = get_exit_code(reporter.differ, source) raise SystemExit(exit_code)
<SYSTEM_TASK:> Yield a file-like object for file `filename`. <END_TASK> <USER_TASK:> Description: def open(self, filename): """ Yield a file-like object for file `filename`. This function is a context manager. """
filename = self.real_filename(filename) if not os.path.exists(filename): raise self.FileNotFound(filename) with codecs.open(filename, encoding='utf-8') as f: yield f
<SYSTEM_TASK:> store nick, user, host in kwargs if prefix is correct format <END_TASK> <USER_TASK:> Description: def nickmask(prefix: str, kwargs: Dict[str, Any]) -> None: """ store nick, user, host in kwargs if prefix is correct format """
if "!" in prefix and "@" in prefix: # From a user kwargs["nick"], remainder = prefix.split("!", 1) kwargs["user"], kwargs["host"] = remainder.split("@", 1) else: # From a server, probably the host kwargs["host"] = prefix
<SYSTEM_TASK:> Parse message according to rfc 2812 for routing <END_TASK> <USER_TASK:> Description: def split_line(msg: str) -> Tuple[str, str, List[str]]: """ Parse message according to rfc 2812 for routing """
match = RE_IRCLINE.match(msg) if not match: raise ValueError("Invalid line") prefix = match.group("prefix") or "" command = match.group("command") params = (match.group("params") or "").split() message = match.group("message") or "" if message: params.append(message) return prefix, command, params
<SYSTEM_TASK:> Alias for more readable command construction <END_TASK> <USER_TASK:> Description: def f(field: str, kwargs: Dict[str, Any], default: Optional[Any] = None) -> str: """ Alias for more readable command construction """
if default is not None: return str(kwargs.get(field, default)) return str(kwargs[field])
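An illustrative sketch of the `f` alias; the field names and default value are hypothetical examples:

>>> f("nick", {"nick": "bottom"})
'bottom'
>>> f("message", {}, default="hello")
'hello'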
<SYSTEM_TASK:> Util for joining multiple fields with commas <END_TASK> <USER_TASK:> Description: def pack(field: str, kwargs: Dict[str, Any], default: Optional[Any] = None, sep: str=',') -> str: """ Util for joining multiple fields with commas """
if default is not None: value = kwargs.get(field, default) else: value = kwargs[field] if isinstance(value, str): return value elif isinstance(value, collections.abc.Iterable): return sep.join(str(f) for f in value) else: return str(value)
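A doctest-style sketch of `pack` joining an iterable field versus passing a plain string through unchanged; the channel names are hypothetical:

>>> pack("channel", {"channel": ["#chan1", "#chan2"]})
'#chan1,#chan2'
>>> pack("channel", {"channel": "#chan1"})
'#chan1'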
<SYSTEM_TASK:> Open a connection to the defined server. <END_TASK> <USER_TASK:> Description: async def connect(self) -> None: """Open a connection to the defined server."""
def protocol_factory() -> Protocol: return Protocol(client=self) _, protocol = await self.loop.create_connection( protocol_factory, host=self.host, port=self.port, ssl=self.ssl ) # type: Tuple[Any, Any] if self.protocol: self.protocol.close() self.protocol = protocol # TODO: Delete the following code line. It is currently kept in order # to not break the current existing codebase. Removing it requires a # heavy change in the test codebase. protocol.client = self self.trigger("client_connect")
<SYSTEM_TASK:> Decorate a function to be invoked when the given event occurs. <END_TASK> <USER_TASK:> Description: def on(self, event: str, func: Optional[Callable] = None) -> Callable: """ Decorate a function to be invoked when the given event occurs. The function may be a coroutine. Your function should accept **kwargs in case an event is triggered with unexpected kwargs. Example ------- import asyncio import bottom client = bottom.Client(...) @client.on("test") async def func(one, two, **kwargs): print(one) print(two) print(kwargs) events.trigger("test", **{"one": 1, "two": 2, "extra": "foo"}) loop = asyncio.get_event_loop() # Run all queued events loop.stop() loop.run_forever() """
if func is None: return functools.partial(self.on, event) # type: ignore wrapped = func if not asyncio.iscoroutinefunction(wrapped): wrapped = asyncio.coroutine(wrapped) self._event_handlers[event.upper()].append(wrapped) # Always return original return func
<SYSTEM_TASK:> client callback entrance <END_TASK> <USER_TASK:> Description: def _handle(self, nick, target, message, **kwargs): """ client callback entrance """
for regex, (func, pattern) in self.routes.items(): match = regex.match(message) if match: self.client.loop.create_task( func(nick, target, message, match, **kwargs))
<SYSTEM_TASK:> factory function that returns a proxy object for an owserver at <END_TASK> <USER_TASK:> Description: def proxy(host='localhost', port=4304, flags=0, persistent=False, verbose=False, ): """factory function that returns a proxy object for an owserver at host, port. """
# resolve host name/port try: gai = socket.getaddrinfo(host, port, 0, socket.SOCK_STREAM, socket.IPPROTO_TCP) except socket.gaierror as err: raise ConnError(*err.args) # gai is a (non empty) list of tuples, search for the first working one assert gai for (family, _type, _proto, _, sockaddr) in gai: assert _type is socket.SOCK_STREAM and _proto is socket.IPPROTO_TCP owp = _Proxy(family, sockaddr, flags, verbose) try: # check if there is an owserver listening owp.ping() except ConnError as err: # no connection, go over to next sockaddr lasterr = err.args continue else: # ok, live owserver found, stop searching break else: # no server listening on (family, sockaddr) found: raise ConnError(*lasterr) # init errno to errmessage mapping # FIXME: should this be only optional? owp._init_errcodes() if persistent: owp = clone(owp, persistent=True) # here we should have all connections closed assert not isinstance(owp, _PersistentProxy) or owp.conn is None return owp
<SYSTEM_TASK:> factory function for cloning a proxy object <END_TASK> <USER_TASK:> Description: def clone(proxy, persistent=True): """factory function for cloning a proxy object"""
if not isinstance(proxy, _Proxy): raise TypeError('argument is not a Proxy object') if persistent: pclass = _PersistentProxy else: pclass = _Proxy return pclass(proxy._family, proxy._sockaddr, proxy.flags & ~FLG_PERSISTENCE, proxy.verbose, proxy.errmess)
<SYSTEM_TASK:> send message to server and return response <END_TASK> <USER_TASK:> Description: def req(self, msgtype, payload, flags, size=0, offset=0, timeout=0): """send message to server and return response"""
if timeout < 0:
    raise ValueError("timeout cannot be negative!")
tohead = _ToServerHeader(payload=len(payload), type=msgtype, flags=flags,
                         size=size, offset=offset)
tstartcom = monotonic()  # set timer when communication begins
self._send_msg(tohead, payload)
while True:
    fromhead, data = self._read_msg()
    if fromhead.payload >= 0:
        # we received a valid answer and return the result
        return fromhead.ret, fromhead.flags, data
    assert msgtype != MSG_NOP
    # we did not exit the loop because payload is negative
    # Server said PING to keep connection alive during lengthy op
    # check if timeout has expired
    if timeout:
        tcom = monotonic() - tstartcom
        if tcom > timeout:
            raise OwnetTimeout(tcom, timeout)
<SYSTEM_TASK:> send message to server <END_TASK> <USER_TASK:> Description: def _send_msg(self, header, payload): """send message to server"""
if self.verbose: print('->', repr(header)) print('..', repr(payload)) assert header.payload == len(payload) try: sent = self.socket.send(header + payload) except IOError as err: raise ConnError(*err.args) # FIXME FIXME FIXME: # investigate under which situations socket.send should be retried # instead of aborted. # FIXME FIXME FIXME if sent < len(header + payload): raise ShortWrite(sent, len(header + payload)) assert sent == len(header + payload), sent
<SYSTEM_TASK:> read message from server <END_TASK> <USER_TASK:> Description: def _read_msg(self): """read message from server"""
# # NOTE: # '_recv_socket(nbytes)' was implemented as # 'socket.recv(nbytes, socket.MSG_WAITALL)' # but socket.MSG_WAITALL proved not reliable # def _recv_socket(nbytes): """read nbytes bytes from self.socket""" # # code below is written under the assumption that # 'nbytes' is smallish so that the 'while len(buf) < nbytes' loop # is entered rarerly # try: buf = self.socket.recv(nbytes) except IOError as err: raise ConnError(*err.args) if not buf: raise ShortRead(0, nbytes) while len(buf) < nbytes: try: tmp = self.socket.recv(nbytes - len(buf)) except IOError as err: raise ConnError(*err.args) if not tmp: if self.verbose: print('ee', repr(buf)) raise ShortRead(len(buf), nbytes) buf += tmp assert len(buf) == nbytes, (buf, len(buf), nbytes) return buf data = _recv_socket(_FromServerHeader.header_size) header = _FromServerHeader(data) if self.verbose: print('<-', repr(header)) # error conditions if header.version != 0: raise MalformedHeader('bad version', header) if header.payload > MAX_PAYLOAD: raise MalformedHeader('huge payload, unwilling to read', header) if header.payload > 0: payload = _recv_socket(header.payload) if self.verbose: print('..', repr(payload)) assert header.size <= header.payload payload = payload[:header.size] else: payload = bytes() return header, payload
<SYSTEM_TASK:> sends a NOP packet and waits response; returns None <END_TASK> <USER_TASK:> Description: def ping(self): """sends a NOP packet and waits response; returns None"""
ret, data = self.sendmess(MSG_NOP, bytes()) if data or ret > 0: raise ProtocolError('invalid reply to ping message') if ret < 0: raise OwnetError(-ret, self.errmess[-ret])
<SYSTEM_TASK:> returns True if there is an entity at path <END_TASK> <USER_TASK:> Description: def present(self, path, timeout=0): """returns True if there is an entity at path"""
ret, data = self.sendmess(MSG_PRESENCE, str2bytez(path), timeout=timeout) assert ret <= 0 and not data, (ret, data) if ret < 0: return False else: return True
<SYSTEM_TASK:> list entities at path <END_TASK> <USER_TASK:> Description: def dir(self, path='/', slash=True, bus=False, timeout=0): """list entities at path"""
if slash: msg = MSG_DIRALLSLASH else: msg = MSG_DIRALL if bus: flags = self.flags | FLG_BUS_RET else: flags = self.flags & ~FLG_BUS_RET ret, data = self.sendmess(msg, str2bytez(path), flags, timeout=timeout) if ret < 0: raise OwnetError(-ret, self.errmess[-ret], path) if data: return bytes2str(data).split(',') else: return []
<SYSTEM_TASK:> read data at path <END_TASK> <USER_TASK:> Description: def read(self, path, size=MAX_PAYLOAD, offset=0, timeout=0): """read data at path"""
if size > MAX_PAYLOAD: raise ValueError("size cannot exceed %d" % MAX_PAYLOAD) ret, data = self.sendmess(MSG_READ, str2bytez(path), size=size, offset=offset, timeout=timeout) if ret < 0: raise OwnetError(-ret, self.errmess[-ret], path) return data
<SYSTEM_TASK:> write data at path <END_TASK> <USER_TASK:> Description: def write(self, path, data, offset=0, timeout=0): """write data at path path is a string, data binary; it is the responsibility of the caller to ensure proper encoding. """
# fixme: check of path type delayed to str2bytez if not isinstance(data, (bytes, bytearray, )): raise TypeError("'data' argument must be binary") ret, rdata = self.sendmess(MSG_WRITE, str2bytez(path) + data, size=len(data), offset=offset, timeout=timeout) assert not rdata, (ret, rdata) if ret < 0: raise OwnetError(-ret, self.errmess[-ret], path)
<SYSTEM_TASK:> Return True if names are not incompatible. <END_TASK> <USER_TASK:> Description: def _is_compatible_with(self, other): """ Return True if names are not incompatible. This checks that the gender of titles and compatibility of suffixes """
title = self._compare_title(other) suffix = self._compare_suffix(other) return title and suffix
<SYSTEM_TASK:> Return False if titles have different gender associations <END_TASK> <USER_TASK:> Description: def _compare_title(self, other): """Return False if titles have different gender associations"""
# If title is omitted, assume a match if not self.title or not other.title: return True titles = set(self.title_list + other.title_list) return not (titles & MALE_TITLES and titles & FEMALE_TITLES)
<SYSTEM_TASK:> Return false if suffixes are mutually exclusive <END_TASK> <USER_TASK:> Description: def _compare_suffix(self, other): """Return false if suffixes are mutually exclusive"""
# If suffix is omitted, assume a match if not self.suffix or not other.suffix: return True # Check if more than one unique suffix suffix_set = set(self.suffix_list + other.suffix_list) unique_suffixes = suffix_set & UNIQUE_SUFFIXES for key in EQUIVALENT_SUFFIXES: if key in unique_suffixes: unique_suffixes.remove(key) unique_suffixes.add(EQUIVALENT_SUFFIXES[key]) return len(unique_suffixes) < 2
<SYSTEM_TASK:> Return comparison of first, middle, and last components <END_TASK> <USER_TASK:> Description: def _compare_components(self, other, settings, ratio=False): """Return comparison of first, middle, and last components"""
first = compare_name_component(
    self.first_list, other.first_list, settings['first'], ratio,
)
if settings['check_nickname']:
    if first is False:
        first = compare_name_component(
            self.nickname_list, other.first_list, settings['first'], ratio
        ) or compare_name_component(
            self.first_list, other.nickname_list, settings['first'], ratio
        )
    elif ratio and first != 100:
        first = max(
            compare_name_component(
                self.nickname_list, other.first_list, settings['first'], ratio
            ),
            compare_name_component(
                self.first_list, other.nickname_list, settings['first'], ratio
            ),
            first,
        )
middle = compare_name_component(
    self.middle_list, other.middle_list, settings['middle'], ratio,
)
last = compare_name_component(
    self.last_list, other.last_list, settings['last'], ratio,
)
return first, middle, last