Dataset columns (name, type, and observed value ranges):

| Column | Type | Values |
|---|---|---|
| identifier | string | lengths 1 to 155 |
| parameters | string | lengths 2 to 6.09k |
| docstring | string | lengths 11 to 63.4k |
| docstring_summary | string | lengths 0 to 63.4k |
| function | string | lengths 29 to 99.8k |
| function_tokens | sequence | |
| start_point | sequence | |
| end_point | sequence | |
| language | string (categorical) | 1 distinct value |
| docstring_language | string | lengths 2 to 7 |
| docstring_language_predictions | string | lengths 18 to 23 |
| is_langid_reliable | string (categorical) | 2 distinct values |
ElasticsearchDocumentStore.get_documents_by_id
(self, ids: List[str], index: Optional[str] = None)
Fetch documents by specifying a list of text id strings
Fetch documents by specifying a list of text id strings
def get_documents_by_id(self, ids: List[str], index: Optional[str] = None) -> List[Document]: """Fetch documents by specifying a list of text id strings""" index = index or self.index query = {"query": {"ids": {"values": ids}}} result = self.client.search(index=index, body=query)["hits"]["hits"] documents = [self._convert_es_hit_to_document(hit, return_embedding=self.return_embedding) for hit in result] return documents
[ "def", "get_documents_by_id", "(", "self", ",", "ids", ":", "List", "[", "str", "]", ",", "index", ":", "Optional", "[", "str", "]", "=", "None", ")", "->", "List", "[", "Document", "]", ":", "index", "=", "index", "or", "self", ".", "index", "query", "=", "{", "\"query\"", ":", "{", "\"ids\"", ":", "{", "\"values\"", ":", "ids", "}", "}", "}", "result", "=", "self", ".", "client", ".", "search", "(", "index", "=", "index", ",", "body", "=", "query", ")", "[", "\"hits\"", "]", "[", "\"hits\"", "]", "documents", "=", "[", "self", ".", "_convert_es_hit_to_document", "(", "hit", ",", "return_embedding", "=", "self", ".", "return_embedding", ")", "for", "hit", "in", "result", "]", "return", "documents" ]
[ 280, 4 ]
[ 286, 24 ]
python
en
['en', 'en', 'en']
True
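A minimal usage sketch for `get_documents_by_id`, assuming the Haystack 0.x import path and a running Elasticsearch instance (host and index names below are illustrative):

```python
from haystack.document_store.elasticsearch import ElasticsearchDocumentStore

# Connection parameters are illustrative; adjust to your deployment.
store = ElasticsearchDocumentStore(host="localhost", index="document")

# Runs an ES "ids" query and converts each hit back into a Haystack Document.
docs = store.get_documents_by_id(ids=["doc-1", "doc-2"])
for doc in docs:
    print(doc.id, doc.text[:80])
```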
ElasticsearchDocumentStore.get_metadata_values_by_key
( self, key: str, query: Optional[str] = None, filters: Optional[Dict[str, List[str]]] = None, index: Optional[str] = None, )
Get values associated with a metadata key. The output is in the format: [{"value": "my-value-1", "count": 23}, {"value": "my-value-2", "count": 12}, ... ] :param key: the meta key name to get the values for. :param query: narrow down the scope to documents matching the query string. :param filters: narrow down the scope to documents that match the given filters. :param index: Elasticsearch index where the meta values should be searched. If not supplied, self.index will be used.
Get values associated with a metadata key. The output is in the format: [{"value": "my-value-1", "count": 23}, {"value": "my-value-2", "count": 12}, ... ]
def get_metadata_values_by_key( self, key: str, query: Optional[str] = None, filters: Optional[Dict[str, List[str]]] = None, index: Optional[str] = None, ) -> List[dict]: """ Get values associated with a metadata key. The output is in the format: [{"value": "my-value-1", "count": 23}, {"value": "my-value-2", "count": 12}, ... ] :param key: the meta key name to get the values for. :param query: narrow down the scope to documents matching the query string. :param filters: narrow down the scope to documents that match the given filters. :param index: Elasticsearch index where the meta values should be searched. If not supplied, self.index will be used. """ body: dict = {"size": 0, "aggs": {"metadata_agg": {"terms": {"field": key}}}} if query: body["query"] = { "bool": { "should": [{"multi_match": {"query": query, "type": "most_fields", "fields": self.search_fields, }}] } } if filters: filter_clause = [] for key, values in filters.items(): filter_clause.append({"terms": {key: values}}) if not body.get("query"): body["query"] = {"bool": {}} body["query"]["bool"].update({"filter": filter_clause}) result = self.client.search(body=body, index=index) buckets = result["aggregations"]["metadata_agg"]["buckets"] for bucket in buckets: bucket["count"] = bucket.pop("doc_count") bucket["value"] = bucket.pop("key") return buckets
[ "def", "get_metadata_values_by_key", "(", "self", ",", "key", ":", "str", ",", "query", ":", "Optional", "[", "str", "]", "=", "None", ",", "filters", ":", "Optional", "[", "Dict", "[", "str", ",", "List", "[", "str", "]", "]", "]", "=", "None", ",", "index", ":", "Optional", "[", "str", "]", "=", "None", ",", ")", "->", "List", "[", "dict", "]", ":", "body", ":", "dict", "=", "{", "\"size\"", ":", "0", ",", "\"aggs\"", ":", "{", "\"metadata_agg\"", ":", "{", "\"terms\"", ":", "{", "\"field\"", ":", "key", "}", "}", "}", "}", "if", "query", ":", "body", "[", "\"query\"", "]", "=", "{", "\"bool\"", ":", "{", "\"should\"", ":", "[", "{", "\"multi_match\"", ":", "{", "\"query\"", ":", "query", ",", "\"type\"", ":", "\"most_fields\"", ",", "\"fields\"", ":", "self", ".", "search_fields", ",", "}", "}", "]", "}", "}", "if", "filters", ":", "filter_clause", "=", "[", "]", "for", "key", ",", "values", "in", "filters", ".", "items", "(", ")", ":", "filter_clause", ".", "append", "(", "{", "\"terms\"", ":", "{", "key", ":", "values", "}", "}", ")", "if", "not", "body", ".", "get", "(", "\"query\"", ")", ":", "body", "[", "\"query\"", "]", "=", "{", "\"bool\"", ":", "{", "}", "}", "body", "[", "\"query\"", "]", "[", "\"bool\"", "]", ".", "update", "(", "{", "\"filter\"", ":", "filter_clause", "}", ")", "result", "=", "self", ".", "client", ".", "search", "(", "body", "=", "body", ",", "index", "=", "index", ")", "buckets", "=", "result", "[", "\"aggregations\"", "]", "[", "\"metadata_agg\"", "]", "[", "\"buckets\"", "]", "for", "bucket", "in", "buckets", ":", "bucket", "[", "\"count\"", "]", "=", "bucket", ".", "pop", "(", "\"doc_count\"", ")", "bucket", "[", "\"value\"", "]", "=", "bucket", ".", "pop", "(", "\"key\"", ")", "return", "buckets" ]
[ 288, 4 ]
[ 324, 22 ]
python
en
['en', 'error', 'th']
False
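A sketch of the terms-aggregation helper; the metadata key and query string below are hypothetical:

```python
from haystack.document_store.elasticsearch import ElasticsearchDocumentStore

store = ElasticsearchDocumentStore(host="localhost", index="document")

# Aggregates the distinct values of a metadata key, optionally scoped
# to documents matching a query string over self.search_fields.
buckets = store.get_metadata_values_by_key(key="category", query="elasticsearch")
# Each bucket has the shape {"value": <metadata value>, "count": <doc count>}.
for bucket in buckets:
    print(bucket["value"], bucket["count"])
```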
ElasticsearchDocumentStore.write_documents
( self, documents: Union[List[dict], List[Document]], index: Optional[str] = None, batch_size: int = 10_000 )
Indexes documents for later queries in Elasticsearch. Behaviour if a document with the same ID already exists in ElasticSearch: a) (Default) Throw Elastic's standard error message for duplicate IDs. b) If `self.update_existing_documents=True` for DocumentStore: Overwrite existing documents. (This is only relevant if you pass your own ID when initializing a `Document`. If you don't set custom IDs for your Documents or just pass a list of dictionaries here, they will automatically get UUIDs assigned. See the `Document` class for details) :param documents: a list of Python dictionaries or a list of Haystack Document objects. For documents as dictionaries, the format is {"text": "<the-actual-text>"}. Optionally: Include metadata via {"text": "<the-actual-text>", "meta":{"name": "<some-document-name>", "author": "somebody", ...}} It can be used for filtering and is accessible in the responses of the Finder. Advanced: If you are using your own Elasticsearch mapping, the key names in the dictionary should be changed to what you have set for self.text_field and self.name_field. :param index: Elasticsearch index where the documents should be indexed. If not supplied, self.index will be used. :param batch_size: Number of documents that are passed to Elasticsearch's bulk function at a time. :return: None
Indexes documents for later queries in Elasticsearch.
def write_documents( self, documents: Union[List[dict], List[Document]], index: Optional[str] = None, batch_size: int = 10_000 ): """ Indexes documents for later queries in Elasticsearch. Behaviour if a document with the same ID already exists in ElasticSearch: a) (Default) Throw Elastic's standard error message for duplicate IDs. b) If `self.update_existing_documents=True` for DocumentStore: Overwrite existing documents. (This is only relevant if you pass your own ID when initializing a `Document`. If you don't set custom IDs for your Documents or just pass a list of dictionaries here, they will automatically get UUIDs assigned. See the `Document` class for details) :param documents: a list of Python dictionaries or a list of Haystack Document objects. For documents as dictionaries, the format is {"text": "<the-actual-text>"}. Optionally: Include metadata via {"text": "<the-actual-text>", "meta":{"name": "<some-document-name>", "author": "somebody", ...}} It can be used for filtering and is accessible in the responses of the Finder. Advanced: If you are using your own Elasticsearch mapping, the key names in the dictionary should be changed to what you have set for self.text_field and self.name_field. :param index: Elasticsearch index where the documents should be indexed. If not supplied, self.index will be used. :param batch_size: Number of documents that are passed to Elasticsearch's bulk function at a time. :return: None """ if index and not self.client.indices.exists(index=index): self._create_document_index(index) if index is None: index = self.index documents_to_index = [] for document in documents: # Make sure we comply to Document class format if isinstance(document, dict): doc = Document.from_dict(document, field_map=self._create_document_field_map()) else: doc = document _doc = { "_op_type": "index" if self.update_existing_documents else "create", "_index": index, **doc.to_dict(field_map=self._create_document_field_map()) } # type: Dict[str, Any] # cast embedding type as ES cannot deal with np.array if _doc[self.embedding_field] is not None: if type(_doc[self.embedding_field]) == np.ndarray: _doc[self.embedding_field] = _doc[self.embedding_field].tolist() # rename id for elastic _doc["_id"] = str(_doc.pop("id")) # don't index query score and empty fields _ = _doc.pop("score", None) _ = _doc.pop("probability", None) _doc = {k:v for k,v in _doc.items() if v is not None} # In order to have a flat structure in elastic + similar behaviour to the other DocumentStores, # we "unnest" all value within "meta" if "meta" in _doc.keys(): for k, v in _doc["meta"].items(): _doc[k] = v _doc.pop("meta") documents_to_index.append(_doc) # Pass batch_size number of documents to bulk if len(documents_to_index) % batch_size == 0: bulk(self.client, documents_to_index, request_timeout=300, refresh=self.refresh_type) documents_to_index = [] if documents_to_index: bulk(self.client, documents_to_index, request_timeout=300, refresh=self.refresh_type)
[ "def", "write_documents", "(", "self", ",", "documents", ":", "Union", "[", "List", "[", "dict", "]", ",", "List", "[", "Document", "]", "]", ",", "index", ":", "Optional", "[", "str", "]", "=", "None", ",", "batch_size", ":", "int", "=", "10_000", ")", ":", "if", "index", "and", "not", "self", ".", "client", ".", "indices", ".", "exists", "(", "index", "=", "index", ")", ":", "self", ".", "_create_document_index", "(", "index", ")", "if", "index", "is", "None", ":", "index", "=", "self", ".", "index", "documents_to_index", "=", "[", "]", "for", "document", "in", "documents", ":", "# Make sure we comply to Document class format", "if", "isinstance", "(", "document", ",", "dict", ")", ":", "doc", "=", "Document", ".", "from_dict", "(", "document", ",", "field_map", "=", "self", ".", "_create_document_field_map", "(", ")", ")", "else", ":", "doc", "=", "document", "_doc", "=", "{", "\"_op_type\"", ":", "\"index\"", "if", "self", ".", "update_existing_documents", "else", "\"create\"", ",", "\"_index\"", ":", "index", ",", "*", "*", "doc", ".", "to_dict", "(", "field_map", "=", "self", ".", "_create_document_field_map", "(", ")", ")", "}", "# type: Dict[str, Any]", "# cast embedding type as ES cannot deal with np.array", "if", "_doc", "[", "self", ".", "embedding_field", "]", "is", "not", "None", ":", "if", "type", "(", "_doc", "[", "self", ".", "embedding_field", "]", ")", "==", "np", ".", "ndarray", ":", "_doc", "[", "self", ".", "embedding_field", "]", "=", "_doc", "[", "self", ".", "embedding_field", "]", ".", "tolist", "(", ")", "# rename id for elastic", "_doc", "[", "\"_id\"", "]", "=", "str", "(", "_doc", ".", "pop", "(", "\"id\"", ")", ")", "# don't index query score and empty fields", "_", "=", "_doc", ".", "pop", "(", "\"score\"", ",", "None", ")", "_", "=", "_doc", ".", "pop", "(", "\"probability\"", ",", "None", ")", "_doc", "=", "{", "k", ":", "v", "for", "k", ",", "v", "in", "_doc", ".", "items", "(", ")", "if", "v", "is", "not", "None", "}", "# In order to have a flat structure in elastic + similar behaviour to the other DocumentStores,", "# we \"unnest\" all value within \"meta\"", "if", "\"meta\"", "in", "_doc", ".", "keys", "(", ")", ":", "for", "k", ",", "v", "in", "_doc", "[", "\"meta\"", "]", ".", "items", "(", ")", ":", "_doc", "[", "k", "]", "=", "v", "_doc", ".", "pop", "(", "\"meta\"", ")", "documents_to_index", ".", "append", "(", "_doc", ")", "# Pass batch_size number of documents to bulk", "if", "len", "(", "documents_to_index", ")", "%", "batch_size", "==", "0", ":", "bulk", "(", "self", ".", "client", ",", "documents_to_index", ",", "request_timeout", "=", "300", ",", "refresh", "=", "self", ".", "refresh_type", ")", "documents_to_index", "=", "[", "]", "if", "documents_to_index", ":", "bulk", "(", "self", ".", "client", ",", "documents_to_index", ",", "request_timeout", "=", "300", ",", "refresh", "=", "self", ".", "refresh_type", ")" ]
[ 326, 4 ]
[ 398, 97 ]
python
en
['en', 'error', 'th']
False
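A usage sketch for `write_documents`; document texts and meta fields are made up for illustration:

```python
from haystack.document_store.elasticsearch import ElasticsearchDocumentStore

store = ElasticsearchDocumentStore(host="localhost", index="document")

# Documents can be plain dicts with a "text" key and optional "meta";
# without explicit IDs, UUIDs are assigned automatically. Duplicate IDs
# raise unless the store was created with update_existing_documents=True.
docs = [
    {"text": "Haystack stores documents in Elasticsearch.",
     "meta": {"name": "intro.txt", "category": "docs"}},
    {"text": "Metadata fields are flattened so they can be filtered on.",
     "meta": {"name": "meta.txt", "category": "docs"}},
]
store.write_documents(docs)
```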
ElasticsearchDocumentStore.write_labels
( self, labels: Union[List[Label], List[dict]], index: Optional[str] = None, batch_size: int = 10_000 )
Write annotation labels into document store. :param labels: A list of Python dictionaries or a list of Haystack Label objects. :param batch_size: Number of labels that are passed to Elasticsearch's bulk function at a time.
Write annotation labels into document store.
def write_labels( self, labels: Union[List[Label], List[dict]], index: Optional[str] = None, batch_size: int = 10_000 ): """Write annotation labels into document store. :param labels: A list of Python dictionaries or a list of Haystack Label objects. :param batch_size: Number of labels that are passed to Elasticsearch's bulk function at a time. """ index = index or self.label_index if index and not self.client.indices.exists(index=index): self._create_label_index(index) labels_to_index = [] for l in labels: # Make sure we comply to Label class format if isinstance(l, dict): label = Label.from_dict(l) else: label = l # create timestamps if not available yet if not label.created_at: label.created_at = time.strftime("%Y-%m-%d %H:%M:%S") if not label.updated_at: label.updated_at = label.created_at _label = { "_op_type": "index" if self.update_existing_documents else "create", "_index": index, **label.to_dict() } # type: Dict[str, Any] # rename id for elastic if label.id is not None: _label["_id"] = str(_label.pop("id")) labels_to_index.append(_label) # Pass batch_size number of labels to bulk if len(labels_to_index) % batch_size == 0: bulk(self.client, labels_to_index, request_timeout=300, refresh=self.refresh_type) labels_to_index = [] if labels_to_index: bulk(self.client, labels_to_index, request_timeout=300, refresh=self.refresh_type)
[ "def", "write_labels", "(", "self", ",", "labels", ":", "Union", "[", "List", "[", "Label", "]", ",", "List", "[", "dict", "]", "]", ",", "index", ":", "Optional", "[", "str", "]", "=", "None", ",", "batch_size", ":", "int", "=", "10_000", ")", ":", "index", "=", "index", "or", "self", ".", "label_index", "if", "index", "and", "not", "self", ".", "client", ".", "indices", ".", "exists", "(", "index", "=", "index", ")", ":", "self", ".", "_create_label_index", "(", "index", ")", "labels_to_index", "=", "[", "]", "for", "l", "in", "labels", ":", "# Make sure we comply to Label class format", "if", "isinstance", "(", "l", ",", "dict", ")", ":", "label", "=", "Label", ".", "from_dict", "(", "l", ")", "else", ":", "label", "=", "l", "# create timestamps if not available yet", "if", "not", "label", ".", "created_at", ":", "label", ".", "created_at", "=", "time", ".", "strftime", "(", "\"%Y-%m-%d %H:%M:%S\"", ")", "if", "not", "label", ".", "updated_at", ":", "label", ".", "updated_at", "=", "label", ".", "created_at", "_label", "=", "{", "\"_op_type\"", ":", "\"index\"", "if", "self", ".", "update_existing_documents", "else", "\"create\"", ",", "\"_index\"", ":", "index", ",", "*", "*", "label", ".", "to_dict", "(", ")", "}", "# type: Dict[str, Any]", "# rename id for elastic", "if", "label", ".", "id", "is", "not", "None", ":", "_label", "[", "\"_id\"", "]", "=", "str", "(", "_label", ".", "pop", "(", "\"id\"", ")", ")", "labels_to_index", ".", "append", "(", "_label", ")", "# Pass batch_size number of labels to bulk", "if", "len", "(", "labels_to_index", ")", "%", "batch_size", "==", "0", ":", "bulk", "(", "self", ".", "client", ",", "labels_to_index", ",", "request_timeout", "=", "300", ",", "refresh", "=", "self", ".", "refresh_type", ")", "labels_to_index", "=", "[", "]", "if", "labels_to_index", ":", "bulk", "(", "self", ".", "client", ",", "labels_to_index", ",", "request_timeout", "=", "300", ",", "refresh", "=", "self", ".", "refresh_type", ")" ]
[ 400, 4 ]
[ 444, 94 ]
python
en
['en', 'en', 'en']
True
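A sketch for `write_labels`; labels may be passed as dicts and are converted via `Label.from_dict`, so the field names below are assumptions based on the Haystack 0.x `Label` class:

```python
from haystack.document_store.elasticsearch import ElasticsearchDocumentStore

store = ElasticsearchDocumentStore(host="localhost", index="document", label_index="label")

# Field names assumed from the Haystack 0.x Label class; adjust if they differ.
labels = [{
    "question": "Where are documents stored?",
    "answer": "Elasticsearch",
    "is_correct_answer": True,
    "is_correct_document": True,
    "origin": "user-feedback",
}]
store.write_labels(labels)  # created_at/updated_at timestamps are filled in if missing
```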
ElasticsearchDocumentStore.update_document_meta
(self, id: str, meta: Dict[str, str])
Update the metadata dictionary of a document by specifying its string id
Update the metadata dictionary of a document by specifying its string id
def update_document_meta(self, id: str, meta: Dict[str, str]): """ Update the metadata dictionary of a document by specifying its string id """ body = {"doc": meta} self.client.update(index=self.index, id=id, body=body, refresh=self.refresh_type)
[ "def", "update_document_meta", "(", "self", ",", "id", ":", "str", ",", "meta", ":", "Dict", "[", "str", ",", "str", "]", ")", ":", "body", "=", "{", "\"doc\"", ":", "meta", "}", "self", ".", "client", ".", "update", "(", "index", "=", "self", ".", "index", ",", "id", "=", "id", ",", "body", "=", "body", ",", "refresh", "=", "self", ".", "refresh_type", ")" ]
[ 446, 4 ]
[ 451, 89 ]
python
en
['en', 'error', 'th']
False
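A one-call sketch for `update_document_meta` (the ID and meta values are illustrative):

```python
from haystack.document_store.elasticsearch import ElasticsearchDocumentStore

store = ElasticsearchDocumentStore(host="localhost", index="document")

# Partial update: only the given meta keys change; the rest of the
# document is untouched (Elasticsearch "doc" update under the hood).
store.update_document_meta(id="some-doc-id", meta={"category": "archived"})
```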
ElasticsearchDocumentStore.get_document_count
(self, filters: Optional[Dict[str, List[str]]] = None, index: Optional[str] = None)
Return the number of documents in the document store.
Return the number of documents in the document store.
def get_document_count(self, filters: Optional[Dict[str, List[str]]] = None, index: Optional[str] = None) -> int: """ Return the number of documents in the document store. """ index = index or self.index body: dict = {"query": {"bool": {}}} if filters: filter_clause = [] for key, values in filters.items(): if type(values) != list: raise ValueError( f'Wrong filter format for key "{key}": Please provide a list of allowed values for each key. ' 'Example: {"name": ["some", "more"], "category": ["only_one"]} ') filter_clause.append( { "terms": {key: values} } ) body["query"]["bool"]["filter"] = filter_clause result = self.client.count(index=index, body=body) count = result["count"] return count
[ "def", "get_document_count", "(", "self", ",", "filters", ":", "Optional", "[", "Dict", "[", "str", ",", "List", "[", "str", "]", "]", "]", "=", "None", ",", "index", ":", "Optional", "[", "str", "]", "=", "None", ")", "->", "int", ":", "index", "=", "index", "or", "self", ".", "index", "body", ":", "dict", "=", "{", "\"query\"", ":", "{", "\"bool\"", ":", "{", "}", "}", "}", "if", "filters", ":", "filter_clause", "=", "[", "]", "for", "key", ",", "values", "in", "filters", ".", "items", "(", ")", ":", "if", "type", "(", "values", ")", "!=", "list", ":", "raise", "ValueError", "(", "f'Wrong filter format for key \"{key}\": Please provide a list of allowed values for each key. '", "'Example: {\"name\": [\"some\", \"more\"], \"category\": [\"only_one\"]} '", ")", "filter_clause", ".", "append", "(", "{", "\"terms\"", ":", "{", "key", ":", "values", "}", "}", ")", "body", "[", "\"query\"", "]", "[", "\"bool\"", "]", "[", "\"filter\"", "]", "=", "filter_clause", "result", "=", "self", ".", "client", ".", "count", "(", "index", "=", "index", ",", "body", "=", "body", ")", "count", "=", "result", "[", "\"count\"", "]", "return", "count" ]
[ 453, 4 ]
[ 476, 20 ]
python
en
['en', 'error', 'th']
False
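A sketch for `get_document_count`, showing the filter format the method enforces:

```python
from haystack.document_store.elasticsearch import ElasticsearchDocumentStore

store = ElasticsearchDocumentStore(host="localhost", index="document")

total = store.get_document_count()
# Filter values must be lists, even for a single allowed value,
# otherwise the method raises a ValueError.
subset = store.get_document_count(filters={"category": ["docs"]})
print(total, subset)
```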
ElasticsearchDocumentStore.get_label_count
(self, index: Optional[str] = None)
Return the number of labels in the document store
Return the number of labels in the document store
def get_label_count(self, index: Optional[str] = None) -> int: """ Return the number of labels in the document store """ return self.get_document_count(index=index)
[ "def", "get_label_count", "(", "self", ",", "index", ":", "Optional", "[", "str", "]", "=", "None", ")", "->", "int", ":", "return", "self", ".", "get_document_count", "(", "index", "=", "index", ")" ]
[ 478, 4 ]
[ 482, 51 ]
python
en
['en', 'error', 'th']
False
ElasticsearchDocumentStore.get_all_documents
( self, index: Optional[str] = None, filters: Optional[Dict[str, List[str]]] = None, return_embedding: Optional[bool] = None, batch_size: int = 10_000, )
Get documents from the document store. :param index: Name of the index to get the documents from. If None, the DocumentStore's default index (self.index) will be used. :param filters: Optional filters to narrow down the documents to return. Example: {"name": ["some", "more"], "category": ["only_one"]} :param return_embedding: Whether to return the document embeddings. :param batch_size: When working with a large number of documents, batching can help reduce the memory footprint.
Get documents from the document store.
def get_all_documents( self, index: Optional[str] = None, filters: Optional[Dict[str, List[str]]] = None, return_embedding: Optional[bool] = None, batch_size: int = 10_000, ) -> List[Document]: """ Get documents from the document store. :param index: Name of the index to get the documents from. If None, the DocumentStore's default index (self.index) will be used. :param filters: Optional filters to narrow down the documents to return. Example: {"name": ["some", "more"], "category": ["only_one"]} :param return_embedding: Whether to return the document embeddings. :param batch_size: When working with a large number of documents, batching can help reduce the memory footprint. """ result = self.get_all_documents_generator( index=index, filters=filters, return_embedding=return_embedding, batch_size=batch_size ) documents = list(result) return documents
[ "def", "get_all_documents", "(", "self", ",", "index", ":", "Optional", "[", "str", "]", "=", "None", ",", "filters", ":", "Optional", "[", "Dict", "[", "str", ",", "List", "[", "str", "]", "]", "]", "=", "None", ",", "return_embedding", ":", "Optional", "[", "bool", "]", "=", "None", ",", "batch_size", ":", "int", "=", "10_000", ",", ")", "->", "List", "[", "Document", "]", ":", "result", "=", "self", ".", "get_all_documents_generator", "(", "index", "=", "index", ",", "filters", "=", "filters", ",", "return_embedding", "=", "return_embedding", ",", "batch_size", "=", "batch_size", ")", "documents", "=", "list", "(", "result", ")", "return", "documents" ]
[ 484, 4 ]
[ 505, 24 ]
python
en
['en', 'error', 'th']
False
ElasticsearchDocumentStore.get_all_documents_generator
( self, index: Optional[str] = None, filters: Optional[Dict[str, List[str]]] = None, return_embedding: Optional[bool] = None, batch_size: int = 10_000, )
Get documents from the document store. Under the hood, documents are fetched in batches from the document store and yielded as individual documents. This method can be used to iteratively process a large number of documents without having to load all documents in memory. :param index: Name of the index to get the documents from. If None, the DocumentStore's default index (self.index) will be used. :param filters: Optional filters to narrow down the documents to return. Example: {"name": ["some", "more"], "category": ["only_one"]} :param return_embedding: Whether to return the document embeddings. :param batch_size: When working with a large number of documents, batching can help reduce the memory footprint.
Get documents from the document store. Under the hood, documents are fetched in batches from the document store and yielded as individual documents. This method can be used to iteratively process a large number of documents without having to load all documents in memory.
def get_all_documents_generator( self, index: Optional[str] = None, filters: Optional[Dict[str, List[str]]] = None, return_embedding: Optional[bool] = None, batch_size: int = 10_000, ) -> Generator[Document, None, None]: """ Get documents from the document store. Under the hood, documents are fetched in batches from the document store and yielded as individual documents. This method can be used to iteratively process a large number of documents without having to load all documents in memory. :param index: Name of the index to get the documents from. If None, the DocumentStore's default index (self.index) will be used. :param filters: Optional filters to narrow down the documents to return. Example: {"name": ["some", "more"], "category": ["only_one"]} :param return_embedding: Whether to return the document embeddings. :param batch_size: When working with a large number of documents, batching can help reduce the memory footprint. """ if index is None: index = self.index if return_embedding is None: return_embedding = self.return_embedding result = self._get_all_documents_in_index(index=index, filters=filters, batch_size=batch_size) for hit in result: document = self._convert_es_hit_to_document(hit, return_embedding=return_embedding) yield document
[ "def", "get_all_documents_generator", "(", "self", ",", "index", ":", "Optional", "[", "str", "]", "=", "None", ",", "filters", ":", "Optional", "[", "Dict", "[", "str", ",", "List", "[", "str", "]", "]", "]", "=", "None", ",", "return_embedding", ":", "Optional", "[", "bool", "]", "=", "None", ",", "batch_size", ":", "int", "=", "10_000", ",", ")", "->", "Generator", "[", "Document", ",", "None", ",", "None", "]", ":", "if", "index", "is", "None", ":", "index", "=", "self", ".", "index", "if", "return_embedding", "is", "None", ":", "return_embedding", "=", "self", ".", "return_embedding", "result", "=", "self", ".", "_get_all_documents_in_index", "(", "index", "=", "index", ",", "filters", "=", "filters", ",", "batch_size", "=", "batch_size", ")", "for", "hit", "in", "result", ":", "document", "=", "self", ".", "_convert_es_hit_to_document", "(", "hit", ",", "return_embedding", "=", "return_embedding", ")", "yield", "document" ]
[ 507, 4 ]
[ 536, 26 ]
python
en
['en', 'error', 'th']
False
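A streaming sketch for `get_all_documents_generator` (filter values are illustrative):

```python
from haystack.document_store.elasticsearch import ElasticsearchDocumentStore

store = ElasticsearchDocumentStore(host="localhost", index="document")

# Hits are fetched via the scroll API in batches of `batch_size` and yielded
# one Document at a time, so the full index never has to fit in memory.
for doc in store.get_all_documents_generator(filters={"category": ["docs"]},
                                             return_embedding=False,
                                             batch_size=1_000):
    print(doc.id)
```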
ElasticsearchDocumentStore.get_all_labels
( self, index: Optional[str] = None, filters: Optional[Dict[str, List[str]]] = None, batch_size: int = 10_000 )
Return all labels in the document store
Return all labels in the document store
def get_all_labels( self, index: Optional[str] = None, filters: Optional[Dict[str, List[str]]] = None, batch_size: int = 10_000 ) -> List[Label]: """ Return all labels in the document store """ index = index or self.label_index result = list(self._get_all_documents_in_index(index=index, filters=filters, batch_size=batch_size)) labels = [Label.from_dict(hit["_source"]) for hit in result] return labels
[ "def", "get_all_labels", "(", "self", ",", "index", ":", "Optional", "[", "str", "]", "=", "None", ",", "filters", ":", "Optional", "[", "Dict", "[", "str", ",", "List", "[", "str", "]", "]", "]", "=", "None", ",", "batch_size", ":", "int", "=", "10_000", ")", "->", "List", "[", "Label", "]", ":", "index", "=", "index", "or", "self", ".", "label_index", "result", "=", "list", "(", "self", ".", "_get_all_documents_in_index", "(", "index", "=", "index", ",", "filters", "=", "filters", ",", "batch_size", "=", "batch_size", ")", ")", "labels", "=", "[", "Label", ".", "from_dict", "(", "hit", "[", "\"_source\"", "]", ")", "for", "hit", "in", "result", "]", "return", "labels" ]
[ 538, 4 ]
[ 547, 21 ]
python
en
['en', 'error', 'th']
False
ElasticsearchDocumentStore._get_all_documents_in_index
( self, index: str, filters: Optional[Dict[str, List[str]]] = None, batch_size: int = 10_000, only_documents_without_embedding: bool = False, )
Return all documents in a specific index in the document store
Return all documents in a specific index in the document store
def _get_all_documents_in_index( self, index: str, filters: Optional[Dict[str, List[str]]] = None, batch_size: int = 10_000, only_documents_without_embedding: bool = False, ) -> Generator[dict, None, None]: """ Return all documents in a specific index in the document store """ body: dict = {"query": {"bool": {}}} if filters: filter_clause = [] for key, values in filters.items(): filter_clause.append( { "terms": {key: values} } ) body["query"]["bool"]["filter"] = filter_clause if only_documents_without_embedding: body["query"]["bool"] = {"must_not": {"exists": {"field": self.embedding_field}}} result = scan(self.client, query=body, index=index, size=batch_size, scroll="1d") yield from result
[ "def", "_get_all_documents_in_index", "(", "self", ",", "index", ":", "str", ",", "filters", ":", "Optional", "[", "Dict", "[", "str", ",", "List", "[", "str", "]", "]", "]", "=", "None", ",", "batch_size", ":", "int", "=", "10_000", ",", "only_documents_without_embedding", ":", "bool", "=", "False", ",", ")", "->", "Generator", "[", "dict", ",", "None", ",", "None", "]", ":", "body", ":", "dict", "=", "{", "\"query\"", ":", "{", "\"bool\"", ":", "{", "}", "}", "}", "if", "filters", ":", "filter_clause", "=", "[", "]", "for", "key", ",", "values", "in", "filters", ".", "items", "(", ")", ":", "filter_clause", ".", "append", "(", "{", "\"terms\"", ":", "{", "key", ":", "values", "}", "}", ")", "body", "[", "\"query\"", "]", "[", "\"bool\"", "]", "[", "\"filter\"", "]", "=", "filter_clause", "if", "only_documents_without_embedding", ":", "body", "[", "\"query\"", "]", "[", "\"bool\"", "]", "=", "{", "\"must_not\"", ":", "{", "\"exists\"", ":", "{", "\"field\"", ":", "self", ".", "embedding_field", "}", "}", "}", "result", "=", "scan", "(", "self", ".", "client", ",", "query", "=", "body", ",", "index", "=", "index", ",", "size", "=", "batch_size", ",", "scroll", "=", "\"1d\"", ")", "yield", "from", "result" ]
[ 549, 4 ]
[ 575, 25 ]
python
en
['en', 'error', 'th']
False
ElasticsearchDocumentStore.query
( self, query: Optional[str], filters: Optional[Dict[str, List[str]]] = None, top_k: int = 10, custom_query: Optional[str] = None, index: Optional[str] = None, )
Scan through documents in DocumentStore and return a small number of documents that are most relevant to the query as defined by the BM25 algorithm. :param query: The query :param filters: A dictionary where the keys specify a metadata field and the value is a list of accepted values for that field :param top_k: How many documents to return per query. :param index: The name of the index in the DocumentStore from which to retrieve documents
Scan through documents in DocumentStore and return a small number of documents that are most relevant to the query as defined by the BM25 algorithm.
def query( self, query: Optional[str], filters: Optional[Dict[str, List[str]]] = None, top_k: int = 10, custom_query: Optional[str] = None, index: Optional[str] = None, ) -> List[Document]: """ Scan through documents in DocumentStore and return a small number of documents that are most relevant to the query as defined by the BM25 algorithm. :param query: The query :param filters: A dictionary where the keys specify a metadata field and the value is a list of accepted values for that field :param top_k: How many documents to return per query. :param index: The name of the index in the DocumentStore from which to retrieve documents """ if index is None: index = self.index # Naive retrieval without BM25, only filtering if query is None: body = {"query": {"bool": {"must": {"match_all": {}}}}} # type: Dict[str, Any] if filters: filter_clause = [] for key, values in filters.items(): filter_clause.append( { "terms": {key: values} } ) body["query"]["bool"]["filter"] = filter_clause # Retrieval via custom query elif custom_query: # substitute placeholder for query and filters for the custom_query template string template = Template(custom_query) # replace all "${query}" placeholder(s) with query substitutions = {"query": f'"{query}"'} # For each filter we got passed, we'll try to find & replace the corresponding placeholder in the template # Example: filters={"years":[2018]} => replaces {$years} in custom_query with '[2018]' if filters: for key, values in filters.items(): values_str = json.dumps(values) substitutions[key] = values_str custom_query_json = template.substitute(**substitutions) body = json.loads(custom_query_json) # add top_k body["size"] = str(top_k) # Default Retrieval via BM25 using the user query on `self.search_fields` else: body = { "size": str(top_k), "query": { "bool": { "should": [{"multi_match": {"query": query, "type": "most_fields", "fields": self.search_fields}}] } }, } if filters: filter_clause = [] for key, values in filters.items(): if type(values) != list: raise ValueError(f'Wrong filter format for key "{key}": Please provide a list of allowed values for each key. ' 'Example: {"name": ["some", "more"], "category": ["only_one"]} ') filter_clause.append( { "terms": {key: values} } ) body["query"]["bool"]["filter"] = filter_clause if self.excluded_meta_data: body["_source"] = {"excludes": self.excluded_meta_data} logger.debug(f"Retriever query: {body}") result = self.client.search(index=index, body=body)["hits"]["hits"] documents = [self._convert_es_hit_to_document(hit, return_embedding=self.return_embedding) for hit in result] return documents
[ "def", "query", "(", "self", ",", "query", ":", "Optional", "[", "str", "]", ",", "filters", ":", "Optional", "[", "Dict", "[", "str", ",", "List", "[", "str", "]", "]", "]", "=", "None", ",", "top_k", ":", "int", "=", "10", ",", "custom_query", ":", "Optional", "[", "str", "]", "=", "None", ",", "index", ":", "Optional", "[", "str", "]", "=", "None", ",", ")", "->", "List", "[", "Document", "]", ":", "if", "index", "is", "None", ":", "index", "=", "self", ".", "index", "# Naive retrieval without BM25, only filtering", "if", "query", "is", "None", ":", "body", "=", "{", "\"query\"", ":", "{", "\"bool\"", ":", "{", "\"must\"", ":", "{", "\"match_all\"", ":", "{", "}", "}", "}", "}", "}", "# type: Dict[str, Any]", "if", "filters", ":", "filter_clause", "=", "[", "]", "for", "key", ",", "values", "in", "filters", ".", "items", "(", ")", ":", "filter_clause", ".", "append", "(", "{", "\"terms\"", ":", "{", "key", ":", "values", "}", "}", ")", "body", "[", "\"query\"", "]", "[", "\"bool\"", "]", "[", "\"filter\"", "]", "=", "filter_clause", "# Retrieval via custom query", "elif", "custom_query", ":", "# substitute placeholder for query and filters for the custom_query template string", "template", "=", "Template", "(", "custom_query", ")", "# replace all \"${query}\" placeholder(s) with query", "substitutions", "=", "{", "\"query\"", ":", "f'\"{query}\"'", "}", "# For each filter we got passed, we'll try to find & replace the corresponding placeholder in the template", "# Example: filters={\"years\":[2018]} => replaces {$years} in custom_query with '[2018]'", "if", "filters", ":", "for", "key", ",", "values", "in", "filters", ".", "items", "(", ")", ":", "values_str", "=", "json", ".", "dumps", "(", "values", ")", "substitutions", "[", "key", "]", "=", "values_str", "custom_query_json", "=", "template", ".", "substitute", "(", "*", "*", "substitutions", ")", "body", "=", "json", ".", "loads", "(", "custom_query_json", ")", "# add top_k", "body", "[", "\"size\"", "]", "=", "str", "(", "top_k", ")", "# Default Retrieval via BM25 using the user query on `self.search_fields`", "else", ":", "body", "=", "{", "\"size\"", ":", "str", "(", "top_k", ")", ",", "\"query\"", ":", "{", "\"bool\"", ":", "{", "\"should\"", ":", "[", "{", "\"multi_match\"", ":", "{", "\"query\"", ":", "query", ",", "\"type\"", ":", "\"most_fields\"", ",", "\"fields\"", ":", "self", ".", "search_fields", "}", "}", "]", "}", "}", ",", "}", "if", "filters", ":", "filter_clause", "=", "[", "]", "for", "key", ",", "values", "in", "filters", ".", "items", "(", ")", ":", "if", "type", "(", "values", ")", "!=", "list", ":", "raise", "ValueError", "(", "f'Wrong filter format for key \"{key}\": Please provide a list of allowed values for each key. 
'", "'Example: {\"name\": [\"some\", \"more\"], \"category\": [\"only_one\"]} '", ")", "filter_clause", ".", "append", "(", "{", "\"terms\"", ":", "{", "key", ":", "values", "}", "}", ")", "body", "[", "\"query\"", "]", "[", "\"bool\"", "]", "[", "\"filter\"", "]", "=", "filter_clause", "if", "self", ".", "excluded_meta_data", ":", "body", "[", "\"_source\"", "]", "=", "{", "\"excludes\"", ":", "self", ".", "excluded_meta_data", "}", "logger", ".", "debug", "(", "f\"Retriever query: {body}\"", ")", "result", "=", "self", ".", "client", ".", "search", "(", "index", "=", "index", ",", "body", "=", "body", ")", "[", "\"hits\"", "]", "[", "\"hits\"", "]", "documents", "=", "[", "self", ".", "_convert_es_hit_to_document", "(", "hit", ",", "return_embedding", "=", "self", ".", "return_embedding", ")", "for", "hit", "in", "result", "]", "return", "documents" ]
[ 577, 4 ]
[ 660, 24 ]
python
en
['en', 'error', 'th']
False
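A sketch of the two retrieval paths: the default BM25 `multi_match` over `search_fields`, and a `custom_query` template in which `${query}` and per-filter placeholders are substituted via `string.Template`. In the custom-query branch, `filters` are used only for placeholder substitution, not as extra terms filters. The field names `text` and `year` are hypothetical:

```python
from haystack.document_store.elasticsearch import ElasticsearchDocumentStore

store = ElasticsearchDocumentStore(host="localhost", index="document")

# Default BM25 retrieval with a terms filter on a metadata field.
docs = store.query(query="vector search", filters={"category": ["docs"]}, top_k=5)

# Custom query: ${query} becomes the quoted query string and ${years} the
# JSON-encoded filter values, so the substituted result must parse as JSON.
custom = """{
    "query": {
        "bool": {
            "must": {"match": {"text": ${query}}},
            "filter": {"terms": {"year": ${years}}}
        }
    }
}"""
docs = store.query(query="vector search", custom_query=custom,
                   filters={"years": [2019, 2020]})
```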
ElasticsearchDocumentStore.query_by_embedding
(self, query_emb: np.ndarray, filters: Optional[Dict[str, List[str]]] = None, top_k: int = 10, index: Optional[str] = None, return_embedding: Optional[bool] = None)
Find the documents that are most similar to the provided `query_emb` by using a vector similarity metric. :param query_emb: Embedding of the query (e.g. gathered from DPR) :param filters: Optional filters to narrow down the search space. Example: {"name": ["some", "more"], "category": ["only_one"]} :param top_k: How many documents to return :param index: Index name for storing the docs and metadata :param return_embedding: Whether to return the document embeddings :return:
Find the documents that are most similar to the provided `query_emb` by using a vector similarity metric.
def query_by_embedding(self, query_emb: np.ndarray, filters: Optional[Dict[str, List[str]]] = None, top_k: int = 10, index: Optional[str] = None, return_embedding: Optional[bool] = None) -> List[Document]: """ Find the documents that are most similar to the provided `query_emb` by using a vector similarity metric. :param query_emb: Embedding of the query (e.g. gathered from DPR) :param filters: Optional filters to narrow down the search space. Example: {"name": ["some", "more"], "category": ["only_one"]} :param top_k: How many documents to return :param index: Index name for storing the docs and metadata :param return_embedding: Whether to return the document embeddings :return: """ if index is None: index = self.index if return_embedding is None: return_embedding = self.return_embedding if not self.embedding_field: raise RuntimeError("Please specify arg `embedding_field` in ElasticsearchDocumentStore()") else: # +1 in similarity to avoid negative numbers (for cosine sim) body = { "size": top_k, "query": self._get_vector_similarity_query(query_emb, top_k) } if filters: filter_clause = [] for key, values in filters.items(): if type(values) != list: raise ValueError(f'Wrong filter format for key "{key}": Please provide a list of allowed values for each key. ' 'Example: {"name": ["some", "more"], "category": ["only_one"]} ') filter_clause.append( { "terms": {key: values} } ) body["query"]["script_score"]["query"] = {"bool": {"filter": filter_clause}} excluded_meta_data: Optional[list] = None if self.excluded_meta_data: excluded_meta_data = deepcopy(self.excluded_meta_data) if return_embedding is True and self.embedding_field in excluded_meta_data: excluded_meta_data.remove(self.embedding_field) elif return_embedding is False and self.embedding_field not in excluded_meta_data: excluded_meta_data.append(self.embedding_field) elif return_embedding is False: excluded_meta_data = [self.embedding_field] if excluded_meta_data: body["_source"] = {"excludes": excluded_meta_data} logger.debug(f"Retriever query: {body}") result = self.client.search(index=index, body=body, request_timeout=300)["hits"]["hits"] documents = [ self._convert_es_hit_to_document(hit, adapt_score_for_embedding=True, return_embedding=return_embedding) for hit in result ] return documents
[ "def", "query_by_embedding", "(", "self", ",", "query_emb", ":", "np", ".", "ndarray", ",", "filters", ":", "Optional", "[", "Dict", "[", "str", ",", "List", "[", "str", "]", "]", "]", "=", "None", ",", "top_k", ":", "int", "=", "10", ",", "index", ":", "Optional", "[", "str", "]", "=", "None", ",", "return_embedding", ":", "Optional", "[", "bool", "]", "=", "None", ")", "->", "List", "[", "Document", "]", ":", "if", "index", "is", "None", ":", "index", "=", "self", ".", "index", "if", "return_embedding", "is", "None", ":", "return_embedding", "=", "self", ".", "return_embedding", "if", "not", "self", ".", "embedding_field", ":", "raise", "RuntimeError", "(", "\"Please specify arg `embedding_field` in ElasticsearchDocumentStore()\"", ")", "else", ":", "# +1 in similarity to avoid negative numbers (for cosine sim)", "body", "=", "{", "\"size\"", ":", "top_k", ",", "\"query\"", ":", "self", ".", "_get_vector_similarity_query", "(", "query_emb", ",", "top_k", ")", "}", "if", "filters", ":", "filter_clause", "=", "[", "]", "for", "key", ",", "values", "in", "filters", ".", "items", "(", ")", ":", "if", "type", "(", "values", ")", "!=", "list", ":", "raise", "ValueError", "(", "f'Wrong filter format for key \"{key}\": Please provide a list of allowed values for each key. '", "'Example: {\"name\": [\"some\", \"more\"], \"category\": [\"only_one\"]} '", ")", "filter_clause", ".", "append", "(", "{", "\"terms\"", ":", "{", "key", ":", "values", "}", "}", ")", "body", "[", "\"query\"", "]", "[", "\"script_score\"", "]", "[", "\"query\"", "]", "=", "{", "\"bool\"", ":", "{", "\"filter\"", ":", "filter_clause", "}", "}", "excluded_meta_data", ":", "Optional", "[", "list", "]", "=", "None", "if", "self", ".", "excluded_meta_data", ":", "excluded_meta_data", "=", "deepcopy", "(", "self", ".", "excluded_meta_data", ")", "if", "return_embedding", "is", "True", "and", "self", ".", "embedding_field", "in", "excluded_meta_data", ":", "excluded_meta_data", ".", "remove", "(", "self", ".", "embedding_field", ")", "elif", "return_embedding", "is", "False", "and", "self", ".", "embedding_field", "not", "in", "excluded_meta_data", ":", "excluded_meta_data", ".", "append", "(", "self", ".", "embedding_field", ")", "elif", "return_embedding", "is", "False", ":", "excluded_meta_data", "=", "[", "self", ".", "embedding_field", "]", "if", "excluded_meta_data", ":", "body", "[", "\"_source\"", "]", "=", "{", "\"excludes\"", ":", "excluded_meta_data", "}", "logger", ".", "debug", "(", "f\"Retriever query: {body}\"", ")", "result", "=", "self", ".", "client", ".", "search", "(", "index", "=", "index", ",", "body", "=", "body", ",", "request_timeout", "=", "300", ")", "[", "\"hits\"", "]", "[", "\"hits\"", "]", "documents", "=", "[", "self", ".", "_convert_es_hit_to_document", "(", "hit", ",", "adapt_score_for_embedding", "=", "True", ",", "return_embedding", "=", "return_embedding", ")", "for", "hit", "in", "result", "]", "return", "documents" ]
[ 662, 4 ]
[ 728, 28 ]
python
en
['en', 'error', 'th']
False
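A vector-search sketch for `query_by_embedding`; the store must have been created with an `embedding_field`, and the embedding dimension (768 here) is an assumption that must match how documents were indexed:

```python
import numpy as np
from haystack.document_store.elasticsearch import ElasticsearchDocumentStore

store = ElasticsearchDocumentStore(host="localhost", index="document",
                                   embedding_field="embedding", embedding_dim=768,
                                   similarity="dot_product")

# Stand-in for a real query encoder (e.g. a DPR query embedding).
query_emb = np.random.rand(768).astype(np.float32)
docs = store.query_by_embedding(query_emb, top_k=5, return_embedding=False)
```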
ElasticsearchDocumentStore._get_vector_similarity_query
(self, query_emb: np.ndarray, top_k: int)
Generate Elasticsearch query for vector similarity.
Generate Elasticsearch query for vector similarity.
def _get_vector_similarity_query(self, query_emb: np.ndarray, top_k: int): """ Generate Elasticsearch query for vector similarity. """ if self.similarity == "cosine": similarity_fn_name = "cosineSimilarity" elif self.similarity == "dot_product": similarity_fn_name = "dotProduct" else: raise Exception("Invalid value for similarity in ElasticSearchDocumentStore constructor. Choose between \'cosine\' and \'dot_product\'") query = { "script_score": { "query": {"match_all": {}}, "script": { # offset score to ensure a positive range as required by Elasticsearch "source": f"{similarity_fn_name}(params.query_vector,'{self.embedding_field}') + 1000", "params": {"query_vector": query_emb.tolist()}, }, } } return query
[ "def", "_get_vector_similarity_query", "(", "self", ",", "query_emb", ":", "np", ".", "ndarray", ",", "top_k", ":", "int", ")", ":", "if", "self", ".", "similarity", "==", "\"cosine\"", ":", "similarity_fn_name", "=", "\"cosineSimilarity\"", "elif", "self", ".", "similarity", "==", "\"dot_product\"", ":", "similarity_fn_name", "=", "\"dotProduct\"", "else", ":", "raise", "Exception", "(", "\"Invalid value for similarity in ElasticSearchDocumentStore constructor. Choose between \\'cosine\\' and \\'dot_product\\'\"", ")", "query", "=", "{", "\"script_score\"", ":", "{", "\"query\"", ":", "{", "\"match_all\"", ":", "{", "}", "}", ",", "\"script\"", ":", "{", "# offset score to ensure a positive range as required by Elasticsearch", "\"source\"", ":", "f\"{similarity_fn_name}(params.query_vector,'{self.embedding_field}') + 1000\"", ",", "\"params\"", ":", "{", "\"query_vector\"", ":", "query_emb", ".", "tolist", "(", ")", "}", ",", "}", ",", "}", "}", "return", "query" ]
[ 730, 4 ]
[ 751, 20 ]
python
en
['en', 'error', 'th']
False
ElasticsearchDocumentStore.describe_documents
(self, index=None)
Return a summary of the documents in the document store
Return a summary of the documents in the document store
def describe_documents(self, index=None): """ Return a summary of the documents in the document store """ if index is None: index = self.index docs = self.get_all_documents(index) l = [len(d.text) for d in docs] stats = {"count": len(docs), "chars_mean": np.mean(l), "chars_max": max(l), "chars_min": min(l), "chars_median": np.median(l), } return stats
[ "def", "describe_documents", "(", "self", ",", "index", "=", "None", ")", ":", "if", "index", "is", "None", ":", "index", "=", "self", ".", "index", "docs", "=", "self", ".", "get_all_documents", "(", "index", ")", "l", "=", "[", "len", "(", "d", ".", "text", ")", "for", "d", "in", "docs", "]", "stats", "=", "{", "\"count\"", ":", "len", "(", "docs", ")", ",", "\"chars_mean\"", ":", "np", ".", "mean", "(", "l", ")", ",", "\"chars_max\"", ":", "max", "(", "l", ")", ",", "\"chars_min\"", ":", "min", "(", "l", ")", ",", "\"chars_median\"", ":", "np", ".", "median", "(", "l", ")", ",", "}", "return", "stats" ]
[ 799, 4 ]
[ 814, 20 ]
python
en
['en', 'error', 'th']
False
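A one-liner sketch for `describe_documents`; note it loads all documents, so it is meant for small indices:

```python
from haystack.document_store.elasticsearch import ElasticsearchDocumentStore

store = ElasticsearchDocumentStore(host="localhost", index="document")

# Reports character-length statistics over the document texts.
print(store.describe_documents())
# e.g. {'count': ..., 'chars_mean': ..., 'chars_max': ..., 'chars_min': ..., 'chars_median': ...}
```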
ElasticsearchDocumentStore.update_embeddings
( self, retriever, index: Optional[str] = None, filters: Optional[Dict[str, List[str]]] = None, update_existing_embeddings: bool = True, batch_size: int = 10_000 )
Updates the embeddings in the document store using the encoding model specified in the retriever. This can be useful if you want to add or change the embeddings for your documents (e.g. after changing the retriever config). :param retriever: Retriever to use to update the embeddings. :param index: Index name to update :param update_existing_embeddings: Whether to update existing embeddings of the documents. If set to False, only documents without embeddings are processed. This mode can be used for incremental updating of embeddings, wherein only newly indexed documents get processed. :param filters: Optional filters to narrow down the documents for which embeddings are to be updated. Example: {"name": ["some", "more"], "category": ["only_one"]} :param batch_size: When working with a large number of documents, batching can help reduce the memory footprint. :return: None
Updates the embeddings in the document store using the encoding model specified in the retriever. This can be useful if you want to add or change the embeddings for your documents (e.g. after changing the retriever config).
def update_embeddings( self, retriever, index: Optional[str] = None, filters: Optional[Dict[str, List[str]]] = None, update_existing_embeddings: bool = True, batch_size: int = 10_000 ): """ Updates the embeddings in the document store using the encoding model specified in the retriever. This can be useful if you want to add or change the embeddings for your documents (e.g. after changing the retriever config). :param retriever: Retriever to use to update the embeddings. :param index: Index name to update :param update_existing_embeddings: Whether to update existing embeddings of the documents. If set to False, only documents without embeddings are processed. This mode can be used for incremental updating of embeddings, wherein only newly indexed documents get processed. :param filters: Optional filters to narrow down the documents for which embeddings are to be updated. Example: {"name": ["some", "more"], "category": ["only_one"]} :param batch_size: When working with a large number of documents, batching can help reduce the memory footprint. :return: None """ if index is None: index = self.index if not self.embedding_field: raise RuntimeError("Specify the arg `embedding_field` when initializing ElasticsearchDocumentStore()") if update_existing_embeddings: logger.info(f"Updating embeddings for all {self.get_document_count(index=index)} docs ...") else: logger.info(f"Updating embeddings for new docs without embeddings ...") result = self._get_all_documents_in_index( index=index, filters=filters, batch_size=batch_size, only_documents_without_embedding=not update_existing_embeddings ) for result_batch in get_batches_from_generator(result, batch_size): document_batch = [self._convert_es_hit_to_document(hit, return_embedding=False) for hit in result_batch] embeddings = retriever.embed_passages(document_batch) # type: ignore assert len(document_batch) == len(embeddings) if embeddings[0].shape[0] != self.embedding_dim: raise RuntimeError(f"Embedding dim. of model ({embeddings[0].shape[0]})" f" doesn't match embedding dim. in DocumentStore ({self.embedding_dim})." "Specify the arg `embedding_dim` when initializing ElasticsearchDocumentStore()") doc_updates = [] for doc, emb in zip(document_batch, embeddings): update = {"_op_type": "update", "_index": index, "_id": doc.id, "doc": {self.embedding_field: emb.tolist()}, } doc_updates.append(update) bulk(self.client, doc_updates, request_timeout=300, refresh=self.refresh_type)
[ "def", "update_embeddings", "(", "self", ",", "retriever", ",", "index", ":", "Optional", "[", "str", "]", "=", "None", ",", "filters", ":", "Optional", "[", "Dict", "[", "str", ",", "List", "[", "str", "]", "]", "]", "=", "None", ",", "update_existing_embeddings", ":", "bool", "=", "True", ",", "batch_size", ":", "int", "=", "10_000", ")", ":", "if", "index", "is", "None", ":", "index", "=", "self", ".", "index", "if", "not", "self", ".", "embedding_field", ":", "raise", "RuntimeError", "(", "\"Specify the arg `embedding_field` when initializing ElasticsearchDocumentStore()\"", ")", "if", "update_existing_embeddings", ":", "logger", ".", "info", "(", "f\"Updating embeddings for all {self.get_document_count(index=index)} docs ...\"", ")", "else", ":", "logger", ".", "info", "(", "f\"Updating embeddings for new docs without embeddings ...\"", ")", "result", "=", "self", ".", "_get_all_documents_in_index", "(", "index", "=", "index", ",", "filters", "=", "filters", ",", "batch_size", "=", "batch_size", ",", "only_documents_without_embedding", "=", "not", "update_existing_embeddings", ")", "for", "result_batch", "in", "get_batches_from_generator", "(", "result", ",", "batch_size", ")", ":", "document_batch", "=", "[", "self", ".", "_convert_es_hit_to_document", "(", "hit", ",", "return_embedding", "=", "False", ")", "for", "hit", "in", "result_batch", "]", "embeddings", "=", "retriever", ".", "embed_passages", "(", "document_batch", ")", "# type: ignore", "assert", "len", "(", "document_batch", ")", "==", "len", "(", "embeddings", ")", "if", "embeddings", "[", "0", "]", ".", "shape", "[", "0", "]", "!=", "self", ".", "embedding_dim", ":", "raise", "RuntimeError", "(", "f\"Embedding dim. of model ({embeddings[0].shape[0]})\"", "f\" doesn't match embedding dim. in DocumentStore ({self.embedding_dim}).\"", "\"Specify the arg `embedding_dim` when initializing ElasticsearchDocumentStore()\"", ")", "doc_updates", "=", "[", "]", "for", "doc", ",", "emb", "in", "zip", "(", "document_batch", ",", "embeddings", ")", ":", "update", "=", "{", "\"_op_type\"", ":", "\"update\"", ",", "\"_index\"", ":", "index", ",", "\"_id\"", ":", "doc", ".", "id", ",", "\"doc\"", ":", "{", "self", ".", "embedding_field", ":", "emb", ".", "tolist", "(", ")", "}", ",", "}", "doc_updates", ".", "append", "(", "update", ")", "bulk", "(", "self", ".", "client", ",", "doc_updates", ",", "request_timeout", "=", "300", ",", "refresh", "=", "self", ".", "refresh_type", ")" ]
[ 816, 4 ]
[ 875, 90 ]
python
en
['en', 'error', 'th']
False
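A sketch for `update_embeddings` with a dense retriever; the retriever import path and model name are assumptions from Haystack 0.x tutorials:

```python
from haystack.document_store.elasticsearch import ElasticsearchDocumentStore
from haystack.retriever.dense import EmbeddingRetriever  # import path assumed (Haystack 0.x)

store = ElasticsearchDocumentStore(host="localhost", index="document",
                                   embedding_field="embedding", embedding_dim=768)
retriever = EmbeddingRetriever(document_store=store,
                               embedding_model="deepset/sentence_bert")  # model name illustrative

# With update_existing_embeddings=False, only documents that still lack an
# embedding are re-encoded, which makes incremental indexing cheap.
store.update_embeddings(retriever, update_existing_embeddings=False)
```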
ElasticsearchDocumentStore.delete_all_documents
(self, index: Optional[str] = None, filters: Optional[Dict[str, List[str]]] = None)
Delete documents in an index. All documents are deleted if no filters are passed. :param index: Index name to delete the document from. :param filters: Optional filters to narrow down the documents to be deleted. :return: None
Delete documents in an index. All documents are deleted if no filters are passed.
def delete_all_documents(self, index: Optional[str] = None, filters: Optional[Dict[str, List[str]]] = None): """ Delete documents in an index. All documents are deleted if no filters are passed. :param index: Index name to delete the document from. :param filters: Optional filters to narrow down the documents to be deleted. :return: None """ index = index or self.index query: Dict[str, Any] = {"query": {}} if filters: filter_clause = [] for key, values in filters.items(): filter_clause.append( { "terms": {key: values} } ) query["query"]["bool"] = {"filter": filter_clause} else: query["query"] = {"match_all": {}} self.client.delete_by_query(index=index, body=query, ignore=[404]) # We want to be sure that all docs are deleted before continuing (delete_by_query doesn't support wait_for) if self.refresh_type == "wait_for": time.sleep(2)
[ "def", "delete_all_documents", "(", "self", ",", "index", ":", "Optional", "[", "str", "]", "=", "None", ",", "filters", ":", "Optional", "[", "Dict", "[", "str", ",", "List", "[", "str", "]", "]", "]", "=", "None", ")", ":", "index", "=", "index", "or", "self", ".", "index", "query", ":", "Dict", "[", "str", ",", "Any", "]", "=", "{", "\"query\"", ":", "{", "}", "}", "if", "filters", ":", "filter_clause", "=", "[", "]", "for", "key", ",", "values", "in", "filters", ".", "items", "(", ")", ":", "filter_clause", ".", "append", "(", "{", "\"terms\"", ":", "{", "key", ":", "values", "}", "}", ")", "query", "[", "\"query\"", "]", "[", "\"bool\"", "]", "=", "{", "\"filter\"", ":", "filter_clause", "}", "else", ":", "query", "[", "\"query\"", "]", "=", "{", "\"match_all\"", ":", "{", "}", "}", "self", ".", "client", ".", "delete_by_query", "(", "index", "=", "index", ",", "body", "=", "query", ",", "ignore", "=", "[", "404", "]", ")", "# We want to be sure that all docs are deleted before continuing (delete_by_query doesn't support wait_for)", "if", "self", ".", "refresh_type", "==", "\"wait_for\"", ":", "time", ".", "sleep", "(", "2", ")" ]
[ 877, 4 ]
[ 901, 25 ]
python
en
['en', 'error', 'th']
False
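A sketch for `delete_all_documents`; without filters the whole index is wiped:

```python
from haystack.document_store.elasticsearch import ElasticsearchDocumentStore

store = ElasticsearchDocumentStore(host="localhost", index="document")

# With filters, only matching documents are removed via delete_by_query.
store.delete_all_documents(filters={"category": ["stale"]})
```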
OpenDistroElasticsearchDocumentStore._create_document_index
(self, index_name: str)
Create a new index for storing documents.
Create a new index for storing documents.
def _create_document_index(self, index_name: str): """ Create a new index for storing documents. """ if self.custom_mapping: mapping = self.custom_mapping else: mapping = { "mappings": { "properties": { self.name_field: {"type": "keyword"}, self.text_field: {"type": "text"}, }, "dynamic_templates": [ { "strings": { "path_match": "*", "match_mapping_type": "string", "mapping": {"type": "keyword"}}} ], }, "settings": { "analysis": { "analyzer": { "default": { "type": self.analyzer, } } } } } if self.embedding_field: if self.similarity == "cosine": similarity_space_type = "cosinesimil" elif self.similarity == "dot_product": similarity_space_type = "l2" else: raise Exception( f"Similarity function {self.similarity} is not supported by OpenDistroElasticsearchDocumentStore." ) mapping["settings"]["knn"] = True mapping["settings"]["knn.space_type"] = similarity_space_type mapping["mappings"]["properties"][self.embedding_field] = { "type": "knn_vector", "dimension": self.embedding_dim, } try: self.client.indices.create(index=index_name, body=mapping) except RequestError as e: # With multiple workers we need to avoid race conditions, where: # - there's no index in the beginning # - both want to create one # - one fails as the other one already created it if not self.client.indices.exists(index=index_name): raise e
[ "def", "_create_document_index", "(", "self", ",", "index_name", ":", "str", ")", ":", "if", "self", ".", "custom_mapping", ":", "mapping", "=", "self", ".", "custom_mapping", "else", ":", "mapping", "=", "{", "\"mappings\"", ":", "{", "\"properties\"", ":", "{", "self", ".", "name_field", ":", "{", "\"type\"", ":", "\"keyword\"", "}", ",", "self", ".", "text_field", ":", "{", "\"type\"", ":", "\"text\"", "}", ",", "}", ",", "\"dynamic_templates\"", ":", "[", "{", "\"strings\"", ":", "{", "\"path_match\"", ":", "\"*\"", ",", "\"match_mapping_type\"", ":", "\"string\"", ",", "\"mapping\"", ":", "{", "\"type\"", ":", "\"keyword\"", "}", "}", "}", "]", ",", "}", ",", "\"settings\"", ":", "{", "\"analysis\"", ":", "{", "\"analyzer\"", ":", "{", "\"default\"", ":", "{", "\"type\"", ":", "self", ".", "analyzer", ",", "}", "}", "}", "}", "}", "if", "self", ".", "embedding_field", ":", "if", "self", ".", "similarity", "==", "\"cosine\"", ":", "similarity_space_type", "=", "\"cosinesimil\"", "elif", "self", ".", "similarity", "==", "\"dot_product\"", ":", "similarity_space_type", "=", "\"l2\"", "else", ":", "raise", "Exception", "(", "f\"Similarity function {self.similarity} is not supported by OpenDistroElasticsearchDocumentStore.\"", ")", "mapping", "[", "\"settings\"", "]", "[", "\"knn\"", "]", "=", "True", "mapping", "[", "\"settings\"", "]", "[", "\"knn.space_type\"", "]", "=", "similarity_space_type", "mapping", "[", "\"mappings\"", "]", "[", "\"properties\"", "]", "[", "self", ".", "embedding_field", "]", "=", "{", "\"type\"", ":", "\"knn_vector\"", ",", "\"dimension\"", ":", "self", ".", "embedding_dim", ",", "}", "try", ":", "self", ".", "client", ".", "indices", ".", "create", "(", "index", "=", "index_name", ",", "body", "=", "mapping", ")", "except", "RequestError", "as", "e", ":", "# With multiple workers we need to avoid race conditions, where:", "# - there's no index in the beginning", "# - both want to create one", "# - one fails as the other one already created it", "if", "not", "self", ".", "client", ".", "indices", ".", "exists", "(", "index", "=", "index_name", ")", ":", "raise", "e" ]
[ 912, 4 ]
[ 968, 23 ]
python
en
['en', 'error', 'th']
False
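For reference, _create_document_index maps the store's similarity setting onto OpenDistro k-NN space types ("cosine" becomes "cosinesimil", "dot_product" falls back to "l2") and switches k-NN on in the index settings. A minimal sketch of the kind of request body that results, assuming an illustrative embedding field named "embedding" with 768 dimensions (both values are placeholders, not taken from the record above):

import json

mapping = {
    "mappings": {
        "properties": {
            "name": {"type": "keyword"},
            "text": {"type": "text"},
            # k-NN vector field; dimension must match the embedding model in use
            "embedding": {"type": "knn_vector", "dimension": 768},
        }
    },
    "settings": {
        "knn": True,                      # enable k-NN for this index
        "knn.space_type": "cosinesimil",  # space type derived from the similarity setting
    },
}
print(json.dumps(mapping, indent=2))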
OpenDistroElasticsearchDocumentStore._get_vector_similarity_query
(self, query_emb: np.ndarray, top_k: int)
Generate Elasticsearch query for vector similarity.
Generate Elasticsearch query for vector similarity.
def _get_vector_similarity_query(self, query_emb: np.ndarray, top_k: int): """ Generate Elasticsearch query for vector similarity. """ query = {"knn": {self.embedding_field: {"vector": query_emb.tolist(), "k": top_k}}} return query
[ "def", "_get_vector_similarity_query", "(", "self", ",", "query_emb", ":", "np", ".", "ndarray", ",", "top_k", ":", "int", ")", ":", "query", "=", "{", "\"knn\"", ":", "{", "self", ".", "embedding_field", ":", "{", "\"vector\"", ":", "query_emb", ".", "tolist", "(", ")", ",", "\"k\"", ":", "top_k", "}", "}", "}", "return", "query" ]
[ 970, 4 ]
[ 975, 20 ]
python
en
['en', 'error', 'th']
False
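The query built by _get_vector_similarity_query is a bare OpenDistro k-NN clause keyed on the embedding field. A usage sketch with fabricated inputs (the field name and dimensionality are illustrative):

import numpy as np

query_emb = np.random.rand(768).astype(np.float32)  # stand-in for a real query embedding
top_k = 10
body = {
    "size": top_k,
    "query": {"knn": {"embedding": {"vector": query_emb.tolist(), "k": top_k}}},
}
# body would then go to something like client.search(index=index_name, body=body)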
WindowGenerator.example
(self)
Get and cache an example batch of `inputs, labels` for plotting.
Get and cache an example batch of `inputs, labels` for plotting.
def example(self): """Get and cache an example batch of `inputs, labels` for plotting.""" result = getattr(self, '_example', None) if result is None: # No example batch was found, so get one from the `.train` dataset result = next(iter(self.train)) # And cache it for next time self._example = result return result
[ "def", "example", "(", "self", ")", ":", "result", "=", "getattr", "(", "self", ",", "'_example'", ",", "None", ")", "if", "result", "is", "None", ":", "# No example batch was found, so get one from the `.train` dataset", "result", "=", "next", "(", "iter", "(", "self", ".", "train", ")", ")", "# And cache it for next time", "self", ".", "_example", "=", "result", "return", "result" ]
[ 162, 4 ]
[ 170, 21 ]
python
en
['en', 'en', 'en']
True
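WindowGenerator.example is the usual lazy-caching idiom: look up a private attribute with getattr and a None default, compute the value on the first miss, and store it on the instance so later accesses are free. A self-contained sketch of the same pattern (the class and the expensive call are illustrative, not from the record):

class Cached:
    @property
    def example(self):
        """Get and cache an example payload on first access."""
        result = getattr(self, '_example', None)
        if result is None:
            result = self._expensive_load()  # runs only once
            self._example = result           # cached for later accesses
        return result

    def _expensive_load(self):
        return list(range(5))  # stand-in for an expensive computation

c = Cached()
assert c.example is c.example  # the second access hits the cache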
BaseXLearner.__init__
(self, learner=None, control_outcome_learner=None, treatment_outcome_learner=None, control_effect_learner=None, treatment_effect_learner=None, ate_alpha=.05, control_name=0)
Initialize an X-learner. Args: learner (optional): a model to estimate outcomes and treatment effects in both the control and treatment groups control_outcome_learner (optional): a model to estimate outcomes in the control group treatment_outcome_learner (optional): a model to estimate outcomes in the treatment group control_effect_learner (optional): a model to estimate treatment effects in the control group treatment_effect_learner (optional): a model to estimate treatment effects in the treatment group ate_alpha (float, optional): the confidence level alpha of the ATE estimate control_name (str or int, optional): name of control group
Initialize an X-learner.
def __init__(self, learner=None, control_outcome_learner=None, treatment_outcome_learner=None, control_effect_learner=None, treatment_effect_learner=None, ate_alpha=.05, control_name=0): """Initialize an X-learner. Args: learner (optional): a model to estimate outcomes and treatment effects in both the control and treatment groups control_outcome_learner (optional): a model to estimate outcomes in the control group treatment_outcome_learner (optional): a model to estimate outcomes in the treatment group control_effect_learner (optional): a model to estimate treatment effects in the control group treatment_effect_learner (optional): a model to estimate treatment effects in the treatment group ate_alpha (float, optional): the confidence level alpha of the ATE estimate control_name (str or int, optional): name of control group """ assert (learner is not None) or ((control_outcome_learner is not None) and (treatment_outcome_learner is not None) and (control_effect_learner is not None) and (treatment_effect_learner is not None)) if control_outcome_learner is None: self.model_mu_c = deepcopy(learner) else: self.model_mu_c = control_outcome_learner if treatment_outcome_learner is None: self.model_mu_t = deepcopy(learner) else: self.model_mu_t = treatment_outcome_learner if control_effect_learner is None: self.model_tau_c = deepcopy(learner) else: self.model_tau_c = control_effect_learner if treatment_effect_learner is None: self.model_tau_t = deepcopy(learner) else: self.model_tau_t = treatment_effect_learner self.ate_alpha = ate_alpha self.control_name = control_name self.propensity = None self.propensity_model = None
[ "def", "__init__", "(", "self", ",", "learner", "=", "None", ",", "control_outcome_learner", "=", "None", ",", "treatment_outcome_learner", "=", "None", ",", "control_effect_learner", "=", "None", ",", "treatment_effect_learner", "=", "None", ",", "ate_alpha", "=", ".05", ",", "control_name", "=", "0", ")", ":", "assert", "(", "learner", "is", "not", "None", ")", "or", "(", "(", "control_outcome_learner", "is", "not", "None", ")", "and", "(", "treatment_outcome_learner", "is", "not", "None", ")", "and", "(", "control_effect_learner", "is", "not", "None", ")", "and", "(", "treatment_effect_learner", "is", "not", "None", ")", ")", "if", "control_outcome_learner", "is", "None", ":", "self", ".", "model_mu_c", "=", "deepcopy", "(", "learner", ")", "else", ":", "self", ".", "model_mu_c", "=", "control_outcome_learner", "if", "treatment_outcome_learner", "is", "None", ":", "self", ".", "model_mu_t", "=", "deepcopy", "(", "learner", ")", "else", ":", "self", ".", "model_mu_t", "=", "treatment_outcome_learner", "if", "control_effect_learner", "is", "None", ":", "self", ".", "model_tau_c", "=", "deepcopy", "(", "learner", ")", "else", ":", "self", ".", "model_tau_c", "=", "control_effect_learner", "if", "treatment_effect_learner", "is", "None", ":", "self", ".", "model_tau_t", "=", "deepcopy", "(", "learner", ")", "else", ":", "self", ".", "model_tau_t", "=", "treatment_effect_learner", "self", ".", "ate_alpha", "=", "ate_alpha", "self", ".", "control_name", "=", "control_name", "self", ".", "propensity", "=", "None", "self", ".", "propensity_model", "=", "None" ]
[ 24, 4 ]
[ 73, 36 ]
python
en
['en', 'en', 'it']
True
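Because BaseXLearner.__init__ accepts either one shared learner (deep-copied into all four model slots) or four separate models, both call patterns below should be valid; LinearRegression is only an illustrative choice of base learner, and the import path is assumed from causalml's public API:

from sklearn.linear_model import LinearRegression
from causalml.inference.meta import BaseXLearner  # assumed import path

# One shared learner, deep-copied into mu_c, mu_t, tau_c and tau_t
xl = BaseXLearner(learner=LinearRegression())

# Or one model per role
xl = BaseXLearner(
    control_outcome_learner=LinearRegression(),
    treatment_outcome_learner=LinearRegression(),
    control_effect_learner=LinearRegression(),
    treatment_effect_learner=LinearRegression(),
)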
BaseXLearner.fit
(self, X, treatment, y, p=None)
Fit the inference model. Args: X (np.matrix or np.array or pd.Dataframe): a feature matrix treatment (np.array or pd.Series): a treatment vector y (np.array or pd.Series): an outcome vector p (np.ndarray or pd.Series or dict, optional): an array of propensity scores of float (0,1) in the single-treatment case; or, a dictionary of treatment groups that map to propensity vectors of float (0,1); if None will run ElasticNetPropensityModel() to generate the propensity scores.
Fit the inference model.
def fit(self, X, treatment, y, p=None): """Fit the inference model. Args: X (np.matrix or np.array or pd.Dataframe): a feature matrix treatment (np.array or pd.Series): a treatment vector y (np.array or pd.Series): an outcome vector p (np.ndarray or pd.Series or dict, optional): an array of propensity scores of float (0,1) in the single-treatment case; or, a dictionary of treatment groups that map to propensity vectors of float (0,1); if None will run ElasticNetPropensityModel() to generate the propensity scores. """ X, treatment, y = convert_pd_to_np(X, treatment, y) check_treatment_vector(treatment, self.control_name) self.t_groups = np.unique(treatment[treatment != self.control_name]) self.t_groups.sort() if p is None: self._set_propensity_models(X=X, treatment=treatment, y=y) p = self.propensity else: p = self._format_p(p, self.t_groups) self._classes = {group: i for i, group in enumerate(self.t_groups)} self.models_mu_c = {group: deepcopy(self.model_mu_c) for group in self.t_groups} self.models_mu_t = {group: deepcopy(self.model_mu_t) for group in self.t_groups} self.models_tau_c = {group: deepcopy(self.model_tau_c) for group in self.t_groups} self.models_tau_t = {group: deepcopy(self.model_tau_t) for group in self.t_groups} self.vars_c = {} self.vars_t = {} for group in self.t_groups: mask = (treatment == group) | (treatment == self.control_name) treatment_filt = treatment[mask] X_filt = X[mask] y_filt = y[mask] w = (treatment_filt == group).astype(int) # Train outcome models self.models_mu_c[group].fit(X_filt[w == 0], y_filt[w == 0]) self.models_mu_t[group].fit(X_filt[w == 1], y_filt[w == 1]) # Calculate variances and treatment effects var_c = (y_filt[w == 0] - self.models_mu_c[group].predict(X_filt[w == 0])).var() self.vars_c[group] = var_c var_t = (y_filt[w == 1] - self.models_mu_t[group].predict(X_filt[w == 1])).var() self.vars_t[group] = var_t # Train treatment models d_c = self.models_mu_t[group].predict(X_filt[w == 0]) - y_filt[w == 0] d_t = y_filt[w == 1] - self.models_mu_c[group].predict(X_filt[w == 1]) self.models_tau_c[group].fit(X_filt[w == 0], d_c) self.models_tau_t[group].fit(X_filt[w == 1], d_t)
[ "def", "fit", "(", "self", ",", "X", ",", "treatment", ",", "y", ",", "p", "=", "None", ")", ":", "X", ",", "treatment", ",", "y", "=", "convert_pd_to_np", "(", "X", ",", "treatment", ",", "y", ")", "check_treatment_vector", "(", "treatment", ",", "self", ".", "control_name", ")", "self", ".", "t_groups", "=", "np", ".", "unique", "(", "treatment", "[", "treatment", "!=", "self", ".", "control_name", "]", ")", "self", ".", "t_groups", ".", "sort", "(", ")", "if", "p", "is", "None", ":", "self", ".", "_set_propensity_models", "(", "X", "=", "X", ",", "treatment", "=", "treatment", ",", "y", "=", "y", ")", "p", "=", "self", ".", "propensity", "else", ":", "p", "=", "self", ".", "_format_p", "(", "p", ",", "self", ".", "t_groups", ")", "self", ".", "_classes", "=", "{", "group", ":", "i", "for", "i", ",", "group", "in", "enumerate", "(", "self", ".", "t_groups", ")", "}", "self", ".", "models_mu_c", "=", "{", "group", ":", "deepcopy", "(", "self", ".", "model_mu_c", ")", "for", "group", "in", "self", ".", "t_groups", "}", "self", ".", "models_mu_t", "=", "{", "group", ":", "deepcopy", "(", "self", ".", "model_mu_t", ")", "for", "group", "in", "self", ".", "t_groups", "}", "self", ".", "models_tau_c", "=", "{", "group", ":", "deepcopy", "(", "self", ".", "model_tau_c", ")", "for", "group", "in", "self", ".", "t_groups", "}", "self", ".", "models_tau_t", "=", "{", "group", ":", "deepcopy", "(", "self", ".", "model_tau_t", ")", "for", "group", "in", "self", ".", "t_groups", "}", "self", ".", "vars_c", "=", "{", "}", "self", ".", "vars_t", "=", "{", "}", "for", "group", "in", "self", ".", "t_groups", ":", "mask", "=", "(", "treatment", "==", "group", ")", "|", "(", "treatment", "==", "self", ".", "control_name", ")", "treatment_filt", "=", "treatment", "[", "mask", "]", "X_filt", "=", "X", "[", "mask", "]", "y_filt", "=", "y", "[", "mask", "]", "w", "=", "(", "treatment_filt", "==", "group", ")", ".", "astype", "(", "int", ")", "# Train outcome models", "self", ".", "models_mu_c", "[", "group", "]", ".", "fit", "(", "X_filt", "[", "w", "==", "0", "]", ",", "y_filt", "[", "w", "==", "0", "]", ")", "self", ".", "models_mu_t", "[", "group", "]", ".", "fit", "(", "X_filt", "[", "w", "==", "1", "]", ",", "y_filt", "[", "w", "==", "1", "]", ")", "# Calculate variances and treatment effects", "var_c", "=", "(", "y_filt", "[", "w", "==", "0", "]", "-", "self", ".", "models_mu_c", "[", "group", "]", ".", "predict", "(", "X_filt", "[", "w", "==", "0", "]", ")", ")", ".", "var", "(", ")", "self", ".", "vars_c", "[", "group", "]", "=", "var_c", "var_t", "=", "(", "y_filt", "[", "w", "==", "1", "]", "-", "self", ".", "models_mu_t", "[", "group", "]", ".", "predict", "(", "X_filt", "[", "w", "==", "1", "]", ")", ")", ".", "var", "(", ")", "self", ".", "vars_t", "[", "group", "]", "=", "var_t", "# Train treatment models", "d_c", "=", "self", ".", "models_mu_t", "[", "group", "]", ".", "predict", "(", "X_filt", "[", "w", "==", "0", "]", ")", "-", "y_filt", "[", "w", "==", "0", "]", "d_t", "=", "y_filt", "[", "w", "==", "1", "]", "-", "self", ".", "models_mu_c", "[", "group", "]", ".", "predict", "(", "X_filt", "[", "w", "==", "1", "]", ")", "self", ".", "models_tau_c", "[", "group", "]", ".", "fit", "(", "X_filt", "[", "w", "==", "0", "]", ",", "d_c", ")", "self", ".", "models_tau_t", "[", "group", "]", ".", "fit", "(", "X_filt", "[", "w", "==", "1", "]", ",", "d_t", ")" ]
[ 85, 4 ]
[ 136, 61 ]
python
en
['en', 'en', 'en']
True
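The core X-learner step in fit is the construction of imputed treatment effects from cross-predictions of the outcome models. Written out from the two lines d_c = ... and d_t = ... (with \hat{\mu}_c, \hat{\mu}_t denoting the fitted control and treatment outcome models):

\tilde{D}^{c}_{i} = \hat{\mu}_t(X^{c}_{i}) - Y^{c}_{i}, \qquad \tilde{D}^{t}_{i} = Y^{t}_{i} - \hat{\mu}_c(X^{t}_{i})

The effect models tau_c and tau_t are then fit on (X^{c}, \tilde{D}^{c}) and (X^{t}, \tilde{D}^{t}) respectively, as in Künzel et al. (2019).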
BaseXLearner.predict
(self, X, treatment=None, y=None, p=None, return_components=False, verbose=True)
Predict treatment effects. Args: X (np.matrix or np.array or pd.Dataframe): a feature matrix treatment (np.array or pd.Series, optional): a treatment vector y (np.array or pd.Series, optional): an outcome vector p (np.ndarray or pd.Series or dict, optional): an array of propensity scores of float (0,1) in the single-treatment case; or, a dictionary of treatment groups that map to propensity vectors of float (0,1); if None will run ElasticNetPropensityModel() to generate the propensity scores. return_components (bool, optional): whether to return outcome for treatment and control separately verbose (bool, optional): whether to output progress logs Returns: (numpy.ndarray): Predictions of treatment effects.
Predict treatment effects.
def predict(self, X, treatment=None, y=None, p=None, return_components=False, verbose=True): """Predict treatment effects. Args: X (np.matrix or np.array or pd.Dataframe): a feature matrix treatment (np.array or pd.Series, optional): a treatment vector y (np.array or pd.Series, optional): an outcome vector p (np.ndarray or pd.Series or dict, optional): an array of propensity scores of float (0,1) in the single-treatment case; or, a dictionary of treatment groups that map to propensity vectors of float (0,1); if None will run ElasticNetPropensityModel() to generate the propensity scores. return_components (bool, optional): whether to return outcome for treatment and control separately verbose (bool, optional): whether to output progress logs Returns: (numpy.ndarray): Predictions of treatment effects. """ X, treatment, y = convert_pd_to_np(X, treatment, y) if p is None: logger.info('Generating propensity score') p = dict() for group in self.t_groups: p_model = self.propensity_model[group] p[group] = p_model.predict(X) else: p = self._format_p(p, self.t_groups) te = np.zeros((X.shape[0], self.t_groups.shape[0])) dhat_cs = {} dhat_ts = {} for i, group in enumerate(self.t_groups): model_tau_c = self.models_tau_c[group] model_tau_t = self.models_tau_t[group] dhat_cs[group] = model_tau_c.predict(X) dhat_ts[group] = model_tau_t.predict(X) _te = (p[group] * dhat_cs[group] + (1 - p[group]) * dhat_ts[group]).reshape(-1, 1) te[:, i] = np.ravel(_te) if (y is not None) and (treatment is not None) and verbose: mask = (treatment == group) | (treatment == self.control_name) treatment_filt = treatment[mask] X_filt = X[mask] y_filt = y[mask] w = (treatment_filt == group).astype(int) yhat = np.zeros_like(y_filt, dtype=float) yhat[w == 0] = self.models_mu_c[group].predict(X_filt[w == 0]) yhat[w == 1] = self.models_mu_t[group].predict(X_filt[w == 1]) logger.info('Error metrics for group {}'.format(group)) regression_metrics(y_filt, yhat, w) if not return_components: return te else: return te, dhat_cs, dhat_ts
[ "def", "predict", "(", "self", ",", "X", ",", "treatment", "=", "None", ",", "y", "=", "None", ",", "p", "=", "None", ",", "return_components", "=", "False", ",", "verbose", "=", "True", ")", ":", "X", ",", "treatment", ",", "y", "=", "convert_pd_to_np", "(", "X", ",", "treatment", ",", "y", ")", "if", "p", "is", "None", ":", "logger", ".", "info", "(", "'Generating propensity score'", ")", "p", "=", "dict", "(", ")", "for", "group", "in", "self", ".", "t_groups", ":", "p_model", "=", "self", ".", "propensity_model", "[", "group", "]", "p", "[", "group", "]", "=", "p_model", ".", "predict", "(", "X", ")", "else", ":", "p", "=", "self", ".", "_format_p", "(", "p", ",", "self", ".", "t_groups", ")", "te", "=", "np", ".", "zeros", "(", "(", "X", ".", "shape", "[", "0", "]", ",", "self", ".", "t_groups", ".", "shape", "[", "0", "]", ")", ")", "dhat_cs", "=", "{", "}", "dhat_ts", "=", "{", "}", "for", "i", ",", "group", "in", "enumerate", "(", "self", ".", "t_groups", ")", ":", "model_tau_c", "=", "self", ".", "models_tau_c", "[", "group", "]", "model_tau_t", "=", "self", ".", "models_tau_t", "[", "group", "]", "dhat_cs", "[", "group", "]", "=", "model_tau_c", ".", "predict", "(", "X", ")", "dhat_ts", "[", "group", "]", "=", "model_tau_t", ".", "predict", "(", "X", ")", "_te", "=", "(", "p", "[", "group", "]", "*", "dhat_cs", "[", "group", "]", "+", "(", "1", "-", "p", "[", "group", "]", ")", "*", "dhat_ts", "[", "group", "]", ")", ".", "reshape", "(", "-", "1", ",", "1", ")", "te", "[", ":", ",", "i", "]", "=", "np", ".", "ravel", "(", "_te", ")", "if", "(", "y", "is", "not", "None", ")", "and", "(", "treatment", "is", "not", "None", ")", "and", "verbose", ":", "mask", "=", "(", "treatment", "==", "group", ")", "|", "(", "treatment", "==", "self", ".", "control_name", ")", "treatment_filt", "=", "treatment", "[", "mask", "]", "X_filt", "=", "X", "[", "mask", "]", "y_filt", "=", "y", "[", "mask", "]", "w", "=", "(", "treatment_filt", "==", "group", ")", ".", "astype", "(", "int", ")", "yhat", "=", "np", ".", "zeros_like", "(", "y_filt", ",", "dtype", "=", "float", ")", "yhat", "[", "w", "==", "0", "]", "=", "self", ".", "models_mu_c", "[", "group", "]", ".", "predict", "(", "X_filt", "[", "w", "==", "0", "]", ")", "yhat", "[", "w", "==", "1", "]", "=", "self", ".", "models_mu_t", "[", "group", "]", ".", "predict", "(", "X_filt", "[", "w", "==", "1", "]", ")", "logger", ".", "info", "(", "'Error metrics for group {}'", ".", "format", "(", "group", ")", ")", "regression_metrics", "(", "y_filt", ",", "yhat", ",", "w", ")", "if", "not", "return_components", ":", "return", "te", "else", ":", "return", "te", ",", "dhat_cs", ",", "dhat_ts" ]
[ 138, 4 ]
[ 195, 39 ]
python
en
['fr', 'en', 'en']
True
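predict then blends the two effect models with the propensity score; reconstructing the line _te = (p[group] * dhat_cs[group] + (1 - p[group]) * dhat_ts[group]) in formula form:

\hat{\tau}(x) = p(x)\,\hat{\tau}_c(x) + \bigl(1 - p(x)\bigr)\,\hat{\tau}_t(x)

so the control-group effect model gets more weight where treatment is likely, and the treatment-group model where it is not.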
BaseXLearner.fit_predict
(self, X, treatment, y, p=None, return_ci=False, n_bootstraps=1000, bootstrap_size=10000, return_components=False, verbose=True)
Fit the treatment effect and outcome models of the X learner and predict treatment effects. Args: X (np.matrix or np.array or pd.Dataframe): a feature matrix treatment (np.array or pd.Series): a treatment vector y (np.array or pd.Series): an outcome vector p (np.ndarray or pd.Series or dict, optional): an array of propensity scores of float (0,1) in the single-treatment case; or, a dictionary of treatment groups that map to propensity vectors of float (0,1); if None will run ElasticNetPropensityModel() to generate the propensity scores. return_ci (bool): whether to return confidence intervals n_bootstraps (int): number of bootstrap iterations bootstrap_size (int): number of samples per bootstrap return_components (bool, optional): whether to return outcome for treatment and control separately verbose (bool): whether to output progress logs Returns: (numpy.ndarray): Predictions of treatment effects. Output dim: [n_samples, n_treatment] If return_ci, returns CATE [n_samples, n_treatment], LB [n_samples, n_treatment], UB [n_samples, n_treatment]
Fit the treatment effect and outcome models of the X learner and predict treatment effects.
def fit_predict(self, X, treatment, y, p=None, return_ci=False, n_bootstraps=1000, bootstrap_size=10000, return_components=False, verbose=True): """Fit the treatment effect and outcome models of the X learner and predict treatment effects. Args: X (np.matrix or np.array or pd.Dataframe): a feature matrix treatment (np.array or pd.Series): a treatment vector y (np.array or pd.Series): an outcome vector p (np.ndarray or pd.Series or dict, optional): an array of propensity scores of float (0,1) in the single-treatment case; or, a dictionary of treatment groups that map to propensity vectors of float (0,1); if None will run ElasticNetPropensityModel() to generate the propensity scores. return_ci (bool): whether to return confidence intervals n_bootstraps (int): number of bootstrap iterations bootstrap_size (int): number of samples per bootstrap return_components (bool, optional): whether to return outcome for treatment and control separately verbose (bool): whether to output progress logs Returns: (numpy.ndarray): Predictions of treatment effects. Output dim: [n_samples, n_treatment] If return_ci, returns CATE [n_samples, n_treatment], LB [n_samples, n_treatment], UB [n_samples, n_treatment] """ X, treatment, y = convert_pd_to_np(X, treatment, y) self.fit(X, treatment, y, p) if p is None: p = self.propensity else: p = self._format_p(p, self.t_groups) te = self.predict(X, treatment=treatment, y=y, p=p, return_components=return_components) if not return_ci: return te else: t_groups_global = self.t_groups _classes_global = self._classes models_mu_c_global = deepcopy(self.models_mu_c) models_mu_t_global = deepcopy(self.models_mu_t) models_tau_c_global = deepcopy(self.models_tau_c) models_tau_t_global = deepcopy(self.models_tau_t) te_bootstraps = np.zeros(shape=(X.shape[0], self.t_groups.shape[0], n_bootstraps)) logger.info('Bootstrap Confidence Intervals') for i in tqdm(range(n_bootstraps)): te_b = self.bootstrap(X, treatment, y, p, size=bootstrap_size) te_bootstraps[:, :, i] = te_b te_lower = np.percentile(te_bootstraps, (self.ate_alpha / 2) * 100, axis=2) te_upper = np.percentile(te_bootstraps, (1 - self.ate_alpha / 2) * 100, axis=2) # set member variables back to global (currently last bootstrapped outcome) self.t_groups = t_groups_global self._classes = _classes_global self.models_mu_c = deepcopy(models_mu_c_global) self.models_mu_t = deepcopy(models_mu_t_global) self.models_tau_c = deepcopy(models_tau_c_global) self.models_tau_t = deepcopy(models_tau_t_global) return (te, te_lower, te_upper)
[ "def", "fit_predict", "(", "self", ",", "X", ",", "treatment", ",", "y", ",", "p", "=", "None", ",", "return_ci", "=", "False", ",", "n_bootstraps", "=", "1000", ",", "bootstrap_size", "=", "10000", ",", "return_components", "=", "False", ",", "verbose", "=", "True", ")", ":", "X", ",", "treatment", ",", "y", "=", "convert_pd_to_np", "(", "X", ",", "treatment", ",", "y", ")", "self", ".", "fit", "(", "X", ",", "treatment", ",", "y", ",", "p", ")", "if", "p", "is", "None", ":", "p", "=", "self", ".", "propensity", "else", ":", "p", "=", "self", ".", "_format_p", "(", "p", ",", "self", ".", "t_groups", ")", "te", "=", "self", ".", "predict", "(", "X", ",", "treatment", "=", "treatment", ",", "y", "=", "y", ",", "p", "=", "p", ",", "return_components", "=", "return_components", ")", "if", "not", "return_ci", ":", "return", "te", "else", ":", "t_groups_global", "=", "self", ".", "t_groups", "_classes_global", "=", "self", ".", "_classes", "models_mu_c_global", "=", "deepcopy", "(", "self", ".", "models_mu_c", ")", "models_mu_t_global", "=", "deepcopy", "(", "self", ".", "models_mu_t", ")", "models_tau_c_global", "=", "deepcopy", "(", "self", ".", "models_tau_c", ")", "models_tau_t_global", "=", "deepcopy", "(", "self", ".", "models_tau_t", ")", "te_bootstraps", "=", "np", ".", "zeros", "(", "shape", "=", "(", "X", ".", "shape", "[", "0", "]", ",", "self", ".", "t_groups", ".", "shape", "[", "0", "]", ",", "n_bootstraps", ")", ")", "logger", ".", "info", "(", "'Bootstrap Confidence Intervals'", ")", "for", "i", "in", "tqdm", "(", "range", "(", "n_bootstraps", ")", ")", ":", "te_b", "=", "self", ".", "bootstrap", "(", "X", ",", "treatment", ",", "y", ",", "p", ",", "size", "=", "bootstrap_size", ")", "te_bootstraps", "[", ":", ",", ":", ",", "i", "]", "=", "te_b", "te_lower", "=", "np", ".", "percentile", "(", "te_bootstraps", ",", "(", "self", ".", "ate_alpha", "/", "2", ")", "*", "100", ",", "axis", "=", "2", ")", "te_upper", "=", "np", ".", "percentile", "(", "te_bootstraps", ",", "(", "1", "-", "self", ".", "ate_alpha", "/", "2", ")", "*", "100", ",", "axis", "=", "2", ")", "# set member variables back to global (currently last bootstrapped outcome)", "self", ".", "t_groups", "=", "t_groups_global", "self", ".", "_classes", "=", "_classes_global", "self", ".", "models_mu_c", "=", "deepcopy", "(", "models_mu_c_global", ")", "self", ".", "models_mu_t", "=", "deepcopy", "(", "models_mu_t_global", ")", "self", ".", "models_tau_c", "=", "deepcopy", "(", "models_tau_c_global", ")", "self", ".", "models_tau_t", "=", "deepcopy", "(", "models_tau_t_global", ")", "return", "(", "te", ",", "te_lower", ",", "te_upper", ")" ]
[ 197, 4 ]
[ 255, 43 ]
python
en
['en', 'en', 'en']
True
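The confidence intervals in fit_predict are plain percentile-bootstrap bounds taken across the third axis of te_bootstraps. A minimal numpy illustration with fabricated shapes (500 samples, one treatment group, 100 bootstrap draws; all numbers are made up):

import numpy as np

ate_alpha = 0.05
te_bootstraps = np.random.normal(loc=1.0, scale=0.2, size=(500, 1, 100))

# per-sample, per-treatment percentile bounds across bootstrap draws (axis=2)
te_lower = np.percentile(te_bootstraps, (ate_alpha / 2) * 100, axis=2)
te_upper = np.percentile(te_bootstraps, (1 - ate_alpha / 2) * 100, axis=2)
assert te_lower.shape == te_upper.shape == (500, 1)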
BaseXLearner.estimate_ate
(self, X, treatment, y, p=None, bootstrap_ci=False, n_bootstraps=1000, bootstrap_size=10000)
Estimate the Average Treatment Effect (ATE). Args: X (np.matrix or np.array or pd.Dataframe): a feature matrix treatment (np.array or pd.Series): a treatment vector y (np.array or pd.Series): an outcome vector p (np.ndarray or pd.Series or dict, optional): an array of propensity scores of float (0,1) in the single-treatment case; or, a dictionary of treatment groups that map to propensity vectors of float (0,1); if None will run ElasticNetPropensityModel() to generate the propensity scores. bootstrap_ci (bool): whether to run bootstrap for confidence intervals n_bootstraps (int): number of bootstrap iterations bootstrap_size (int): number of samples per bootstrap Returns: The mean and confidence interval (LB, UB) of the ATE estimate.
Estimate the Average Treatment Effect (ATE).
def estimate_ate(self, X, treatment, y, p=None, bootstrap_ci=False, n_bootstraps=1000, bootstrap_size=10000): """Estimate the Average Treatment Effect (ATE). Args: X (np.matrix or np.array or pd.Dataframe): a feature matrix treatment (np.array or pd.Series): a treatment vector y (np.array or pd.Series): an outcome vector p (np.ndarray or pd.Series or dict, optional): an array of propensity scores of float (0,1) in the single-treatment case; or, a dictionary of treatment groups that map to propensity vectors of float (0,1); if None will run ElasticNetPropensityModel() to generate the propensity scores. bootstrap_ci (bool): whether to run bootstrap for confidence intervals n_bootstraps (int): number of bootstrap iterations bootstrap_size (int): number of samples per bootstrap Returns: The mean and confidence interval (LB, UB) of the ATE estimate. """ te, dhat_cs, dhat_ts = self.fit_predict(X, treatment, y, p, return_components=True) X, treatment, y = convert_pd_to_np(X, treatment, y) if p is None: p = self.propensity else: p = self._format_p(p, self.t_groups) ate = np.zeros(self.t_groups.shape[0]) ate_lb = np.zeros(self.t_groups.shape[0]) ate_ub = np.zeros(self.t_groups.shape[0]) for i, group in enumerate(self.t_groups): _ate = te[:, i].mean() mask = (treatment == group) | (treatment == self.control_name) treatment_filt = treatment[mask] w = (treatment_filt == group).astype(int) prob_treatment = float(sum(w)) / w.shape[0] dhat_c = dhat_cs[group][mask] dhat_t = dhat_ts[group][mask] p_filt = p[group][mask] # SE formula is based on the lower bound formula (7) from Imbens, Guido W., and Jeffrey M. Wooldridge. 2009. # "Recent Developments in the Econometrics of Program Evaluation." Journal of Economic Literature se = np.sqrt(( self.vars_t[group] / prob_treatment + self.vars_c[group] / (1 - prob_treatment) + (p_filt * dhat_c + (1 - p_filt) * dhat_t).var() ) / w.shape[0]) _ate_lb = _ate - se * norm.ppf(1 - self.ate_alpha / 2) _ate_ub = _ate + se * norm.ppf(1 - self.ate_alpha / 2) ate[i] = _ate ate_lb[i] = _ate_lb ate_ub[i] = _ate_ub if not bootstrap_ci: return ate, ate_lb, ate_ub else: t_groups_global = self.t_groups _classes_global = self._classes models_mu_c_global = deepcopy(self.models_mu_c) models_mu_t_global = deepcopy(self.models_mu_t) models_tau_c_global = deepcopy(self.models_tau_c) models_tau_t_global = deepcopy(self.models_tau_t) logger.info('Bootstrap Confidence Intervals for ATE') ate_bootstraps = np.zeros(shape=(self.t_groups.shape[0], n_bootstraps)) for n in tqdm(range(n_bootstraps)): cate_b = self.bootstrap(X, treatment, y, p, size=bootstrap_size) ate_bootstraps[:, n] = cate_b.mean() ate_lower = np.percentile(ate_bootstraps, (self.ate_alpha / 2) * 100, axis=1) ate_upper = np.percentile(ate_bootstraps, (1 - self.ate_alpha / 2) * 100, axis=1) # set member variables back to global (currently last bootstrapped outcome) self.t_groups = t_groups_global self._classes = _classes_global self.models_mu_c = deepcopy(models_mu_c_global) self.models_mu_t = deepcopy(models_mu_t_global) self.models_tau_c = deepcopy(models_tau_c_global) self.models_tau_t = deepcopy(models_tau_t_global) return ate, ate_lower, ate_upper
[ "def", "estimate_ate", "(", "self", ",", "X", ",", "treatment", ",", "y", ",", "p", "=", "None", ",", "bootstrap_ci", "=", "False", ",", "n_bootstraps", "=", "1000", ",", "bootstrap_size", "=", "10000", ")", ":", "te", ",", "dhat_cs", ",", "dhat_ts", "=", "self", ".", "fit_predict", "(", "X", ",", "treatment", ",", "y", ",", "p", ",", "return_components", "=", "True", ")", "X", ",", "treatment", ",", "y", "=", "convert_pd_to_np", "(", "X", ",", "treatment", ",", "y", ")", "if", "p", "is", "None", ":", "p", "=", "self", ".", "propensity", "else", ":", "p", "=", "self", ".", "_format_p", "(", "p", ",", "self", ".", "t_groups", ")", "ate", "=", "np", ".", "zeros", "(", "self", ".", "t_groups", ".", "shape", "[", "0", "]", ")", "ate_lb", "=", "np", ".", "zeros", "(", "self", ".", "t_groups", ".", "shape", "[", "0", "]", ")", "ate_ub", "=", "np", ".", "zeros", "(", "self", ".", "t_groups", ".", "shape", "[", "0", "]", ")", "for", "i", ",", "group", "in", "enumerate", "(", "self", ".", "t_groups", ")", ":", "_ate", "=", "te", "[", ":", ",", "i", "]", ".", "mean", "(", ")", "mask", "=", "(", "treatment", "==", "group", ")", "|", "(", "treatment", "==", "self", ".", "control_name", ")", "treatment_filt", "=", "treatment", "[", "mask", "]", "w", "=", "(", "treatment_filt", "==", "group", ")", ".", "astype", "(", "int", ")", "prob_treatment", "=", "float", "(", "sum", "(", "w", ")", ")", "/", "w", ".", "shape", "[", "0", "]", "dhat_c", "=", "dhat_cs", "[", "group", "]", "[", "mask", "]", "dhat_t", "=", "dhat_ts", "[", "group", "]", "[", "mask", "]", "p_filt", "=", "p", "[", "group", "]", "[", "mask", "]", "# SE formula is based on the lower bound formula (7) from Imbens, Guido W., and Jeffrey M. Wooldridge. 2009.", "# \"Recent Developments in the Econometrics of Program Evaluation.\" Journal of Economic Literature", "se", "=", "np", ".", "sqrt", "(", "(", "self", ".", "vars_t", "[", "group", "]", "/", "prob_treatment", "+", "self", ".", "vars_c", "[", "group", "]", "/", "(", "1", "-", "prob_treatment", ")", "+", "(", "p_filt", "*", "dhat_c", "+", "(", "1", "-", "p_filt", ")", "*", "dhat_t", ")", ".", "var", "(", ")", ")", "/", "w", ".", "shape", "[", "0", "]", ")", "_ate_lb", "=", "_ate", "-", "se", "*", "norm", ".", "ppf", "(", "1", "-", "self", ".", "ate_alpha", "/", "2", ")", "_ate_ub", "=", "_ate", "+", "se", "*", "norm", ".", "ppf", "(", "1", "-", "self", ".", "ate_alpha", "/", "2", ")", "ate", "[", "i", "]", "=", "_ate", "ate_lb", "[", "i", "]", "=", "_ate_lb", "ate_ub", "[", "i", "]", "=", "_ate_ub", "if", "not", "bootstrap_ci", ":", "return", "ate", ",", "ate_lb", ",", "ate_ub", "else", ":", "t_groups_global", "=", "self", ".", "t_groups", "_classes_global", "=", "self", ".", "_classes", "models_mu_c_global", "=", "deepcopy", "(", "self", ".", "models_mu_c", ")", "models_mu_t_global", "=", "deepcopy", "(", "self", ".", "models_mu_t", ")", "models_tau_c_global", "=", "deepcopy", "(", "self", ".", "models_tau_c", ")", "models_tau_t_global", "=", "deepcopy", "(", "self", ".", "models_tau_t", ")", "logger", ".", "info", "(", "'Bootstrap Confidence Intervals for ATE'", ")", "ate_bootstraps", "=", "np", ".", "zeros", "(", "shape", "=", "(", "self", ".", "t_groups", ".", "shape", "[", "0", "]", ",", "n_bootstraps", ")", ")", "for", "n", "in", "tqdm", "(", "range", "(", "n_bootstraps", ")", ")", ":", "cate_b", "=", "self", ".", "bootstrap", "(", "X", ",", "treatment", ",", "y", ",", "p", ",", "size", "=", "bootstrap_size", ")", "ate_bootstraps", "[", ":", ",", "n", "]", "=", "cate_b", ".", "mean", 
"(", ")", "ate_lower", "=", "np", ".", "percentile", "(", "ate_bootstraps", ",", "(", "self", ".", "ate_alpha", "/", "2", ")", "*", "100", ",", "axis", "=", "1", ")", "ate_upper", "=", "np", ".", "percentile", "(", "ate_bootstraps", ",", "(", "1", "-", "self", ".", "ate_alpha", "/", "2", ")", "*", "100", ",", "axis", "=", "1", ")", "# set member variables back to global (currently last bootstrapped outcome)", "self", ".", "t_groups", "=", "t_groups_global", "self", ".", "_classes", "=", "_classes_global", "self", ".", "models_mu_c", "=", "deepcopy", "(", "models_mu_c_global", ")", "self", ".", "models_mu_t", "=", "deepcopy", "(", "models_mu_t_global", ")", "self", ".", "models_tau_c", "=", "deepcopy", "(", "models_tau_c_global", ")", "self", ".", "models_tau_t", "=", "deepcopy", "(", "models_tau_t_global", ")", "return", "ate", ",", "ate_lower", ",", "ate_upper" ]
[ 257, 4 ]
[ 338, 44 ]
python
en
['en', 'it', 'en']
True
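The analytic interval in estimate_ate comes from the standard-error expression in the se = np.sqrt(...) line; written out (a reconstruction of the code, following the lower-bound formula (7) its comment cites from Imbens and Wooldridge, 2009), with \bar{w} the fraction of treated units in the filtered sample of size n:

SE = \sqrt{ \frac{1}{n} \left( \frac{\hat{\sigma}^2_t}{\bar{w}} + \frac{\hat{\sigma}^2_c}{1 - \bar{w}} + \operatorname{Var}\!\bigl( p\,\hat{\tau}_c + (1 - p)\,\hat{\tau}_t \bigr) \right) }

and the interval is \hat{\mathrm{ATE}} \pm z_{1-\alpha/2}\,SE.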
BaseXRegressor.__init__
(self, learner=None, control_outcome_learner=None, treatment_outcome_learner=None, control_effect_learner=None, treatment_effect_learner=None, ate_alpha=.05, control_name=0)
Initialize an X-learner regressor. Args: learner (optional): a model to estimate outcomes and treatment effects in both the control and treatment groups control_outcome_learner (optional): a model to estimate outcomes in the control group treatment_outcome_learner (optional): a model to estimate outcomes in the treatment group control_effect_learner (optional): a model to estimate treatment effects in the control group treatment_effect_learner (optional): a model to estimate treatment effects in the treatment group ate_alpha (float, optional): the confidence level alpha of the ATE estimate control_name (str or int, optional): name of control group
Initialize an X-learner regressor.
def __init__(self, learner=None, control_outcome_learner=None, treatment_outcome_learner=None, control_effect_learner=None, treatment_effect_learner=None, ate_alpha=.05, control_name=0): """Initialize an X-learner regressor. Args: learner (optional): a model to estimate outcomes and treatment effects in both the control and treatment groups control_outcome_learner (optional): a model to estimate outcomes in the control group treatment_outcome_learner (optional): a model to estimate outcomes in the treatment group control_effect_learner (optional): a model to estimate treatment effects in the control group treatment_effect_learner (optional): a model to estimate treatment effects in the treatment group ate_alpha (float, optional): the confidence level alpha of the ATE estimate control_name (str or int, optional): name of control group """ super().__init__( learner=learner, control_outcome_learner=control_outcome_learner, treatment_outcome_learner=treatment_outcome_learner, control_effect_learner=control_effect_learner, treatment_effect_learner=treatment_effect_learner, ate_alpha=ate_alpha, control_name=control_name)
[ "def", "__init__", "(", "self", ",", "learner", "=", "None", ",", "control_outcome_learner", "=", "None", ",", "treatment_outcome_learner", "=", "None", ",", "control_effect_learner", "=", "None", ",", "treatment_effect_learner", "=", "None", ",", "ate_alpha", "=", ".05", ",", "control_name", "=", "0", ")", ":", "super", "(", ")", ".", "__init__", "(", "learner", "=", "learner", ",", "control_outcome_learner", "=", "control_outcome_learner", ",", "treatment_outcome_learner", "=", "treatment_outcome_learner", ",", "control_effect_learner", "=", "control_effect_learner", ",", "treatment_effect_learner", "=", "treatment_effect_learner", ",", "ate_alpha", "=", "ate_alpha", ",", "control_name", "=", "control_name", ")" ]
[ 346, 4 ]
[ 373, 38 ]
python
en
['en', 'fy', 'nl']
False
BaseXClassifier.__init__
(self, outcome_learner=None, effect_learner=None, control_outcome_learner=None, treatment_outcome_learner=None, control_effect_learner=None, treatment_effect_learner=None, ate_alpha=.05, control_name=0)
Initialize an X-learner classifier. Args: outcome_learner (optional): a model to estimate outcomes in both the control and treatment groups. Should be a classifier. effect_learner (optional): a model to estimate treatment effects in both the control and treatment groups. Should be a regressor. control_outcome_learner (optional): a model to estimate outcomes in the control group. Should be a classifier. treatment_outcome_learner (optional): a model to estimate outcomes in the treatment group. Should be a classifier. control_effect_learner (optional): a model to estimate treatment effects in the control group. Should be a regressor. treatment_effect_learner (optional): a model to estimate treatment effects in the treatment group. Should be a regressor. ate_alpha (float, optional): the confidence level alpha of the ATE estimate control_name (str or int, optional): name of control group
Initialize an X-learner classifier.
def __init__(self, outcome_learner=None, effect_learner=None, control_outcome_learner=None, treatment_outcome_learner=None, control_effect_learner=None, treatment_effect_learner=None, ate_alpha=.05, control_name=0): """Initialize an X-learner classifier. Args: outcome_learner (optional): a model to estimate outcomes in both the control and treatment groups. Should be a classifier. effect_learner (optional): a model to estimate treatment effects in both the control and treatment groups. Should be a regressor. control_outcome_learner (optional): a model to estimate outcomes in the control group. Should be a classifier. treatment_outcome_learner (optional): a model to estimate outcomes in the treatment group. Should be a classifier. control_effect_learner (optional): a model to estimate treatment effects in the control group. Should be a regressor. treatment_effect_learner (optional): a model to estimate treatment effects in the treatment group. Should be a regressor. ate_alpha (float, optional): the confidence level alpha of the ATE estimate control_name (str or int, optional): name of control group """ if outcome_learner is not None: control_outcome_learner = outcome_learner treatment_outcome_learner = outcome_learner if effect_learner is not None: control_effect_learner = effect_learner treatment_effect_learner = effect_learner super().__init__( learner=None, control_outcome_learner=control_outcome_learner, treatment_outcome_learner=treatment_outcome_learner, control_effect_learner=control_effect_learner, treatment_effect_learner=treatment_effect_learner, ate_alpha=ate_alpha, control_name=control_name) if ((control_outcome_learner is None) or (treatment_outcome_learner is None)) and ( (control_effect_learner is None) or (treatment_effect_learner is None)): raise ValueError("Either the outcome learner or the effect learner pair must be specified.")
[ "def", "__init__", "(", "self", ",", "outcome_learner", "=", "None", ",", "effect_learner", "=", "None", ",", "control_outcome_learner", "=", "None", ",", "treatment_outcome_learner", "=", "None", ",", "control_effect_learner", "=", "None", ",", "treatment_effect_learner", "=", "None", ",", "ate_alpha", "=", ".05", ",", "control_name", "=", "0", ")", ":", "if", "outcome_learner", "is", "not", "None", ":", "control_outcome_learner", "=", "outcome_learner", "treatment_outcome_learner", "=", "outcome_learner", "if", "effect_learner", "is", "not", "None", ":", "control_effect_learner", "=", "effect_learner", "treatment_effect_learner", "=", "effect_learner", "super", "(", ")", ".", "__init__", "(", "learner", "=", "None", ",", "control_outcome_learner", "=", "control_outcome_learner", ",", "treatment_outcome_learner", "=", "treatment_outcome_learner", ",", "control_effect_learner", "=", "control_effect_learner", ",", "treatment_effect_learner", "=", "treatment_effect_learner", ",", "ate_alpha", "=", "ate_alpha", ",", "control_name", "=", "control_name", ")", "if", "(", "(", "control_outcome_learner", "is", "None", ")", "or", "(", "treatment_outcome_learner", "is", "None", ")", ")", "and", "(", "(", "control_effect_learner", "is", "None", ")", "or", "(", "treatment_effect_learner", "is", "None", ")", ")", ":", "raise", "ValueError", "(", "\"Either the outcome learner or the effect learner pair must be specified.\"", ")" ]
[ 381, 4 ]
[ 426, 104 ]
python
en
['en', 'fy', 'nl']
False
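BaseXClassifier splits the two roles by model type: the outcome stage needs classifiers (fit and predict use predict_proba, per the records below), while the effect stage needs regressors because the imputed effects are continuous. A hedged instantiation sketch (import path assumed to mirror BaseXLearner's):

from sklearn.linear_model import LinearRegression, LogisticRegression
from causalml.inference.meta import BaseXClassifier  # assumed import path

xc = BaseXClassifier(
    outcome_learner=LogisticRegression(),  # must expose predict_proba
    effect_learner=LinearRegression(),     # plain regressor for the imputed effects
)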
BaseXClassifier.fit
(self, X, treatment, y, p=None)
Fit the inference model. Args: X (np.matrix or np.array or pd.Dataframe): a feature matrix treatment (np.array or pd.Series): a treatment vector y (np.array or pd.Series): an outcome vector p (np.ndarray or pd.Series or dict, optional): an array of propensity scores of float (0,1) in the single-treatment case; or, a dictionary of treatment groups that map to propensity vectors of float (0,1); if None will run ElasticNetPropensityModel() to generate the propensity scores.
Fit the inference model.
def fit(self, X, treatment, y, p=None): """Fit the inference model. Args: X (np.matrix or np.array or pd.Dataframe): a feature matrix treatment (np.array or pd.Series): a treatment vector y (np.array or pd.Series): an outcome vector p (np.ndarray or pd.Series or dict, optional): an array of propensity scores of float (0,1) in the single-treatment case; or, a dictionary of treatment groups that map to propensity vectors of float (0,1); if None will run ElasticNetPropensityModel() to generate the propensity scores. """ X, treatment, y = convert_pd_to_np(X, treatment, y) check_treatment_vector(treatment, self.control_name) self.t_groups = np.unique(treatment[treatment != self.control_name]) self.t_groups.sort() if p is None: self._set_propensity_models(X=X, treatment=treatment, y=y) p = self.propensity else: p = self._format_p(p, self.t_groups) self._classes = {group: i for i, group in enumerate(self.t_groups)} self.models_mu_c = {group: deepcopy(self.model_mu_c) for group in self.t_groups} self.models_mu_t = {group: deepcopy(self.model_mu_t) for group in self.t_groups} self.models_tau_c = {group: deepcopy(self.model_tau_c) for group in self.t_groups} self.models_tau_t = {group: deepcopy(self.model_tau_t) for group in self.t_groups} self.vars_c = {} self.vars_t = {} for group in self.t_groups: mask = (treatment == group) | (treatment == self.control_name) treatment_filt = treatment[mask] X_filt = X[mask] y_filt = y[mask] w = (treatment_filt == group).astype(int) # Train outcome models self.models_mu_c[group].fit(X_filt[w == 0], y_filt[w == 0]) self.models_mu_t[group].fit(X_filt[w == 1], y_filt[w == 1]) # Calculate variances and treatment effects var_c = (y_filt[w == 0] - self.models_mu_c[group].predict_proba(X_filt[w == 0])[:, 1]).var() self.vars_c[group] = var_c var_t = (y_filt[w == 1] - self.models_mu_t[group].predict_proba(X_filt[w == 1])[:, 1]).var() self.vars_t[group] = var_t # Train treatment models d_c = self.models_mu_t[group].predict_proba(X_filt[w == 0])[:, 1] - y_filt[w == 0] d_t = y_filt[w == 1] - self.models_mu_c[group].predict_proba(X_filt[w == 1])[:, 1] self.models_tau_c[group].fit(X_filt[w == 0], d_c) self.models_tau_t[group].fit(X_filt[w == 1], d_t)
[ "def", "fit", "(", "self", ",", "X", ",", "treatment", ",", "y", ",", "p", "=", "None", ")", ":", "X", ",", "treatment", ",", "y", "=", "convert_pd_to_np", "(", "X", ",", "treatment", ",", "y", ")", "check_treatment_vector", "(", "treatment", ",", "self", ".", "control_name", ")", "self", ".", "t_groups", "=", "np", ".", "unique", "(", "treatment", "[", "treatment", "!=", "self", ".", "control_name", "]", ")", "self", ".", "t_groups", ".", "sort", "(", ")", "if", "p", "is", "None", ":", "self", ".", "_set_propensity_models", "(", "X", "=", "X", ",", "treatment", "=", "treatment", ",", "y", "=", "y", ")", "p", "=", "self", ".", "propensity", "else", ":", "p", "=", "self", ".", "_format_p", "(", "p", ",", "self", ".", "t_groups", ")", "self", ".", "_classes", "=", "{", "group", ":", "i", "for", "i", ",", "group", "in", "enumerate", "(", "self", ".", "t_groups", ")", "}", "self", ".", "models_mu_c", "=", "{", "group", ":", "deepcopy", "(", "self", ".", "model_mu_c", ")", "for", "group", "in", "self", ".", "t_groups", "}", "self", ".", "models_mu_t", "=", "{", "group", ":", "deepcopy", "(", "self", ".", "model_mu_t", ")", "for", "group", "in", "self", ".", "t_groups", "}", "self", ".", "models_tau_c", "=", "{", "group", ":", "deepcopy", "(", "self", ".", "model_tau_c", ")", "for", "group", "in", "self", ".", "t_groups", "}", "self", ".", "models_tau_t", "=", "{", "group", ":", "deepcopy", "(", "self", ".", "model_tau_t", ")", "for", "group", "in", "self", ".", "t_groups", "}", "self", ".", "vars_c", "=", "{", "}", "self", ".", "vars_t", "=", "{", "}", "for", "group", "in", "self", ".", "t_groups", ":", "mask", "=", "(", "treatment", "==", "group", ")", "|", "(", "treatment", "==", "self", ".", "control_name", ")", "treatment_filt", "=", "treatment", "[", "mask", "]", "X_filt", "=", "X", "[", "mask", "]", "y_filt", "=", "y", "[", "mask", "]", "w", "=", "(", "treatment_filt", "==", "group", ")", ".", "astype", "(", "int", ")", "# Train outcome models", "self", ".", "models_mu_c", "[", "group", "]", ".", "fit", "(", "X_filt", "[", "w", "==", "0", "]", ",", "y_filt", "[", "w", "==", "0", "]", ")", "self", ".", "models_mu_t", "[", "group", "]", ".", "fit", "(", "X_filt", "[", "w", "==", "1", "]", ",", "y_filt", "[", "w", "==", "1", "]", ")", "# Calculate variances and treatment effects", "var_c", "=", "(", "y_filt", "[", "w", "==", "0", "]", "-", "self", ".", "models_mu_c", "[", "group", "]", ".", "predict_proba", "(", "X_filt", "[", "w", "==", "0", "]", ")", "[", ":", ",", "1", "]", ")", ".", "var", "(", ")", "self", ".", "vars_c", "[", "group", "]", "=", "var_c", "var_t", "=", "(", "y_filt", "[", "w", "==", "1", "]", "-", "self", ".", "models_mu_t", "[", "group", "]", ".", "predict_proba", "(", "X_filt", "[", "w", "==", "1", "]", ")", "[", ":", ",", "1", "]", ")", ".", "var", "(", ")", "self", ".", "vars_t", "[", "group", "]", "=", "var_t", "# Train treatment models", "d_c", "=", "self", ".", "models_mu_t", "[", "group", "]", ".", "predict_proba", "(", "X_filt", "[", "w", "==", "0", "]", ")", "[", ":", ",", "1", "]", "-", "y_filt", "[", "w", "==", "0", "]", "d_t", "=", "y_filt", "[", "w", "==", "1", "]", "-", "self", ".", "models_mu_c", "[", "group", "]", ".", "predict_proba", "(", "X_filt", "[", "w", "==", "1", "]", ")", "[", ":", ",", "1", "]", "self", ".", "models_tau_c", "[", "group", "]", ".", "fit", "(", "X_filt", "[", "w", "==", "0", "]", ",", "d_c", ")", "self", ".", "models_tau_t", "[", "group", "]", ".", "fit", "(", "X_filt", "[", "w", "==", "1", "]", ",", "d_t", ")" ]
[ 428, 4 ]
[ 479, 61 ]
python
en
['en', 'en', 'en']
True
BaseXClassifier.predict
(self, X, treatment=None, y=None, p=None, return_components=False, verbose=True)
Predict treatment effects. Args: X (np.matrix or np.array or pd.Dataframe): a feature matrix treatment (np.array or pd.Series, optional): a treatment vector y (np.array or pd.Series, optional): an outcome vector p (np.ndarray or pd.Series or dict, optional): an array of propensity scores of float (0,1) in the single-treatment case; or, a dictionary of treatment groups that map to propensity vectors of float (0,1); if None will run ElasticNetPropensityModel() to generate the propensity scores. return_components (bool, optional): whether to return outcome for treatment and control separately verbose (bool, optional): whether to output progress logs Returns: (numpy.ndarray): Predictions of treatment effects.
Predict treatment effects.
def predict(self, X, treatment=None, y=None, p=None, return_components=False, verbose=True): """Predict treatment effects. Args: X (np.matrix or np.array or pd.Dataframe): a feature matrix treatment (np.array or pd.Series, optional): a treatment vector y (np.array or pd.Series, optional): an outcome vector p (np.ndarray or pd.Series or dict, optional): an array of propensity scores of float (0,1) in the single-treatment case; or, a dictionary of treatment groups that map to propensity vectors of float (0,1); if None will run ElasticNetPropensityModel() to generate the propensity scores. return_components (bool, optional): whether to return outcome for treatment and control separately verbose (bool, optional): whether to output progress logs Returns: (numpy.ndarray): Predictions of treatment effects. """ X, treatment, y = convert_pd_to_np(X, treatment, y) if p is None: logger.info('Generating propensity score') p = dict() for group in self.t_groups: p_model = self.propensity_model[group] p[group] = p_model.predict(X) else: p = self._format_p(p, self.t_groups) te = np.zeros((X.shape[0], self.t_groups.shape[0])) dhat_cs = {} dhat_ts = {} for i, group in enumerate(self.t_groups): model_tau_c = self.models_tau_c[group] model_tau_t = self.models_tau_t[group] dhat_cs[group] = model_tau_c.predict(X) dhat_ts[group] = model_tau_t.predict(X) _te = (p[group] * dhat_cs[group] + (1 - p[group]) * dhat_ts[group]).reshape(-1, 1) te[:, i] = np.ravel(_te) if (y is not None) and (treatment is not None) and verbose: mask = (treatment == group) | (treatment == self.control_name) treatment_filt = treatment[mask] X_filt = X[mask] y_filt = y[mask] w = (treatment_filt == group).astype(int) yhat = np.zeros_like(y_filt, dtype=float) yhat[w == 0] = self.models_mu_c[group].predict_proba(X_filt[w == 0])[:, 1] yhat[w == 1] = self.models_mu_t[group].predict_proba(X_filt[w == 1])[:, 1] logger.info('Error metrics for group {}'.format(group)) classification_metrics(y_filt, yhat, w) if not return_components: return te else: return te, dhat_cs, dhat_ts
[ "def", "predict", "(", "self", ",", "X", ",", "treatment", "=", "None", ",", "y", "=", "None", ",", "p", "=", "None", ",", "return_components", "=", "False", ",", "verbose", "=", "True", ")", ":", "X", ",", "treatment", ",", "y", "=", "convert_pd_to_np", "(", "X", ",", "treatment", ",", "y", ")", "if", "p", "is", "None", ":", "logger", ".", "info", "(", "'Generating propensity score'", ")", "p", "=", "dict", "(", ")", "for", "group", "in", "self", ".", "t_groups", ":", "p_model", "=", "self", ".", "propensity_model", "[", "group", "]", "p", "[", "group", "]", "=", "p_model", ".", "predict", "(", "X", ")", "else", ":", "p", "=", "self", ".", "_format_p", "(", "p", ",", "self", ".", "t_groups", ")", "te", "=", "np", ".", "zeros", "(", "(", "X", ".", "shape", "[", "0", "]", ",", "self", ".", "t_groups", ".", "shape", "[", "0", "]", ")", ")", "dhat_cs", "=", "{", "}", "dhat_ts", "=", "{", "}", "for", "i", ",", "group", "in", "enumerate", "(", "self", ".", "t_groups", ")", ":", "model_tau_c", "=", "self", ".", "models_tau_c", "[", "group", "]", "model_tau_t", "=", "self", ".", "models_tau_t", "[", "group", "]", "dhat_cs", "[", "group", "]", "=", "model_tau_c", ".", "predict", "(", "X", ")", "dhat_ts", "[", "group", "]", "=", "model_tau_t", ".", "predict", "(", "X", ")", "_te", "=", "(", "p", "[", "group", "]", "*", "dhat_cs", "[", "group", "]", "+", "(", "1", "-", "p", "[", "group", "]", ")", "*", "dhat_ts", "[", "group", "]", ")", ".", "reshape", "(", "-", "1", ",", "1", ")", "te", "[", ":", ",", "i", "]", "=", "np", ".", "ravel", "(", "_te", ")", "if", "(", "y", "is", "not", "None", ")", "and", "(", "treatment", "is", "not", "None", ")", "and", "verbose", ":", "mask", "=", "(", "treatment", "==", "group", ")", "|", "(", "treatment", "==", "self", ".", "control_name", ")", "treatment_filt", "=", "treatment", "[", "mask", "]", "X_filt", "=", "X", "[", "mask", "]", "y_filt", "=", "y", "[", "mask", "]", "w", "=", "(", "treatment_filt", "==", "group", ")", ".", "astype", "(", "int", ")", "yhat", "=", "np", ".", "zeros_like", "(", "y_filt", ",", "dtype", "=", "float", ")", "yhat", "[", "w", "==", "0", "]", "=", "self", ".", "models_mu_c", "[", "group", "]", ".", "predict_proba", "(", "X_filt", "[", "w", "==", "0", "]", ")", "[", ":", ",", "1", "]", "yhat", "[", "w", "==", "1", "]", "=", "self", ".", "models_mu_t", "[", "group", "]", ".", "predict_proba", "(", "X_filt", "[", "w", "==", "1", "]", ")", "[", ":", ",", "1", "]", "logger", ".", "info", "(", "'Error metrics for group {}'", ".", "format", "(", "group", ")", ")", "classification_metrics", "(", "y_filt", ",", "yhat", ",", "w", ")", "if", "not", "return_components", ":", "return", "te", "else", ":", "return", "te", ",", "dhat_cs", ",", "dhat_ts" ]
[ 481, 4 ]
[ 539, 39 ]
python
en
['fr', 'en', 'en']
True
test_ValidationsStore_with_TupleS3StoreBackend
()
What does this test and why? A Store should be able to report its store_backend_id which is set when the StoreBackend is instantiated.
What does this test and why? A Store should be able to report its store_backend_id which is set when the StoreBackend is instantiated.
def test_ValidationsStore_with_TupleS3StoreBackend(): bucket = "test_validation_store_bucket" prefix = "test/prefix" # create a bucket in Moto's mock AWS environment conn = boto3.resource("s3", region_name="us-east-1") conn.create_bucket(Bucket=bucket) # First, demonstrate that we pick up default configuration including from an S3TupleS3StoreBackend my_store = ValidationsStore( store_backend={ "class_name": "TupleS3StoreBackend", "bucket": bucket, "prefix": prefix, } ) with pytest.raises(TypeError): my_store.get("not_a_ValidationResultIdentifier") ns_1 = ValidationResultIdentifier( expectation_suite_identifier=ExpectationSuiteIdentifier( expectation_suite_name="asset.quarantine", ), run_id="20191007T151224.1234Z_prod_100", batch_identifier="batch_id", ) my_store.set(ns_1, ExpectationSuiteValidationResult(success=True)) assert my_store.get(ns_1) == ExpectationSuiteValidationResult( success=True, statistics={}, results=[] ) ns_2 = ValidationResultIdentifier( expectation_suite_identifier=ExpectationSuiteIdentifier( expectation_suite_name="asset.quarantine", ), run_id="20191007T151224.1234Z_prod_200", batch_identifier="batch_id", ) my_store.set(ns_2, ExpectationSuiteValidationResult(success=False)) assert my_store.get(ns_2) == ExpectationSuiteValidationResult( success=False, statistics={}, results=[] ) # Verify that internals are working as expected, including the default filepath assert { s3_object_info["Key"] for s3_object_info in boto3.client("s3").list_objects_v2( Bucket=bucket, Prefix=prefix )["Contents"] } == { "test/prefix/.ge_store_backend_id", "test/prefix/asset/quarantine/20191007T151224.1234Z_prod_100/20190926T134241.000000Z/batch_id.json", "test/prefix/asset/quarantine/20191007T151224.1234Z_prod_200/20190926T134241.000000Z/batch_id.json", } print(my_store.list_keys()) assert set(my_store.list_keys()) == { ns_1, ns_2, } """ What does this test and why? A Store should be able to report its store_backend_id which is set when the StoreBackend is instantiated. """ # Check that store_backend_id exists can be read assert my_store.store_backend_id is not None # Check that store_backend_id is a valid UUID assert test_utils.validate_uuid4(my_store.store_backend_id)
[ "def", "test_ValidationsStore_with_TupleS3StoreBackend", "(", ")", ":", "bucket", "=", "\"test_validation_store_bucket\"", "prefix", "=", "\"test/prefix\"", "# create a bucket in Moto's mock AWS environment", "conn", "=", "boto3", ".", "resource", "(", "\"s3\"", ",", "region_name", "=", "\"us-east-1\"", ")", "conn", ".", "create_bucket", "(", "Bucket", "=", "bucket", ")", "# First, demonstrate that we pick up default configuration including from an S3TupleS3StoreBackend", "my_store", "=", "ValidationsStore", "(", "store_backend", "=", "{", "\"class_name\"", ":", "\"TupleS3StoreBackend\"", ",", "\"bucket\"", ":", "bucket", ",", "\"prefix\"", ":", "prefix", ",", "}", ")", "with", "pytest", ".", "raises", "(", "TypeError", ")", ":", "my_store", ".", "get", "(", "\"not_a_ValidationResultIdentifier\"", ")", "ns_1", "=", "ValidationResultIdentifier", "(", "expectation_suite_identifier", "=", "ExpectationSuiteIdentifier", "(", "expectation_suite_name", "=", "\"asset.quarantine\"", ",", ")", ",", "run_id", "=", "\"20191007T151224.1234Z_prod_100\"", ",", "batch_identifier", "=", "\"batch_id\"", ",", ")", "my_store", ".", "set", "(", "ns_1", ",", "ExpectationSuiteValidationResult", "(", "success", "=", "True", ")", ")", "assert", "my_store", ".", "get", "(", "ns_1", ")", "==", "ExpectationSuiteValidationResult", "(", "success", "=", "True", ",", "statistics", "=", "{", "}", ",", "results", "=", "[", "]", ")", "ns_2", "=", "ValidationResultIdentifier", "(", "expectation_suite_identifier", "=", "ExpectationSuiteIdentifier", "(", "expectation_suite_name", "=", "\"asset.quarantine\"", ",", ")", ",", "run_id", "=", "\"20191007T151224.1234Z_prod_200\"", ",", "batch_identifier", "=", "\"batch_id\"", ",", ")", "my_store", ".", "set", "(", "ns_2", ",", "ExpectationSuiteValidationResult", "(", "success", "=", "False", ")", ")", "assert", "my_store", ".", "get", "(", "ns_2", ")", "==", "ExpectationSuiteValidationResult", "(", "success", "=", "False", ",", "statistics", "=", "{", "}", ",", "results", "=", "[", "]", ")", "# Verify that internals are working as expected, including the default filepath", "assert", "{", "s3_object_info", "[", "\"Key\"", "]", "for", "s3_object_info", "in", "boto3", ".", "client", "(", "\"s3\"", ")", ".", "list_objects_v2", "(", "Bucket", "=", "bucket", ",", "Prefix", "=", "prefix", ")", "[", "\"Contents\"", "]", "}", "==", "{", "\"test/prefix/.ge_store_backend_id\"", ",", "\"test/prefix/asset/quarantine/20191007T151224.1234Z_prod_100/20190926T134241.000000Z/batch_id.json\"", ",", "\"test/prefix/asset/quarantine/20191007T151224.1234Z_prod_200/20190926T134241.000000Z/batch_id.json\"", ",", "}", "print", "(", "my_store", ".", "list_keys", "(", ")", ")", "assert", "set", "(", "my_store", ".", "list_keys", "(", ")", ")", "==", "{", "ns_1", ",", "ns_2", ",", "}", "# Check that store_backend_id exists can be read", "assert", "my_store", ".", "store_backend_id", "is", "not", "None", "# Check that store_backend_id is a valid UUID", "assert", "test_utils", ".", "validate_uuid4", "(", "my_store", ".", "store_backend_id", ")" ]
[ 22, 0 ]
[ 93, 63 ]
python
en
['en', 'error', 'th']
False
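A hedged sketch, not part of this record, of the moto scaffolding such a store-backend test relies on (the decorator name is an assumption about the installed moto version: moto < 5 exposes mock_s3, moto >= 5 replaced it with mock_aws):

import boto3
from moto import mock_s3  # assumption: moto < 5 is installed

@mock_s3
def exercise_validations_store():
    # All boto3 calls inside are served by moto's in-process fake S3.
    conn = boto3.resource("s3", region_name="us-east-1")
    conn.create_bucket(Bucket="test_validation_store_bucket")
    # ... instantiate ValidationsStore with a TupleS3StoreBackend as in the test above ...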
test_ValidationsStore_with_InMemoryStoreBackend
()
What does this test and why? A Store should be able to report its store_backend_id, which is set when the StoreBackend is instantiated.
What does this test and why? A Store should be able to report its store_backend_id, which is set when the StoreBackend is instantiated.
def test_ValidationsStore_with_InMemoryStoreBackend():
    my_store = ValidationsStore(
        store_backend={
            "module_name": "great_expectations.data_context.store",
            "class_name": "InMemoryStoreBackend",
        }
    )

    with pytest.raises(TypeError):
        my_store.get("not_a_ValidationResultIdentifier")

    ns_1 = ValidationResultIdentifier.from_tuple(
        (
            "a",
            "b",
            "c",
            "quarantine",
            datetime.datetime.now(datetime.timezone.utc),
            "prod-100",
        )
    )
    my_store.set(ns_1, ExpectationSuiteValidationResult(success=True))
    assert my_store.get(ns_1) == ExpectationSuiteValidationResult(
        success=True, statistics={}, results=[]
    )

    ns_2 = ValidationResultIdentifier.from_tuple(
        (
            "a",
            "b",
            "c",
            "quarantine",
            datetime.datetime.now(datetime.timezone.utc),
            "prod-200",
        )
    )
    my_store.set(ns_2, ExpectationSuiteValidationResult(success=False))
    assert my_store.get(ns_2) == ExpectationSuiteValidationResult(
        success=False, statistics={}, results=[]
    )

    assert set(my_store.list_keys()) == {
        ns_1,
        ns_2,
    }

    """
    What does this test and why?
    A Store should be able to report its store_backend_id, which is set when the StoreBackend is instantiated.
    """
    # Check that store_backend_id exists and can be read
    assert my_store.store_backend_id is not None
    # Check that store_backend_id is a valid UUID
    assert test_utils.validate_uuid4(my_store.store_backend_id)
[ "def", "test_ValidationsStore_with_InMemoryStoreBackend", "(", ")", ":", "my_store", "=", "ValidationsStore", "(", "store_backend", "=", "{", "\"module_name\"", ":", "\"great_expectations.data_context.store\"", ",", "\"class_name\"", ":", "\"InMemoryStoreBackend\"", ",", "}", ")", "with", "pytest", ".", "raises", "(", "TypeError", ")", ":", "my_store", ".", "get", "(", "\"not_a_ValidationResultIdentifier\"", ")", "ns_1", "=", "ValidationResultIdentifier", ".", "from_tuple", "(", "(", "\"a\"", ",", "\"b\"", ",", "\"c\"", ",", "\"quarantine\"", ",", "datetime", ".", "datetime", ".", "now", "(", "datetime", ".", "timezone", ".", "utc", ")", ",", "\"prod-100\"", ",", ")", ")", "my_store", ".", "set", "(", "ns_1", ",", "ExpectationSuiteValidationResult", "(", "success", "=", "True", ")", ")", "assert", "my_store", ".", "get", "(", "ns_1", ")", "==", "ExpectationSuiteValidationResult", "(", "success", "=", "True", ",", "statistics", "=", "{", "}", ",", "results", "=", "[", "]", ")", "ns_2", "=", "ValidationResultIdentifier", ".", "from_tuple", "(", "(", "\"a\"", ",", "\"b\"", ",", "\"c\"", ",", "\"quarantine\"", ",", "datetime", ".", "datetime", ".", "now", "(", "datetime", ".", "timezone", ".", "utc", ")", ",", "\"prod-200\"", ",", ")", ")", "my_store", ".", "set", "(", "ns_2", ",", "ExpectationSuiteValidationResult", "(", "success", "=", "False", ")", ")", "assert", "my_store", ".", "get", "(", "ns_2", ")", "==", "ExpectationSuiteValidationResult", "(", "success", "=", "False", ",", "statistics", "=", "{", "}", ",", "results", "=", "[", "]", ")", "assert", "set", "(", "my_store", ".", "list_keys", "(", ")", ")", "==", "{", "ns_1", ",", "ns_2", ",", "}", "# Check that store_backend_id exists can be read", "assert", "my_store", ".", "store_backend_id", "is", "not", "None", "# Check that store_backend_id is a valid UUID", "assert", "test_utils", ".", "validate_uuid4", "(", "my_store", ".", "store_backend_id", ")" ]
[ 97, 0 ]
[ 149, 63 ]
python
en
['en', 'error', 'th']
False
test_ValidationsStore_with_TupleFileSystemStoreBackend
(tmp_path_factory)
What does this test and why? A Store should be able to report its store_backend_id, which is set when the StoreBackend is instantiated.
What does this test and why? A Store should be able to report its store_backend_id, which is set when the StoreBackend is instantiated.
def test_ValidationsStore_with_TupleFileSystemStoreBackend(tmp_path_factory):
    path = str(
        tmp_path_factory.mktemp(
            "test_ValidationResultStore_with_TupleFileSystemStoreBackend__dir"
        )
    )
    project_path = str(tmp_path_factory.mktemp("my_dir"))

    my_store = ValidationsStore(
        store_backend={
            "module_name": "great_expectations.data_context.store",
            "class_name": "TupleFilesystemStoreBackend",
            "base_directory": "my_store/",
        },
        runtime_environment={"root_directory": path},
    )

    with pytest.raises(TypeError):
        my_store.get("not_a_ValidationResultIdentifier")

    ns_1 = ValidationResultIdentifier(
        expectation_suite_identifier=ExpectationSuiteIdentifier("asset.quarantine"),
        run_id="prod-100",
        batch_identifier="batch_id",
    )
    my_store.set(ns_1, ExpectationSuiteValidationResult(success=True))
    assert my_store.get(ns_1) == ExpectationSuiteValidationResult(
        success=True, statistics={}, results=[]
    )

    ns_2 = ValidationResultIdentifier.from_tuple(
        (
            "asset",
            "quarantine",
            "prod-20",
            datetime.datetime.now(datetime.timezone.utc),
            "batch_id",
        )
    )
    my_store.set(ns_2, ExpectationSuiteValidationResult(success=False))
    assert my_store.get(ns_2) == ExpectationSuiteValidationResult(
        success=False, statistics={}, results=[]
    )

    print(my_store.list_keys())
    assert set(my_store.list_keys()) == {
        ns_1,
        ns_2,
    }

    print(gen_directory_tree_str(path))
    assert (
        gen_directory_tree_str(path)
        == """\
test_ValidationResultStore_with_TupleFileSystemStoreBackend__dir0/
    my_store/
        .ge_store_backend_id
        asset/
            quarantine/
                prod-100/
                    20190926T134241.000000Z/
                        batch_id.json
                prod-20/
                    20190926T134241.000000Z/
                        batch_id.json
"""
    )

    """
    What does this test and why?
    A Store should be able to report its store_backend_id, which is set when the StoreBackend is instantiated.
    """
    # Check that store_backend_id exists and can be read
    assert my_store.store_backend_id is not None
    # Check that store_backend_id is a valid UUID
    assert test_utils.validate_uuid4(my_store.store_backend_id)

    # Check that another store with the same configuration shares the same store_backend_id
    my_store_duplicate = ValidationsStore(
        store_backend={
            "module_name": "great_expectations.data_context.store",
            "class_name": "TupleFilesystemStoreBackend",
            "base_directory": "my_store/",
        },
        runtime_environment={"root_directory": path},
    )
    assert my_store.store_backend_id == my_store_duplicate.store_backend_id
[ "def", "test_ValidationsStore_with_TupleFileSystemStoreBackend", "(", "tmp_path_factory", ")", ":", "path", "=", "str", "(", "tmp_path_factory", ".", "mktemp", "(", "\"test_ValidationResultStore_with_TupleFileSystemStoreBackend__dir\"", ")", ")", "project_path", "=", "str", "(", "tmp_path_factory", ".", "mktemp", "(", "\"my_dir\"", ")", ")", "my_store", "=", "ValidationsStore", "(", "store_backend", "=", "{", "\"module_name\"", ":", "\"great_expectations.data_context.store\"", ",", "\"class_name\"", ":", "\"TupleFilesystemStoreBackend\"", ",", "\"base_directory\"", ":", "\"my_store/\"", ",", "}", ",", "runtime_environment", "=", "{", "\"root_directory\"", ":", "path", "}", ",", ")", "with", "pytest", ".", "raises", "(", "TypeError", ")", ":", "my_store", ".", "get", "(", "\"not_a_ValidationResultIdentifier\"", ")", "ns_1", "=", "ValidationResultIdentifier", "(", "expectation_suite_identifier", "=", "ExpectationSuiteIdentifier", "(", "\"asset.quarantine\"", ")", ",", "run_id", "=", "\"prod-100\"", ",", "batch_identifier", "=", "\"batch_id\"", ",", ")", "my_store", ".", "set", "(", "ns_1", ",", "ExpectationSuiteValidationResult", "(", "success", "=", "True", ")", ")", "assert", "my_store", ".", "get", "(", "ns_1", ")", "==", "ExpectationSuiteValidationResult", "(", "success", "=", "True", ",", "statistics", "=", "{", "}", ",", "results", "=", "[", "]", ")", "ns_2", "=", "ValidationResultIdentifier", ".", "from_tuple", "(", "(", "\"asset\"", ",", "\"quarantine\"", ",", "\"prod-20\"", ",", "datetime", ".", "datetime", ".", "now", "(", "datetime", ".", "timezone", ".", "utc", ")", ",", "\"batch_id\"", ",", ")", ")", "my_store", ".", "set", "(", "ns_2", ",", "ExpectationSuiteValidationResult", "(", "success", "=", "False", ")", ")", "assert", "my_store", ".", "get", "(", "ns_2", ")", "==", "ExpectationSuiteValidationResult", "(", "success", "=", "False", ",", "statistics", "=", "{", "}", ",", "results", "=", "[", "]", ")", "print", "(", "my_store", ".", "list_keys", "(", ")", ")", "assert", "set", "(", "my_store", ".", "list_keys", "(", ")", ")", "==", "{", "ns_1", ",", "ns_2", ",", "}", "print", "(", "gen_directory_tree_str", "(", "path", ")", ")", "assert", "(", "gen_directory_tree_str", "(", "path", ")", "==", "\"\"\"\\\ntest_ValidationResultStore_with_TupleFileSystemStoreBackend__dir0/\n my_store/\n .ge_store_backend_id\n asset/\n quarantine/\n prod-100/\n 20190926T134241.000000Z/\n batch_id.json\n prod-20/\n 20190926T134241.000000Z/\n batch_id.json\n\"\"\"", ")", "# Check that store_backend_id exists can be read", "assert", "my_store", ".", "store_backend_id", "is", "not", "None", "# Check that store_backend_id is a valid UUID", "assert", "test_utils", ".", "validate_uuid4", "(", "my_store", ".", "store_backend_id", ")", "# Check that another store with the same configuration shares the same store_backend_id", "my_store_duplicate", "=", "ValidationsStore", "(", "store_backend", "=", "{", "\"module_name\"", ":", "\"great_expectations.data_context.store\"", ",", "\"class_name\"", ":", "\"TupleFilesystemStoreBackend\"", ",", "\"base_directory\"", ":", "\"my_store/\"", ",", "}", ",", "runtime_environment", "=", "{", "\"root_directory\"", ":", "path", "}", ",", ")", "assert", "my_store", ".", "store_backend_id", "==", "my_store_duplicate", ".", "store_backend_id" ]
[ 153, 0 ]
[ 240, 75 ]
python
en
['en', 'error', 'th']
False
test_ValidationsStore_with_DatabaseStoreBackend
(sa)
What does this test and why? A Store should be able to report its store_backend_id, which is set when the StoreBackend is instantiated.
What does this test and why? A Store should be able to report its store_backend_id, which is set when the StoreBackend is instantiated.
def test_ValidationsStore_with_DatabaseStoreBackend(sa):
    # Use sqlite so we don't require postgres for this test.
    connection_kwargs = {"drivername": "sqlite"}

    # First, demonstrate that we pick up default configuration
    my_store = ValidationsStore(
        store_backend={
            "class_name": "DatabaseStoreBackend",
            "credentials": connection_kwargs,
        }
    )

    with pytest.raises(TypeError):
        my_store.get("not_a_ValidationResultIdentifier")

    ns_1 = ValidationResultIdentifier(
        expectation_suite_identifier=ExpectationSuiteIdentifier(
            expectation_suite_name="asset.quarantine",
        ),
        run_id="20191007T151224.1234Z_prod_100",
        batch_identifier="batch_id",
    )
    my_store.set(ns_1, ExpectationSuiteValidationResult(success=True))
    assert my_store.get(ns_1) == ExpectationSuiteValidationResult(
        success=True, statistics={}, results=[]
    )

    ns_2 = ValidationResultIdentifier(
        expectation_suite_identifier=ExpectationSuiteIdentifier(
            expectation_suite_name="asset.quarantine",
        ),
        run_id="20191007T151224.1234Z_prod_200",
        batch_identifier="batch_id",
    )
    my_store.set(ns_2, ExpectationSuiteValidationResult(success=False))
    assert my_store.get(ns_2) == ExpectationSuiteValidationResult(
        success=False, statistics={}, results=[]
    )

    assert set(my_store.list_keys()) == {
        ns_1,
        ns_2,
    }

    """
    What does this test and why?
    A Store should be able to report its store_backend_id, which is set when the StoreBackend is instantiated.
    """
    # Check that store_backend_id exists and can be read
    assert my_store.store_backend_id is not None
    # Check that store_backend_id is a valid UUID
    assert test_utils.validate_uuid4(my_store.store_backend_id)
[ "def", "test_ValidationsStore_with_DatabaseStoreBackend", "(", "sa", ")", ":", "# Use sqlite so we don't require postgres for this test.", "connection_kwargs", "=", "{", "\"drivername\"", ":", "\"sqlite\"", "}", "# First, demonstrate that we pick up default configuration", "my_store", "=", "ValidationsStore", "(", "store_backend", "=", "{", "\"class_name\"", ":", "\"DatabaseStoreBackend\"", ",", "\"credentials\"", ":", "connection_kwargs", ",", "}", ")", "with", "pytest", ".", "raises", "(", "TypeError", ")", ":", "my_store", ".", "get", "(", "\"not_a_ValidationResultIdentifier\"", ")", "ns_1", "=", "ValidationResultIdentifier", "(", "expectation_suite_identifier", "=", "ExpectationSuiteIdentifier", "(", "expectation_suite_name", "=", "\"asset.quarantine\"", ",", ")", ",", "run_id", "=", "\"20191007T151224.1234Z_prod_100\"", ",", "batch_identifier", "=", "\"batch_id\"", ",", ")", "my_store", ".", "set", "(", "ns_1", ",", "ExpectationSuiteValidationResult", "(", "success", "=", "True", ")", ")", "assert", "my_store", ".", "get", "(", "ns_1", ")", "==", "ExpectationSuiteValidationResult", "(", "success", "=", "True", ",", "statistics", "=", "{", "}", ",", "results", "=", "[", "]", ")", "ns_2", "=", "ValidationResultIdentifier", "(", "expectation_suite_identifier", "=", "ExpectationSuiteIdentifier", "(", "expectation_suite_name", "=", "\"asset.quarantine\"", ",", ")", ",", "run_id", "=", "\"20191007T151224.1234Z_prod_200\"", ",", "batch_identifier", "=", "\"batch_id\"", ",", ")", "my_store", ".", "set", "(", "ns_2", ",", "ExpectationSuiteValidationResult", "(", "success", "=", "False", ")", ")", "assert", "my_store", ".", "get", "(", "ns_2", ")", "==", "ExpectationSuiteValidationResult", "(", "success", "=", "False", ",", "statistics", "=", "{", "}", ",", "results", "=", "[", "]", ")", "assert", "set", "(", "my_store", ".", "list_keys", "(", ")", ")", "==", "{", "ns_1", ",", "ns_2", ",", "}", "# Check that store_backend_id exists can be read", "assert", "my_store", ".", "store_backend_id", "is", "not", "None", "# Check that store_backend_id is a valid UUID", "assert", "test_utils", ".", "validate_uuid4", "(", "my_store", ".", "store_backend_id", ")" ]
[ 243, 0 ]
[ 296, 63 ]
python
en
['en', 'error', 'th']
False
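For context on the credentials dict above: DatabaseStoreBackend typically assembles a SQLAlchemy URL from these keyword arguments. A hedged equivalence sketch (URL.create is the SQLAlchemy 1.4+ constructor; older versions build URL(**kwargs) directly):

from sqlalchemy.engine.url import URL

# {"drivername": "sqlite"} is the keyword form of the URL "sqlite://",
# i.e. a default/in-memory SQLite database, so no server is required.
url = URL.create(drivername="sqlite")
assert str(url) == "sqlite://"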
test_existing_local_data_docs_urls_returns_url_on_project_with_no_datasources_and_a_site_configured
( tmp_path_factory, )
This test ensures that a url will be returned for a default site even if a datasource is not configured, and docs are not built.
This test ensures that a url will be returned for a default site even if a datasource is not configured, and docs are not built.
def test_existing_local_data_docs_urls_returns_url_on_project_with_no_datasources_and_a_site_configured(
    tmp_path_factory,
):
    """
    This test ensures that a url will be returned for a default site even if a
    datasource is not configured, and docs are not built.
    """
    empty_directory = str(tmp_path_factory.mktemp("another_empty_project"))
    DataContext.create(empty_directory)
    context = DataContext(os.path.join(empty_directory, DataContext.GE_DIR))

    obs = context.get_docs_sites_urls(only_if_exists=False)
    assert len(obs) == 1
    assert obs[0]["site_url"].endswith(
        "great_expectations/uncommitted/data_docs/local_site/index.html"
    )
[ "def", "test_existing_local_data_docs_urls_returns_url_on_project_with_no_datasources_and_a_site_configured", "(", "tmp_path_factory", ",", ")", ":", "empty_directory", "=", "str", "(", "tmp_path_factory", ".", "mktemp", "(", "\"another_empty_project\"", ")", ")", "DataContext", ".", "create", "(", "empty_directory", ")", "context", "=", "DataContext", "(", "os", ".", "path", ".", "join", "(", "empty_directory", ",", "DataContext", ".", "GE_DIR", ")", ")", "obs", "=", "context", ".", "get_docs_sites_urls", "(", "only_if_exists", "=", "False", ")", "assert", "len", "(", "obs", ")", "==", "1", "assert", "obs", "[", "0", "]", "[", "\"site_url\"", "]", ".", "endswith", "(", "\"great_expectations/uncommitted/data_docs/local_site/index.html\"", ")" ]
[ 265, 0 ]
[ 280, 5 ]
python
en
['en', 'error', 'th']
False
LoadEdges
(filename, targets)
Load the edges map from the dump file, and filter it to only show targets in |targets| and their dependents.
Load the edges map from the dump file, and filter it to only show targets in |targets| and their dependents.
def LoadEdges(filename, targets):
    """Load the edges map from the dump file, and filter it to only
    show targets in |targets| and their dependents."""

    file = open(filename)
    edges = json.load(file)
    file.close()

    # Copy out only the edges we're interested in from the full edge list.
    target_edges = {}
    to_visit = targets[:]
    while to_visit:
        src = to_visit.pop()
        if src in target_edges:
            continue
        target_edges[src] = edges[src]
        to_visit.extend(edges[src])

    return target_edges
[ "def", "LoadEdges", "(", "filename", ",", "targets", ")", ":", "file", "=", "open", "(", "'dump.json'", ")", "edges", "=", "json", ".", "load", "(", "file", ")", "file", ".", "close", "(", ")", "# Copy out only the edges we're interested in from the full edge list.", "target_edges", "=", "{", "}", "to_visit", "=", "targets", "[", ":", "]", "while", "to_visit", ":", "src", "=", "to_visit", ".", "pop", "(", ")", "if", "src", "in", "target_edges", ":", "continue", "target_edges", "[", "src", "]", "=", "edges", "[", "src", "]", "to_visit", ".", "extend", "(", "edges", "[", "src", "]", ")", "return", "target_edges" ]
[ 21, 0 ]
[ 39, 21 ]
python
en
['en', 'en', 'en']
True
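A hedged usage sketch (the dump filename and target string are illustrative; gyp target strings generally look like 'path/to/file.gyp:target#toolset'):

# Filter the full dependency dump down to one target and everything it reaches.
edges = LoadEdges('dump.json', ['src/chrome/chrome.gyp:chrome#target'])
for src, deps in edges.items():
    print('%s -> %d direct dependencies' % (src, len(deps)))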
WriteGraph
(edges)
Print a graphviz graph to stdout. |edges| is a map of target to a list of other targets it depends on.
Print a graphviz graph to stdout. |edges| is a map of target to a list of other targets it depends on.
def WriteGraph(edges):
    """Print a graphviz graph to stdout.
    |edges| is a map of target to a list of other targets it depends on."""

    # Bucket targets by file.
    files = collections.defaultdict(list)
    for src, dst in edges.items():
        build_file, target_name, toolset = ParseTarget(src)
        files[build_file].append(src)

    print 'digraph D {'
    print '  fontsize=8'  # Used by subgraphs.
    print '  node [fontsize=8]'

    # Output nodes by file.  We must first write out each node within
    # its file grouping before writing out any edges that may refer
    # to those nodes.
    for filename, targets in files.items():
        if len(targets) == 1:
            # If there's only one node for this file, simplify
            # the display by making it a box without an internal node.
            target = targets[0]
            build_file, target_name, toolset = ParseTarget(target)
            print '  "%s" [shape=box, label="%s\\n%s"]' % (target, filename, target_name)
        else:
            # Group multiple nodes together in a subgraph.
            print '  subgraph "cluster_%s" {' % filename
            print '    label = "%s"' % filename
            for target in targets:
                build_file, target_name, toolset = ParseTarget(target)
                print '    "%s" [label="%s"]' % (target, target_name)
            print '  }'

    # Now that we've placed all the nodes within subgraphs, output all
    # the edges between nodes.
    for src, dsts in edges.items():
        for dst in dsts:
            print '  "%s" -> "%s"' % (src, dst)

    print '}'
[ "def", "WriteGraph", "(", "edges", ")", ":", "# Bucket targets by file.", "files", "=", "collections", ".", "defaultdict", "(", "list", ")", "for", "src", ",", "dst", "in", "edges", ".", "items", "(", ")", ":", "build_file", ",", "target_name", ",", "toolset", "=", "ParseTarget", "(", "src", ")", "files", "[", "build_file", "]", ".", "append", "(", "src", ")", "print", "'digraph D {'", "print", "' fontsize=8'", "# Used by subgraphs.", "print", "' node [fontsize=8]'", "# Output nodes by file. We must first write out each node within", "# its file grouping before writing out any edges that may refer", "# to those nodes.", "for", "filename", ",", "targets", "in", "files", ".", "items", "(", ")", ":", "if", "len", "(", "targets", ")", "==", "1", ":", "# If there's only one node for this file, simplify", "# the display by making it a box without an internal node.", "target", "=", "targets", "[", "0", "]", "build_file", ",", "target_name", ",", "toolset", "=", "ParseTarget", "(", "target", ")", "print", "' \"%s\" [shape=box, label=\"%s\\\\n%s\"]'", "%", "(", "target", ",", "filename", ",", "target_name", ")", "else", ":", "# Group multiple nodes together in a subgraph.", "print", "' subgraph \"cluster_%s\" {'", "%", "filename", "print", "' label = \"%s\"'", "%", "filename", "for", "target", "in", "targets", ":", "build_file", ",", "target_name", ",", "toolset", "=", "ParseTarget", "(", "target", ")", "print", "' \"%s\" [label=\"%s\"]'", "%", "(", "target", ",", "target_name", ")", "print", "' }'", "# Now that we've placed all the nodes within subgraphs, output all", "# the edges between nodes.", "for", "src", ",", "dsts", "in", "edges", ".", "items", "(", ")", ":", "for", "dst", "in", "dsts", ":", "print", "' \"%s\" -> \"%s\"'", "%", "(", "src", ",", "dst", ")", "print", "'}'" ]
[ 42, 0 ]
[ 82, 11 ]
python
en
['en', 'ht', 'it']
False
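A hedged end-to-end sketch of how the two helpers above compose (assumes graphviz's dot is on PATH; the target string and shell line are illustrative):

# Shell usage (illustrative): python graphviz.py 'some.gyp:target#toolset' | dot -Tpng -o deps.png
edges = LoadEdges('dump.json', ['some.gyp:target#toolset'])
WriteGraph(edges)  # emits the digraph on stdout for piping into `dot`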
execute_shell_command
(command: str)
Execute a shell (bash in the present case) command from inside a Python program. While developed independently, this function is very similar to the one offered in this StackOverflow article: https://stackoverflow.com/questions/30993411/environment-variables-using-subprocess-check-output-python :param command: bash command -- as if typed in a shell/Terminal window :return: status code -- 0 if successful; all other values (1 is the most common) indicate an error
Execute a shell (bash in the present case) command from inside a Python program.
def execute_shell_command(command: str) -> int:
    """
    Execute a shell (bash in the present case) command from inside a Python program.

    While developed independently, this function is very similar to the one offered in this StackOverflow article:
    https://stackoverflow.com/questions/30993411/environment-variables-using-subprocess-check-output-python

    :param command: bash command -- as if typed in a shell/Terminal window
    :return: status code -- 0 if successful; all other values (1 is the most common) indicate an error
    """
    cwd: str = os.getcwd()

    path_env_var: str = os.pathsep.join([os.environ.get("PATH", os.defpath), cwd])
    env: dict = dict(os.environ, PATH=path_env_var)

    status_code: int = 0
    try:
        res: CompletedProcess = run(
            args=["bash", "-c", command],
            stdin=None,
            input=None,
            stdout=None,
            stderr=None,
            capture_output=True,
            shell=False,
            cwd=cwd,
            timeout=None,
            check=True,
            encoding=None,
            errors=None,
            text=None,
            env=env,
            universal_newlines=True,
        )
        sh_out: str = res.stdout.strip()
        logger.info(sh_out)
    except CalledProcessError as cpe:
        status_code = cpe.returncode
        sys.stderr.write(cpe.output)
        sys.stderr.flush()
        exception_message: str = "A Sub-Process call Exception occurred.\n"
        exception_traceback: str = traceback.format_exc()
        exception_message += (
            f'{type(cpe).__name__}: "{str(cpe)}". Traceback: "{exception_traceback}".'
        )
        logger.error(exception_message)

    return status_code
[ "def", "execute_shell_command", "(", "command", ":", "str", ")", "->", "int", ":", "cwd", ":", "str", "=", "os", ".", "getcwd", "(", ")", "path_env_var", ":", "str", "=", "os", ".", "pathsep", ".", "join", "(", "[", "os", ".", "environ", ".", "get", "(", "\"PATH\"", ",", "os", ".", "defpath", ")", ",", "cwd", "]", ")", "env", ":", "dict", "=", "dict", "(", "os", ".", "environ", ",", "PATH", "=", "path_env_var", ")", "status_code", ":", "int", "=", "0", "try", ":", "res", ":", "CompletedProcess", "=", "run", "(", "args", "=", "[", "\"bash\"", ",", "\"-c\"", ",", "command", "]", ",", "stdin", "=", "None", ",", "input", "=", "None", ",", "stdout", "=", "None", ",", "stderr", "=", "None", ",", "capture_output", "=", "True", ",", "shell", "=", "False", ",", "cwd", "=", "cwd", ",", "timeout", "=", "None", ",", "check", "=", "True", ",", "encoding", "=", "None", ",", "errors", "=", "None", ",", "text", "=", "None", ",", "env", "=", "env", ",", "universal_newlines", "=", "True", ",", ")", "sh_out", ":", "str", "=", "res", ".", "stdout", ".", "strip", "(", ")", "logger", ".", "info", "(", "sh_out", ")", "except", "CalledProcessError", "as", "cpe", ":", "status_code", "=", "cpe", ".", "returncode", "sys", ".", "stderr", ".", "write", "(", "cpe", ".", "output", ")", "sys", ".", "stderr", ".", "flush", "(", ")", "exception_message", ":", "str", "=", "\"A Sub-Process call Exception occurred.\\n\"", "exception_traceback", ":", "str", "=", "traceback", ".", "format_exc", "(", ")", "exception_message", "+=", "(", "f'{type(cpe).__name__}: \"{str(cpe)}\". Traceback: \"{exception_traceback}\".'", ")", "logger", ".", "error", "(", "exception_message", ")", "return", "status_code" ]
[ 12, 0 ]
[ 59, 22 ]
python
en
['en', 'error', 'th']
False
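A hedged usage sketch (the command string is illustrative; only the return-code contract comes from the docstring above):

status: int = execute_shell_command("ls -la && echo done")
if status != 0:
    print(f"shell command failed with exit code {status}")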
execute_shell_command_with_progress_polling
(command: str)
Execute a shell (bash in the present case) command from inside a Python program with polling (to enable a progress bar). :param command: bash command -- as if typed in a shell/Terminal window :return: status code -- 0 if successful; all other values (1 is the most common) indicate an error
Execute a shell (bash in the present case) command from inside a Python program with polling (to enable a progress bar).
def execute_shell_command_with_progress_polling(command: str) -> int:
    """
    Execute a shell (bash in the present case) command from inside a Python program with polling (to enable a progress bar).

    :param command: bash command -- as if typed in a shell/Terminal window
    :return: status code -- 0 if successful; all other values (1 is the most common) indicate an error
    """
    cwd: str = os.getcwd()

    path_env_var: str = os.pathsep.join([os.environ.get("PATH", os.defpath), cwd])
    env: dict = dict(os.environ, PATH=path_env_var)

    status_code: int

    bar_length_100_percent: int = 100
    max_work_amount: int = bar_length_100_percent

    poll_period_seconds: int = 1

    gathered: int = 0
    progress: float
    with click.progressbar(length=bar_length_100_percent, label=command) as bar:
        try:
            with Popen(
                args=["bash", "-c", command],
                bufsize=-1,
                executable=None,
                stdin=None,
                stdout=PIPE,
                stderr=PIPE,
                preexec_fn=None,
                close_fds=True,
                shell=False,
                cwd=cwd,
                env=env,
                universal_newlines=True,
                startupinfo=None,
                creationflags=0,
                restore_signals=True,
                start_new_session=False,
                pass_fds=(),
                encoding=None,
                errors=None,
            ) as proc:
                poll_status_code: Optional[int] = proc.poll()
                poll_stdout: str = proc.stdout.readline()
                while poll_status_code is None:
                    gathered += max([len(poll_stdout), poll_period_seconds])
                    progress = float(gathered) / max_work_amount
                    excess: float = progress - 1.0
                    if excess > 0:
                        if 0.0 < excess <= 1.0:
                            max_work_amount += 2.0 * excess * max_work_amount
                        elif 1.0 < excess <= 2.0:
                            max_work_amount += 5.0 * excess * max_work_amount
                        elif 2.0 < excess <= 1.0e1:
                            max_work_amount += 1.0e1 * excess * max_work_amount
                        else:
                            max_work_amount += 1.0e2 * excess * max_work_amount
                        progress = float(gathered) / max_work_amount
                    bar.pos = int(progress * (bar_length_100_percent - 1)) + 1
                    bar.update(0)
                    time.sleep(poll_period_seconds)
                    poll_status_code = proc.poll()
                    poll_stdout = proc.stdout.readline()
                status_code = proc.returncode
                if status_code != poll_status_code:
                    status_code = 1
                else:
                    bar.pos = bar_length_100_percent
                    bar.update(0)
        except CalledProcessError as cpe:
            status_code = cpe.returncode
            sys.stderr.write(cpe.output)
            sys.stderr.flush()
            exception_message: str = "A Sub-Process call Exception occurred.\n"
            exception_traceback: str = traceback.format_exc()
            exception_message += f'{type(cpe).__name__}: "{str(cpe)}". Traceback: "{exception_traceback}".'
            logger.error(exception_message)

    return status_code
[ "def", "execute_shell_command_with_progress_polling", "(", "command", ":", "str", ")", "->", "int", ":", "cwd", ":", "str", "=", "os", ".", "getcwd", "(", ")", "path_env_var", ":", "str", "=", "os", ".", "pathsep", ".", "join", "(", "[", "os", ".", "environ", ".", "get", "(", "\"PATH\"", ",", "os", ".", "defpath", ")", ",", "cwd", "]", ")", "env", ":", "dict", "=", "dict", "(", "os", ".", "environ", ",", "PATH", "=", "path_env_var", ")", "status_code", ":", "int", "bar_length_100_percent", ":", "int", "=", "100", "max_work_amount", ":", "int", "=", "bar_length_100_percent", "poll_period_seconds", ":", "int", "=", "1", "gathered", ":", "int", "=", "0", "progress", ":", "float", "with", "click", ".", "progressbar", "(", "length", "=", "bar_length_100_percent", ",", "label", "=", "command", ")", "as", "bar", ":", "try", ":", "with", "Popen", "(", "args", "=", "[", "\"bash\"", ",", "\"-c\"", ",", "command", "]", ",", "bufsize", "=", "-", "1", ",", "executable", "=", "None", ",", "stdin", "=", "None", ",", "stdout", "=", "PIPE", ",", "stderr", "=", "PIPE", ",", "preexec_fn", "=", "None", ",", "close_fds", "=", "True", ",", "shell", "=", "False", ",", "cwd", "=", "cwd", ",", "env", "=", "env", ",", "universal_newlines", "=", "True", ",", "startupinfo", "=", "None", ",", "creationflags", "=", "0", ",", "restore_signals", "=", "True", ",", "start_new_session", "=", "False", ",", "pass_fds", "=", "(", ")", ",", "encoding", "=", "None", ",", "errors", "=", "None", ",", ")", "as", "proc", ":", "poll_status_code", ":", "Optional", "[", "int", "]", "=", "proc", ".", "poll", "(", ")", "poll_stdout", ":", "str", "=", "proc", ".", "stdout", ".", "readline", "(", ")", "while", "poll_status_code", "is", "None", ":", "gathered", "+=", "max", "(", "[", "len", "(", "poll_stdout", ")", ",", "poll_period_seconds", "]", ")", "progress", "=", "float", "(", "gathered", ")", "/", "max_work_amount", "excess", ":", "float", "=", "progress", "-", "1.0", "if", "excess", ">", "0", ":", "if", "0.0", "<", "excess", "<=", "1.0", ":", "max_work_amount", "+=", "2.0", "*", "excess", "*", "max_work_amount", "elif", "1.0", "<", "excess", "<=", "2.0", ":", "max_work_amount", "+=", "5.0", "*", "excess", "*", "max_work_amount", "elif", "2.0", "<", "excess", "<=", "1.0e1", ":", "max_work_amount", "+=", "1.0e1", "*", "excess", "*", "max_work_amount", "else", ":", "max_work_amount", "+=", "1.0e2", "*", "excess", "*", "max_work_amount", "progress", "=", "float", "(", "gathered", ")", "/", "max_work_amount", "bar", ".", "pos", "=", "int", "(", "progress", "*", "(", "bar_length_100_percent", "-", "1", ")", ")", "+", "1", "bar", ".", "update", "(", "0", ")", "time", ".", "sleep", "(", "poll_period_seconds", ")", "poll_status_code", "=", "proc", ".", "poll", "(", ")", "poll_stdout", "=", "proc", ".", "stdout", ".", "readline", "(", ")", "status_code", "=", "proc", ".", "returncode", "if", "status_code", "!=", "poll_status_code", ":", "status_code", "=", "1", "else", ":", "bar", ".", "pos", "=", "bar_length_100_percent", "bar", ".", "update", "(", "0", ")", "except", "CalledProcessError", "as", "cpe", ":", "status_code", "=", "cpe", ".", "returncode", "sys", ".", "stderr", ".", "write", "(", "cpe", ".", "output", ")", "sys", ".", "stderr", ".", "flush", "(", ")", "exception_message", ":", "str", "=", "\"A Sub-Process call Exception occurred.\\n\"", "exception_traceback", ":", "str", "=", "traceback", ".", "format_exc", "(", ")", "exception_message", "+=", "f'{type(cpe).__name__}: \"{str(cpe)}\". 
Traceback: \"{exception_traceback}\".'", "logger", ".", "error", "(", "exception_message", ")", "return", "status_code" ]
[ 62, 0 ]
[ 143, 22 ]
python
en
['en', 'error', 'th']
False
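The adaptive bar-length heuristic above is the least obvious part of the function; below is a hedged standalone mirror of just that rule (the helper and the worked number are mine; the growth constants are copied from the code):

def rescale(gathered: float, max_work: float) -> float:
    # Mirror of the function's growth rule: when apparent progress exceeds
    # 100%, inflate the estimated total work so the bar never overflows.
    excess = gathered / max_work - 1.0
    if excess > 0:
        if excess <= 1.0:
            max_work += 2.0 * excess * max_work
        elif excess <= 2.0:
            max_work += 5.0 * excess * max_work
        elif excess <= 10.0:
            max_work += 10.0 * excess * max_work
        else:
            max_work += 100.0 * excess * max_work
    return max_work

# e.g. gathered=150, max_work=100 -> excess=0.5 -> new max_work = 100 + 2*0.5*100 = 200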
DependencyGraph.traverse
(self)
Returns the items in this graph in topological order.
Returns the items in this graph in topological order.
def traverse(self) -> Iterator[Any]:
    """
    Returns the items in this graph in topological order.
    """
    if len(self.vertices) == 0:
        return

    # This method implements Kahn's algorithm.  See
    # https://en.wikipedia.org/wiki/Topological_sorting#Kahn's_algorithm for
    # more information.

    # Create a copy of the counts of each inbound edge so we can mutate them.
    in_counts = {obj: vertex.in_count for obj, vertex in self.vertices.items()}

    # Find the roots of the graph.
    queue = [obj for obj, in_count in in_counts.items() if in_count == 0]

    # No roots of a graph with at least one vertex indicates a cycle.
    if len(queue) == 0:
        raise ValueError('cyclic')

    while len(queue) > 0:
        cur = queue.pop(0)
        yield cur

        for obj in self.vertices[cur].out:
            in_counts[obj] -= 1
            if in_counts[obj] == 0:
                queue.append(obj)

    assert sum(in_counts.values()) == 0, 'Traversal did not reach every vertex exactly once'
[ "def", "traverse", "(", "self", ")", "->", "Iterator", "[", "Any", "]", ":", "if", "len", "(", "self", ".", "vertices", ")", "==", "0", ":", "return", "# This method implements Kahn's algorithm. See", "# https://en.wikipedia.org/wiki/Topological_sorting#Kahn's_algorithm for", "# more information.", "# Create a copy of the counts of each inbound edge so we can mutate", "# them.", "in_counts", "=", "{", "obj", ":", "vertex", ".", "in_count", "for", "obj", ",", "vertex", "in", "self", ".", "vertices", ".", "items", "(", ")", "}", "# Find the roots of the graph.", "queue", "=", "[", "obj", "for", "obj", ",", "in_count", "in", "in_counts", ".", "items", "(", ")", "if", "in_count", "==", "0", "]", "# No roots of a graph with at least one vertex indicates a cycle.", "if", "len", "(", "queue", ")", "==", "0", ":", "raise", "ValueError", "(", "'cyclic'", ")", "while", "len", "(", "queue", ")", ">", "0", ":", "cur", "=", "queue", ".", "pop", "(", "0", ")", "yield", "cur", "for", "obj", "in", "self", ".", "vertices", "[", "cur", "]", ".", "out", ":", "in_counts", "[", "obj", "]", "-=", "1", "if", "in_counts", "[", "obj", "]", "==", "0", ":", "queue", ".", "append", "(", "obj", ")", "assert", "sum", "(", "in_counts", ".", "values", "(", ")", ")", "==", "0", ",", "'Traversal did not reach every vertex exactly once'" ]
[ 121, 4 ]
[ 153, 96 ]
python
en
['en', 'error', 'th']
False
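A self-contained, hedged illustration of the same Kahn's-algorithm idea on a plain adjacency dict (independent of the DependencyGraph class above; all names here are mine):

from collections import deque
from typing import Dict, Iterator, List

def kahn_order(graph: Dict[str, List[str]]) -> Iterator[str]:
    # Count inbound edges for every vertex, including ones that only
    # appear as destinations.
    indeg: Dict[str, int] = {v: 0 for v in graph}
    for outs in graph.values():
        for v in outs:
            indeg[v] = indeg.get(v, 0) + 1
    queue = deque(v for v, d in indeg.items() if d == 0)
    if indeg and not queue:
        raise ValueError('cyclic')  # no roots at all
    seen = 0
    while queue:
        cur = queue.popleft()
        seen += 1
        yield cur
        for nxt in graph.get(cur, []):
            indeg[nxt] -= 1
            if indeg[nxt] == 0:
                queue.append(nxt)
    if seen != len(indeg):
        raise ValueError('cyclic')  # a cycle survived past the roots

# list(kahn_order({'a': ['b', 'c'], 'b': ['c'], 'c': []})) -> ['a', 'b', 'c']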
get_synthetic_preds
(synthetic_data_func, n=1000, estimators={})
Generate predictions for synthetic data using the specified function (single simulation) Args: synthetic_data_func (function): synthetic data generation function n (int, optional): number of samples estimators (dict of object): dict of names and objects of treatment effect estimators Returns: (dict): dict of the actual and estimates of treatment effects
Generate predictions for synthetic data using the specified function (single simulation)
def get_synthetic_preds(synthetic_data_func, n=1000, estimators={}):
    """Generate predictions for synthetic data using the specified function (single simulation)

    Args:
        synthetic_data_func (function): synthetic data generation function
        n (int, optional): number of samples
        estimators (dict of object): dict of names and objects of treatment effect estimators

    Returns:
        (dict): dict of the actual and estimates of treatment effects
    """
    y, X, w, tau, b, e = synthetic_data_func(n=n)

    preds_dict = {}
    preds_dict[KEY_ACTUAL] = tau
    preds_dict[KEY_GENERATED_DATA] = {'y': y, 'X': X, 'w': w, 'tau': tau, 'b': b, 'e': e}

    # Predict p_hat because e would not be directly observed in real-life
    p_model = ElasticNetPropensityModel()
    p_hat = p_model.fit_predict(X, w)

    if estimators:
        for name, learner in estimators.items():
            try:
                preds_dict[name] = learner.fit_predict(X=X, treatment=w, y=y, p=p_hat).flatten()
            except TypeError:
                preds_dict[name] = learner.fit_predict(X=X, treatment=w, y=y).flatten()
    else:
        for base_learner, label_l in zip([BaseSRegressor, BaseTRegressor, BaseXRegressor, BaseRRegressor],
                                         ['S', 'T', 'X', 'R']):
            for model, label_m in zip([LinearRegression, XGBRegressor], ['LR', 'XGB']):
                learner = base_learner(model())
                model_name = '{} Learner ({})'.format(label_l, label_m)
                try:
                    preds_dict[model_name] = learner.fit_predict(X=X, treatment=w, y=y, p=p_hat).flatten()
                except TypeError:
                    preds_dict[model_name] = learner.fit_predict(X=X, treatment=w, y=y).flatten()

        learner = CausalTreeRegressor(random_state=RANDOM_SEED)
        preds_dict['Causal Tree'] = learner.fit_predict(X=X, treatment=w, y=y).flatten()

    return preds_dict
[ "def", "get_synthetic_preds", "(", "synthetic_data_func", ",", "n", "=", "1000", ",", "estimators", "=", "{", "}", ")", ":", "y", ",", "X", ",", "w", ",", "tau", ",", "b", ",", "e", "=", "synthetic_data_func", "(", "n", "=", "n", ")", "preds_dict", "=", "{", "}", "preds_dict", "[", "KEY_ACTUAL", "]", "=", "tau", "preds_dict", "[", "KEY_GENERATED_DATA", "]", "=", "{", "'y'", ":", "y", ",", "'X'", ":", "X", ",", "'w'", ":", "w", ",", "'tau'", ":", "tau", ",", "'b'", ":", "b", ",", "'e'", ":", "e", "}", "# Predict p_hat because e would not be directly observed in real-life", "p_model", "=", "ElasticNetPropensityModel", "(", ")", "p_hat", "=", "p_model", ".", "fit_predict", "(", "X", ",", "w", ")", "if", "estimators", ":", "for", "name", ",", "learner", "in", "estimators", ".", "items", "(", ")", ":", "try", ":", "preds_dict", "[", "name", "]", "=", "learner", ".", "fit_predict", "(", "X", "=", "X", ",", "treatment", "=", "w", ",", "y", "=", "y", ",", "p", "=", "p_hat", ")", ".", "flatten", "(", ")", "except", "TypeError", ":", "preds_dict", "[", "name", "]", "=", "learner", ".", "fit_predict", "(", "X", "=", "X", ",", "treatment", "=", "w", ",", "y", "=", "y", ")", ".", "flatten", "(", ")", "else", ":", "for", "base_learner", ",", "label_l", "in", "zip", "(", "[", "BaseSRegressor", ",", "BaseTRegressor", ",", "BaseXRegressor", ",", "BaseRRegressor", "]", ",", "[", "'S'", ",", "'T'", ",", "'X'", ",", "'R'", "]", ")", ":", "for", "model", ",", "label_m", "in", "zip", "(", "[", "LinearRegression", ",", "XGBRegressor", "]", ",", "[", "'LR'", ",", "'XGB'", "]", ")", ":", "learner", "=", "base_learner", "(", "model", "(", ")", ")", "model_name", "=", "'{} Learner ({})'", ".", "format", "(", "label_l", ",", "label_m", ")", "try", ":", "preds_dict", "[", "model_name", "]", "=", "learner", ".", "fit_predict", "(", "X", "=", "X", ",", "treatment", "=", "w", ",", "y", "=", "y", ",", "p", "=", "p_hat", ")", ".", "flatten", "(", ")", "except", "TypeError", ":", "preds_dict", "[", "model_name", "]", "=", "learner", ".", "fit_predict", "(", "X", "=", "X", ",", "treatment", "=", "w", ",", "y", "=", "y", ")", ".", "flatten", "(", ")", "learner", "=", "CausalTreeRegressor", "(", "random_state", "=", "RANDOM_SEED", ")", "preds_dict", "[", "'Causal Tree'", "]", "=", "learner", ".", "fit_predict", "(", "X", "=", "X", ",", "treatment", "=", "w", ",", "y", "=", "y", ")", ".", "flatten", "(", ")", "return", "preds_dict" ]
[ 29, 0 ]
[ 70, 21 ]
python
en
['en', 'en', 'en']
True
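A hedged usage sketch (treating simulate_nuisance_and_easy_treatment as one of the synthetic-data generators exposed by causalml.dataset is an assumption, not taken from this record):

from causalml.dataset import simulate_nuisance_and_easy_treatment

preds = get_synthetic_preds(simulate_nuisance_and_easy_treatment, n=1000)
# preds maps learner labels such as 'S Learner (LR)' -- plus the KEY_ACTUAL
# and KEY_GENERATED_DATA entries -- to flat arrays of treatment-effect estimates.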
get_synthetic_summary
(synthetic_data_func, n=1000, k=1, estimators={})
Generate a summary for predictions on synthetic data using the specified function Args: synthetic_data_func (function): synthetic data generation function n (int, optional): number of samples per simulation k (int, optional): number of simulations
Generate a summary for predictions on synthetic data using the specified function
def get_synthetic_summary(synthetic_data_func, n=1000, k=1, estimators={}):
    """Generate a summary for predictions on synthetic data using the specified function

    Args:
        synthetic_data_func (function): synthetic data generation function
        n (int, optional): number of samples per simulation
        k (int, optional): number of simulations
    """
    summaries = []

    for i in range(k):
        synthetic_preds = get_synthetic_preds(synthetic_data_func, n=n, estimators=estimators)
        actuals = synthetic_preds[KEY_ACTUAL]
        synthetic_summary = pd.DataFrame({label: [preds.mean(), mse(preds, actuals)] for label, preds
                                          in synthetic_preds.items() if label != KEY_GENERATED_DATA},
                                         index=['ATE', 'MSE']).T
        synthetic_summary['Abs % Error of ATE'] = np.abs(
            (synthetic_summary['ATE'] / synthetic_summary.loc[KEY_ACTUAL, 'ATE']) - 1)

        for label in synthetic_summary.index:
            stacked_values = np.hstack((synthetic_preds[label], actuals))
            stacked_low = np.percentile(stacked_values, 0.1)
            stacked_high = np.percentile(stacked_values, 99.9)
            bins = np.linspace(stacked_low, stacked_high, 100)

            distr = np.histogram(synthetic_preds[label], bins=bins)[0]
            distr = np.clip(distr / distr.sum(), 0.001, 0.999)
            true_distr = np.histogram(actuals, bins=bins)[0]
            true_distr = np.clip(true_distr / true_distr.sum(), 0.001, 0.999)

            kl = entropy(distr, true_distr)
            synthetic_summary.loc[label, 'KL Divergence'] = kl

        summaries.append(synthetic_summary)

    summary = sum(summaries) / k
    return summary[['Abs % Error of ATE', 'MSE', 'KL Divergence']]
[ "def", "get_synthetic_summary", "(", "synthetic_data_func", ",", "n", "=", "1000", ",", "k", "=", "1", ",", "estimators", "=", "{", "}", ")", ":", "summaries", "=", "[", "]", "for", "i", "in", "range", "(", "k", ")", ":", "synthetic_preds", "=", "get_synthetic_preds", "(", "synthetic_data_func", ",", "n", "=", "n", ",", "estimators", "=", "estimators", ")", "actuals", "=", "synthetic_preds", "[", "KEY_ACTUAL", "]", "synthetic_summary", "=", "pd", ".", "DataFrame", "(", "{", "label", ":", "[", "preds", ".", "mean", "(", ")", ",", "mse", "(", "preds", ",", "actuals", ")", "]", "for", "label", ",", "preds", "in", "synthetic_preds", ".", "items", "(", ")", "if", "label", "!=", "KEY_GENERATED_DATA", "}", ",", "index", "=", "[", "'ATE'", ",", "'MSE'", "]", ")", ".", "T", "synthetic_summary", "[", "'Abs % Error of ATE'", "]", "=", "np", ".", "abs", "(", "(", "synthetic_summary", "[", "'ATE'", "]", "/", "synthetic_summary", ".", "loc", "[", "KEY_ACTUAL", ",", "'ATE'", "]", ")", "-", "1", ")", "for", "label", "in", "synthetic_summary", ".", "index", ":", "stacked_values", "=", "np", ".", "hstack", "(", "(", "synthetic_preds", "[", "label", "]", ",", "actuals", ")", ")", "stacked_low", "=", "np", ".", "percentile", "(", "stacked_values", ",", "0.1", ")", "stacked_high", "=", "np", ".", "percentile", "(", "stacked_values", ",", "99.9", ")", "bins", "=", "np", ".", "linspace", "(", "stacked_low", ",", "stacked_high", ",", "100", ")", "distr", "=", "np", ".", "histogram", "(", "synthetic_preds", "[", "label", "]", ",", "bins", "=", "bins", ")", "[", "0", "]", "distr", "=", "np", ".", "clip", "(", "distr", "/", "distr", ".", "sum", "(", ")", ",", "0.001", ",", "0.999", ")", "true_distr", "=", "np", ".", "histogram", "(", "actuals", ",", "bins", "=", "bins", ")", "[", "0", "]", "true_distr", "=", "np", ".", "clip", "(", "true_distr", "/", "true_distr", ".", "sum", "(", ")", ",", "0.001", ",", "0.999", ")", "kl", "=", "entropy", "(", "distr", ",", "true_distr", ")", "synthetic_summary", ".", "loc", "[", "label", ",", "'KL Divergence'", "]", "=", "kl", "summaries", ".", "append", "(", "synthetic_summary", ")", "summary", "=", "sum", "(", "summaries", ")", "/", "k", "return", "summary", "[", "[", "'Abs % Error of ATE'", ",", "'MSE'", ",", "'KL Divergence'", "]", "]" ]
[ 73, 0 ]
[ 110, 66 ]
python
en
['en', 'en', 'en']
True
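The KL Divergence column is computed from clipped histograms; below is a hedged mini-example of that computation in isolation (scipy's entropy(p, q) computes the KL divergence of p from q; the data here are synthetic stand-ins):

import numpy as np
from scipy.stats import entropy

preds = np.random.normal(0.5, 0.1, size=1000)    # stand-in learner estimates
actuals = np.random.normal(0.5, 0.2, size=1000)  # stand-in true effects
lo, hi = np.percentile(np.hstack((preds, actuals)), [0.1, 99.9])
bins = np.linspace(lo, hi, 100)
p = np.histogram(preds, bins=bins)[0]
p = np.clip(p / p.sum(), 0.001, 0.999)
q = np.histogram(actuals, bins=bins)[0]
q = np.clip(q / q.sum(), 0.001, 0.999)
kl = entropy(p, q)  # lower means the learner's distribution tracks the truth more closely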
scatter_plot_summary
(synthetic_summary, k, drop_learners=[], drop_cols=[])
Generates a scatter plot comparing learner performance. Each learner's performance is plotted as a point in the (Abs % Error of ATE, MSE) space. Args: synthetic_summary (pd.DataFrame): summary generated by get_synthetic_summary() k (int): number of simulations (used only for plot title text) drop_learners (list, optional): list of learners (str) to omit when plotting drop_cols (list, optional): list of metrics (str) to omit when plotting
Generates a scatter plot comparing learner performance. Each learner's performance is plotted as a point in the (Abs % Error of ATE, MSE) space.
def scatter_plot_summary(synthetic_summary, k, drop_learners=[], drop_cols=[]):
    """Generates a scatter plot comparing learner performance. Each learner's performance is plotted as a point in the
    (Abs % Error of ATE, MSE) space.

    Args:
        synthetic_summary (pd.DataFrame): summary generated by get_synthetic_summary()
        k (int): number of simulations (used only for plot title text)
        drop_learners (list, optional): list of learners (str) to omit when plotting
        drop_cols (list, optional): list of metrics (str) to omit when plotting
    """
    plot_data = synthetic_summary.drop(drop_learners).drop(drop_cols, axis=1)

    fig, ax = plt.subplots()
    fig.set_size_inches(12, 8)
    xs = plot_data['Abs % Error of ATE']
    ys = plot_data['MSE']

    ax.scatter(xs, ys)

    ylim = ax.get_ylim()
    xlim = ax.get_xlim()

    for i, txt in enumerate(plot_data.index):
        ax.annotate(txt, (xs[i] - np.random.binomial(1, 0.5) * xlim[1] * 0.04, ys[i] - ylim[1] * 0.03))

    ax.set_xlabel('Abs % Error of ATE')
    ax.set_ylabel('MSE')
    ax.set_title('Learner Performance (averaged over k={} simulations)'.format(k))
[ "def", "scatter_plot_summary", "(", "synthetic_summary", ",", "k", ",", "drop_learners", "=", "[", "]", ",", "drop_cols", "=", "[", "]", ")", ":", "plot_data", "=", "synthetic_summary", ".", "drop", "(", "drop_learners", ")", ".", "drop", "(", "drop_cols", ",", "axis", "=", "1", ")", "fig", ",", "ax", "=", "plt", ".", "subplots", "(", ")", "fig", ".", "set_size_inches", "(", "12", ",", "8", ")", "xs", "=", "plot_data", "[", "'Abs % Error of ATE'", "]", "ys", "=", "plot_data", "[", "'MSE'", "]", "ax", ".", "scatter", "(", "xs", ",", "ys", ")", "ylim", "=", "ax", ".", "get_ylim", "(", ")", "xlim", "=", "ax", ".", "get_xlim", "(", ")", "for", "i", ",", "txt", "in", "enumerate", "(", "plot_data", ".", "index", ")", ":", "ax", ".", "annotate", "(", "txt", ",", "(", "xs", "[", "i", "]", "-", "np", ".", "random", ".", "binomial", "(", "1", ",", "0.5", ")", "*", "xlim", "[", "1", "]", "*", "0.04", ",", "ys", "[", "i", "]", "-", "ylim", "[", "1", "]", "*", "0.03", ")", ")", "ax", ".", "set_xlabel", "(", "'Abs % Error of ATE'", ")", "ax", ".", "set_ylabel", "(", "'MSE'", ")", "ax", ".", "set_title", "(", "'Learner Performance (averaged over k={} simulations)'", ".", "format", "(", "k", ")", ")" ]
[ 113, 0 ]
[ 140, 82 ]
python
en
['en', 'en', 'en']
True
bar_plot_summary
(synthetic_summary, k, drop_learners=[], drop_cols=[], sort_cols=['MSE', 'Abs % Error of ATE'])
Generates a bar plot comparing learner performance. Args: synthetic_summary (pd.DataFrame): summary generated by get_synthetic_summary() k (int): number of simulations (used only for plot title text) drop_learners (list, optional): list of learners (str) to omit when plotting drop_cols (list, optional): list of metrics (str) to omit when plotting sort_cols (list, optional): list of metrics (str) to sort on when plotting
Generates a bar plot comparing learner performance.
def bar_plot_summary(synthetic_summary, k, drop_learners=[], drop_cols=[], sort_cols=['MSE', 'Abs % Error of ATE']):
    """Generates a bar plot comparing learner performance.

    Args:
        synthetic_summary (pd.DataFrame): summary generated by get_synthetic_summary()
        k (int): number of simulations (used only for plot title text)
        drop_learners (list, optional): list of learners (str) to omit when plotting
        drop_cols (list, optional): list of metrics (str) to omit when plotting
        sort_cols (list, optional): list of metrics (str) to sort on when plotting
    """
    plot_data = synthetic_summary.sort_values(sort_cols, ascending=True)
    plot_data = plot_data.drop(drop_learners + [KEY_ACTUAL]).drop(drop_cols, axis=1)

    plot_data.plot(kind='bar', figsize=(12, 8))
    plt.xticks(rotation=30)
    plt.title('Learner Performance (averaged over k={} simulations)'.format(k))
[ "def", "bar_plot_summary", "(", "synthetic_summary", ",", "k", ",", "drop_learners", "=", "[", "]", ",", "drop_cols", "=", "[", "]", ",", "sort_cols", "=", "[", "'MSE'", ",", "'Abs % Error of ATE'", "]", ")", ":", "plot_data", "=", "synthetic_summary", ".", "sort_values", "(", "sort_cols", ",", "ascending", "=", "True", ")", "plot_data", "=", "plot_data", ".", "drop", "(", "drop_learners", "+", "[", "KEY_ACTUAL", "]", ")", ".", "drop", "(", "drop_cols", ",", "axis", "=", "1", ")", "plot_data", ".", "plot", "(", "kind", "=", "'bar'", ",", "figsize", "=", "(", "12", ",", "8", ")", ")", "plt", ".", "xticks", "(", "rotation", "=", "30", ")", "plt", ".", "title", "(", "'Learner Performance (averaged over k={} simulations)'", ".", "format", "(", "k", ")", ")" ]
[ 143, 0 ]
[ 158, 79 ]
python
en
['en', 'en', 'en']
True
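A hedged sketch tying the summary helpers together (k, n, and the generator are illustrative; assumes the causalml synthetic-data generator imported in the earlier sketch):

summary = get_synthetic_summary(simulate_nuisance_and_easy_treatment, n=1000, k=5)
scatter_plot_summary(summary, k=5)
bar_plot_summary(summary, k=5, drop_cols=['KL Divergence'])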
distr_plot_single_sim
(synthetic_preds, kind='kde', drop_learners=[], bins=50, histtype='step', alpha=1, linewidth=1, bw_method=1)
Plots the distribution of each learner's predictions (for a single simulation). Kernel Density Estimation (kde) and actual histogram plots supported. Args: synthetic_preds (dict): dictionary of predictions generated by get_synthetic_preds() kind (str, optional): 'kde' or 'hist' drop_learners (list, optional): list of learners (str) to omit when plotting bins (int, optional): number of bins to plot if kind set to 'hist' histtype (str, optional): histogram type if kind set to 'hist' alpha (float, optional): alpha (transparency) for plotting linewidth (int, optional): line width for plotting bw_method (float, optional): parameter for kde
Plots the distribution of each learner's predictions (for a single simulation). Kernel Density Estimation (kde) and actual histogram plots supported.
def distr_plot_single_sim(synthetic_preds, kind='kde', drop_learners=[], bins=50, histtype='step',
                          alpha=1, linewidth=1, bw_method=1):
    """Plots the distribution of each learner's predictions (for a single simulation).
    Kernel Density Estimation (kde) and actual histogram plots supported.

    Args:
        synthetic_preds (dict): dictionary of predictions generated by get_synthetic_preds()
        kind (str, optional): 'kde' or 'hist'
        drop_learners (list, optional): list of learners (str) to omit when plotting
        bins (int, optional): number of bins to plot if kind set to 'hist'
        histtype (str, optional): histogram type if kind set to 'hist'
        alpha (float, optional): alpha (transparency) for plotting
        linewidth (int, optional): line width for plotting
        bw_method (float, optional): parameter for kde
    """
    preds_for_plot = synthetic_preds.copy()

    # drop the generated data; only actuals and learner predictions are plotted
    del preds_for_plot[KEY_GENERATED_DATA]

    global_lower = np.percentile(np.hstack(preds_for_plot.values()), 1)
    global_upper = np.percentile(np.hstack(preds_for_plot.values()), 99)
    learners = list(preds_for_plot.keys())
    learners = [learner for learner in learners if learner not in drop_learners]

    # Plotting
    plt.figure(figsize=(12, 8))
    colors = ['black', 'red', 'blue', 'green', 'cyan', 'brown', 'grey', 'pink', 'orange', 'yellow']
    for i, (k, v) in enumerate(preds_for_plot.items()):
        if k in learners:
            if kind == 'kde':
                v = pd.Series(v.flatten())
                v = v[v.between(global_lower, global_upper)]
                v.plot(kind='kde', bw_method=bw_method, label=k, linewidth=linewidth, color=colors[i])
            elif kind == 'hist':
                plt.hist(v, bins=np.linspace(global_lower, global_upper, bins), label=k, histtype=histtype,
                         alpha=alpha, linewidth=linewidth, color=colors[i])
            else:
                pass

    plt.xlim(global_lower, global_upper)
    plt.legend(loc='center left', bbox_to_anchor=(1, 0.5))
    plt.title('Distribution from a Single Simulation')
[ "def", "distr_plot_single_sim", "(", "synthetic_preds", ",", "kind", "=", "'kde'", ",", "drop_learners", "=", "[", "]", ",", "bins", "=", "50", ",", "histtype", "=", "'step'", ",", "alpha", "=", "1", ",", "linewidth", "=", "1", ",", "bw_method", "=", "1", ")", ":", "preds_for_plot", "=", "synthetic_preds", ".", "copy", "(", ")", "# deleted generated data and assign actual value", "del", "preds_for_plot", "[", "KEY_GENERATED_DATA", "]", "global_lower", "=", "np", ".", "percentile", "(", "np", ".", "hstack", "(", "preds_for_plot", ".", "values", "(", ")", ")", ",", "1", ")", "global_upper", "=", "np", ".", "percentile", "(", "np", ".", "hstack", "(", "preds_for_plot", ".", "values", "(", ")", ")", ",", "99", ")", "learners", "=", "list", "(", "preds_for_plot", ".", "keys", "(", ")", ")", "learners", "=", "[", "learner", "for", "learner", "in", "learners", "if", "learner", "not", "in", "drop_learners", "]", "# Plotting", "plt", ".", "figure", "(", "figsize", "=", "(", "12", ",", "8", ")", ")", "colors", "=", "[", "'black'", ",", "'red'", ",", "'blue'", ",", "'green'", ",", "'cyan'", ",", "'brown'", ",", "'grey'", ",", "'pink'", ",", "'orange'", ",", "'yellow'", "]", "for", "i", ",", "(", "k", ",", "v", ")", "in", "enumerate", "(", "preds_for_plot", ".", "items", "(", ")", ")", ":", "if", "k", "in", "learners", ":", "if", "kind", "==", "'kde'", ":", "v", "=", "pd", ".", "Series", "(", "v", ".", "flatten", "(", ")", ")", "v", "=", "v", "[", "v", ".", "between", "(", "global_lower", ",", "global_upper", ")", "]", "v", ".", "plot", "(", "kind", "=", "'kde'", ",", "bw_method", "=", "bw_method", ",", "label", "=", "k", ",", "linewidth", "=", "linewidth", ",", "color", "=", "colors", "[", "i", "]", ")", "elif", "kind", "==", "'hist'", ":", "plt", ".", "hist", "(", "v", ",", "bins", "=", "np", ".", "linspace", "(", "global_lower", ",", "global_upper", ",", "bins", ")", ",", "label", "=", "k", ",", "histtype", "=", "histtype", ",", "alpha", "=", "alpha", ",", "linewidth", "=", "linewidth", ",", "color", "=", "colors", "[", "i", "]", ")", "else", ":", "pass", "plt", ".", "xlim", "(", "global_lower", ",", "global_upper", ")", "plt", ".", "legend", "(", "loc", "=", "'center left'", ",", "bbox_to_anchor", "=", "(", "1", ",", "0.5", ")", ")", "plt", ".", "title", "(", "'Distribution from a Single Simulation'", ")" ]
[ 161, 0 ]
[ 202, 54 ]
python
en
['en', 'en', 'en']
True
scatter_plot_single_sim
(synthetic_preds)
Creates a grid of scatter plots comparing each learner's predictions with the truth (for a single simulation). Args: synthetic_preds (dict): dictionary of predictions generated by get_synthetic_preds() or get_synthetic_preds_holdout()
Creates a grid of scatter plots comparing each learner's predictions with the truth (for a single simulation).
def scatter_plot_single_sim(synthetic_preds):
    """Creates a grid of scatter plots comparing each learner's predictions with the truth (for a single simulation).

    Args:
        synthetic_preds (dict): dictionary of predictions generated by get_synthetic_preds()
            or get_synthetic_preds_holdout()
    """
    preds_for_plot = synthetic_preds.copy()

    # drop the generated data; only actuals and learner predictions are plotted
    del preds_for_plot[KEY_GENERATED_DATA]

    n_row = int(np.ceil(len(preds_for_plot.keys()) / 3))
    fig, axes = plt.subplots(n_row, 3, figsize=(5 * n_row, 15))
    axes = np.ravel(axes)

    for i, (label, preds) in enumerate(preds_for_plot.items()):
        axes[i].scatter(preds_for_plot[KEY_ACTUAL], preds, s=2, label='Predictions')
        axes[i].set_title(label, size=12)
        axes[i].set_xlabel('Actual', size=10)
        axes[i].set_ylabel('Prediction', size=10)
        xlim = axes[i].get_xlim()
        ylim = axes[i].get_xlim()  # the reference line is y = x, so reuse the x limits
        axes[i].plot([xlim[0], xlim[1]], [ylim[0], ylim[1]], label='Perfect Model', linewidth=1, color='grey')
        axes[i].legend(loc=2, prop={'size': 10})
[ "def", "scatter_plot_single_sim", "(", "synthetic_preds", ")", ":", "preds_for_plot", "=", "synthetic_preds", ".", "copy", "(", ")", "# deleted generated data and get actual column name", "del", "preds_for_plot", "[", "KEY_GENERATED_DATA", "]", "n_row", "=", "int", "(", "np", ".", "ceil", "(", "len", "(", "preds_for_plot", ".", "keys", "(", ")", ")", "/", "3", ")", ")", "fig", ",", "axes", "=", "plt", ".", "subplots", "(", "n_row", ",", "3", ",", "figsize", "=", "(", "5", "*", "n_row", ",", "15", ")", ")", "axes", "=", "np", ".", "ravel", "(", "axes", ")", "for", "i", ",", "(", "label", ",", "preds", ")", "in", "enumerate", "(", "preds_for_plot", ".", "items", "(", ")", ")", ":", "axes", "[", "i", "]", ".", "scatter", "(", "preds_for_plot", "[", "KEY_ACTUAL", "]", ",", "preds", ",", "s", "=", "2", ",", "label", "=", "'Predictions'", ")", "axes", "[", "i", "]", ".", "set_title", "(", "label", ",", "size", "=", "12", ")", "axes", "[", "i", "]", ".", "set_xlabel", "(", "'Actual'", ",", "size", "=", "10", ")", "axes", "[", "i", "]", ".", "set_ylabel", "(", "'Prediction'", ",", "size", "=", "10", ")", "xlim", "=", "axes", "[", "i", "]", ".", "get_xlim", "(", ")", "ylim", "=", "axes", "[", "i", "]", ".", "get_xlim", "(", ")", "axes", "[", "i", "]", ".", "plot", "(", "[", "xlim", "[", "0", "]", ",", "xlim", "[", "1", "]", "]", ",", "[", "ylim", "[", "0", "]", ",", "ylim", "[", "1", "]", "]", ",", "label", "=", "'Perfect Model'", ",", "linewidth", "=", "1", ",", "color", "=", "'grey'", ")", "axes", "[", "i", "]", ".", "legend", "(", "loc", "=", "2", ",", "prop", "=", "{", "'size'", ":", "10", "}", ")" ]
[ 205, 0 ]
[ 229, 48 ]
python
en
['en', 'en', 'en']
True
get_synthetic_preds_holdout
(synthetic_data_func, n=1000, valid_size=0.2, estimators={})
Generate predictions for synthetic data using the specified function (single simulation) for train and holdout Args: synthetic_data_func (function): synthetic data generation function n (int, optional): number of samples valid_size (float, optional): validation/holdout data size estimators (dict of object): dict of names and objects of treatment effect estimators Returns: (tuple): synthetic training and validation data dictionaries: - preds_dict_train (dict): synthetic training data dictionary - preds_dict_valid (dict): synthetic validation data dictionary
Generate predictions for synthetic data using the specified function (single simulation) for train and holdout
def get_synthetic_preds_holdout(synthetic_data_func, n=1000, valid_size=0.2, estimators={}):
    """Generate predictions for synthetic data using the specified function (single simulation) for train and holdout

    Args:
        synthetic_data_func (function): synthetic data generation function
        n (int, optional): number of samples
        valid_size (float, optional): validation/holdout data size
        estimators (dict of object): dict of names and objects of treatment effect estimators

    Returns:
        (tuple): synthetic training and validation data dictionaries:

          - preds_dict_train (dict): synthetic training data dictionary
          - preds_dict_valid (dict): synthetic validation data dictionary
    """
    y, X, w, tau, b, e = synthetic_data_func(n=n)

    X_train, X_val, y_train, y_val, w_train, w_val, tau_train, tau_val, b_train, b_val, e_train, e_val = \
        train_test_split(X, y, w, tau, b, e, test_size=valid_size, random_state=RANDOM_SEED, shuffle=True)

    preds_dict_train = {}
    preds_dict_valid = {}

    preds_dict_train[KEY_ACTUAL] = tau_train
    preds_dict_valid[KEY_ACTUAL] = tau_val

    preds_dict_train['generated_data'] = {
        'y': y_train,
        'X': X_train,
        'w': w_train,
        'tau': tau_train,
        'b': b_train,
        'e': e_train}
    preds_dict_valid['generated_data'] = {
        'y': y_val,
        'X': X_val,
        'w': w_val,
        'tau': tau_val,
        'b': b_val,
        'e': e_val}

    # Predict p_hat because e would not be directly observed in real-life
    p_model = ElasticNetPropensityModel()
    p_hat_train = p_model.fit_predict(X_train, w_train)
    p_hat_val = p_model.fit_predict(X_val, w_val)

    for base_learner, label_l in zip([BaseSRegressor, BaseTRegressor, BaseXRegressor, BaseRRegressor],
                                     ['S', 'T', 'X', 'R']):
        for model, label_m in zip([LinearRegression, XGBRegressor], ['LR', 'XGB']):
            # RLearner will need to fit on the p_hat
            if label_l != 'R':
                learner = base_learner(model())
                # fit the model on training data only
                learner.fit(X=X_train, treatment=w_train, y=y_train)
                try:
                    preds_dict_train['{} Learner ({})'.format(
                        label_l, label_m)] = learner.predict(X=X_train, p=p_hat_train).flatten()
                    preds_dict_valid['{} Learner ({})'.format(
                        label_l, label_m)] = learner.predict(X=X_val, p=p_hat_val).flatten()
                except TypeError:
                    preds_dict_train['{} Learner ({})'.format(
                        label_l, label_m)] = learner.predict(X=X_train, treatment=w_train, y=y_train).flatten()
                    preds_dict_valid['{} Learner ({})'.format(
                        label_l, label_m)] = learner.predict(X=X_val, treatment=w_val, y=y_val).flatten()
            else:
                learner = base_learner(model())
                learner.fit(X=X_train, p=p_hat_train, treatment=w_train, y=y_train)
                preds_dict_train['{} Learner ({})'.format(
                    label_l, label_m)] = learner.predict(X=X_train).flatten()
                preds_dict_valid['{} Learner ({})'.format(
                    label_l, label_m)] = learner.predict(X=X_val).flatten()

    return preds_dict_train, preds_dict_valid
[ "def", "get_synthetic_preds_holdout", "(", "synthetic_data_func", ",", "n", "=", "1000", ",", "valid_size", "=", "0.2", ",", "estimators", "=", "{", "}", ")", ":", "y", ",", "X", ",", "w", ",", "tau", ",", "b", ",", "e", "=", "synthetic_data_func", "(", "n", "=", "n", ")", "X_train", ",", "X_val", ",", "y_train", ",", "y_val", ",", "w_train", ",", "w_val", ",", "tau_train", ",", "tau_val", ",", "b_train", ",", "b_val", ",", "e_train", ",", "e_val", "=", "train_test_split", "(", "X", ",", "y", ",", "w", ",", "tau", ",", "b", ",", "e", ",", "test_size", "=", "valid_size", ",", "random_state", "=", "RANDOM_SEED", ",", "shuffle", "=", "True", ")", "preds_dict_train", "=", "{", "}", "preds_dict_valid", "=", "{", "}", "preds_dict_train", "[", "KEY_ACTUAL", "]", "=", "tau_train", "preds_dict_valid", "[", "KEY_ACTUAL", "]", "=", "tau_val", "preds_dict_train", "[", "'generated_data'", "]", "=", "{", "'y'", ":", "y_train", ",", "'X'", ":", "X_train", ",", "'w'", ":", "w_train", ",", "'tau'", ":", "tau_train", ",", "'b'", ":", "b_train", ",", "'e'", ":", "e_train", "}", "preds_dict_valid", "[", "'generated_data'", "]", "=", "{", "'y'", ":", "y_val", ",", "'X'", ":", "X_val", ",", "'w'", ":", "w_val", ",", "'tau'", ":", "tau_val", ",", "'b'", ":", "b_val", ",", "'e'", ":", "e_val", "}", "# Predict p_hat because e would not be directly observed in real-life", "p_model", "=", "ElasticNetPropensityModel", "(", ")", "p_hat_train", "=", "p_model", ".", "fit_predict", "(", "X_train", ",", "w_train", ")", "p_hat_val", "=", "p_model", ".", "fit_predict", "(", "X_val", ",", "w_val", ")", "for", "base_learner", ",", "label_l", "in", "zip", "(", "[", "BaseSRegressor", ",", "BaseTRegressor", ",", "BaseXRegressor", ",", "BaseRRegressor", "]", ",", "[", "'S'", ",", "'T'", ",", "'X'", ",", "'R'", "]", ")", ":", "for", "model", ",", "label_m", "in", "zip", "(", "[", "LinearRegression", ",", "XGBRegressor", "]", ",", "[", "'LR'", ",", "'XGB'", "]", ")", ":", "# RLearner will need to fit on the p_hat", "if", "label_l", "!=", "'R'", ":", "learner", "=", "base_learner", "(", "model", "(", ")", ")", "# fit the model on training data only", "learner", ".", "fit", "(", "X", "=", "X_train", ",", "treatment", "=", "w_train", ",", "y", "=", "y_train", ")", "try", ":", "preds_dict_train", "[", "'{} Learner ({})'", ".", "format", "(", "label_l", ",", "label_m", ")", "]", "=", "learner", ".", "predict", "(", "X", "=", "X_train", ",", "p", "=", "p_hat_train", ")", ".", "flatten", "(", ")", "preds_dict_valid", "[", "'{} Learner ({})'", ".", "format", "(", "label_l", ",", "label_m", ")", "]", "=", "learner", ".", "predict", "(", "X", "=", "X_val", ",", "p", "=", "p_hat_val", ")", ".", "flatten", "(", ")", "except", "TypeError", ":", "preds_dict_train", "[", "'{} Learner ({})'", ".", "format", "(", "label_l", ",", "label_m", ")", "]", "=", "learner", ".", "predict", "(", "X", "=", "X_train", ",", "treatment", "=", "w_train", ",", "y", "=", "y_train", ")", ".", "flatten", "(", ")", "preds_dict_valid", "[", "'{} Learner ({})'", ".", "format", "(", "label_l", ",", "label_m", ")", "]", "=", "learner", ".", "predict", "(", "X", "=", "X_val", ",", "treatment", "=", "w_val", ",", "y", "=", "y_val", ")", ".", "flatten", "(", ")", "else", ":", "learner", "=", "base_learner", "(", "model", "(", ")", ")", "learner", ".", "fit", "(", "X", "=", "X_train", ",", "p", "=", "p_hat_train", ",", "treatment", "=", "w_train", ",", "y", "=", "y_train", ")", "preds_dict_train", "[", "'{} Learner ({})'", ".", "format", "(", "label_l", ",", "label_m", ")", 
"]", "=", "learner", ".", "predict", "(", "X", "=", "X_train", ")", ".", "flatten", "(", ")", "preds_dict_valid", "[", "'{} Learner ({})'", ".", "format", "(", "label_l", ",", "label_m", ")", "]", "=", "learner", ".", "predict", "(", "X", "=", "X_val", ")", ".", "flatten", "(", ")", "return", "preds_dict_train", ",", "preds_dict_valid" ]
[ 232, 0 ]
[ 305, 45 ]
python
en
['en', 'en', 'en']
True
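A minimal usage sketch for the function above. It assumes causalml is installed and that the helper and the simulate_nuisance_and_easy_treatment generator are importable from causalml.dataset; the import paths and the printed dictionary keys are assumptions, not confirmed by this row.

# Hedged usage sketch for get_synthetic_preds_holdout (import paths assumed).
from causalml.dataset import get_synthetic_preds_holdout, simulate_nuisance_and_easy_treatment

# One simulation with an 80/20 train/holdout split of 1,000 synthetic samples.
preds_train, preds_valid = get_synthetic_preds_holdout(
    simulate_nuisance_and_easy_treatment, n=1000, valid_size=0.2)

# Each dict maps a learner label such as 'S Learner (LR)' to CATE predictions
# on its split; the raw simulation draws live under the generated-data key.
for label, preds in preds_train.items():
    if label != 'generated_data':
        print(label, preds[:3])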
get_synthetic_summary_holdout
(synthetic_data_func, n=1000, valid_size=0.2, k=1)
Generate a summary for predictions on synthetic data for train and holdout using specified function

    Args:
        synthetic_data_func (function): synthetic data generation function
        n (int, optional): number of samples per simulation
        valid_size (float, optional): validation/holdout data size
        k (int, optional): number of simulations

    Returns:
        (tuple): summary evaluation metrics of predictions for train and validation:

          - summary_train (pandas.DataFrame): training data evaluation summary
          - summary_validation (pandas.DataFrame): validation data evaluation summary
Generate a summary for predictions on synthetic data for train and holdout using specified function
def get_synthetic_summary_holdout(synthetic_data_func, n=1000, valid_size=0.2, k=1):
    """Generate a summary for predictions on synthetic data for train and holdout using specified function

    Args:
        synthetic_data_func (function): synthetic data generation function
        n (int, optional): number of samples per simulation
        valid_size (float, optional): validation/holdout data size
        k (int, optional): number of simulations

    Returns:
        (tuple): summary evaluation metrics of predictions for train and validation:

          - summary_train (pandas.DataFrame): training data evaluation summary
          - summary_validation (pandas.DataFrame): validation data evaluation summary
    """

    summaries_train = []
    summaries_validation = []

    for i in range(k):
        preds_dict_train, preds_dict_valid = get_synthetic_preds_holdout(synthetic_data_func,
                                                                         n=n,
                                                                         valid_size=valid_size)
        actuals_train = preds_dict_train[KEY_ACTUAL]
        actuals_validation = preds_dict_valid[KEY_ACTUAL]

        synthetic_summary_train = pd.DataFrame({label: [preds.mean(), mse(preds, actuals_train)]
                                                for label, preds in preds_dict_train.items()
                                                if KEY_GENERATED_DATA not in label.lower()},
                                               index=['ATE', 'MSE']).T
        synthetic_summary_train['Abs % Error of ATE'] = np.abs(
            (synthetic_summary_train['ATE'] / synthetic_summary_train.loc[KEY_ACTUAL, 'ATE']) - 1)

        synthetic_summary_validation = pd.DataFrame({label: [preds.mean(), mse(preds, actuals_validation)]
                                                     for label, preds in preds_dict_valid.items()
                                                     if KEY_GENERATED_DATA not in label.lower()},
                                                    index=['ATE', 'MSE']).T
        synthetic_summary_validation['Abs % Error of ATE'] = np.abs(
            (synthetic_summary_validation['ATE'] / synthetic_summary_validation.loc[KEY_ACTUAL, 'ATE']) - 1)

        # calculate kl divergence for training
        for label in synthetic_summary_train.index:
            stacked_values = np.hstack((preds_dict_train[label], actuals_train))
            stacked_low = np.percentile(stacked_values, 0.1)
            stacked_high = np.percentile(stacked_values, 99.9)
            bins = np.linspace(stacked_low, stacked_high, 100)

            distr = np.histogram(preds_dict_train[label], bins=bins)[0]
            distr = np.clip(distr / distr.sum(), 0.001, 0.999)
            true_distr = np.histogram(actuals_train, bins=bins)[0]
            true_distr = np.clip(true_distr / true_distr.sum(), 0.001, 0.999)

            kl = entropy(distr, true_distr)
            synthetic_summary_train.loc[label, 'KL Divergence'] = kl

        # calculate kl divergence for validation
        for label in synthetic_summary_validation.index:
            stacked_values = np.hstack((preds_dict_valid[label], actuals_validation))
            stacked_low = np.percentile(stacked_values, 0.1)
            stacked_high = np.percentile(stacked_values, 99.9)
            bins = np.linspace(stacked_low, stacked_high, 100)

            distr = np.histogram(preds_dict_valid[label], bins=bins)[0]
            distr = np.clip(distr / distr.sum(), 0.001, 0.999)
            true_distr = np.histogram(actuals_validation, bins=bins)[0]
            true_distr = np.clip(true_distr / true_distr.sum(), 0.001, 0.999)

            kl = entropy(distr, true_distr)
            synthetic_summary_validation.loc[label, 'KL Divergence'] = kl

        summaries_train.append(synthetic_summary_train)
        summaries_validation.append(synthetic_summary_validation)

    summary_train = sum(summaries_train) / k
    summary_validation = sum(summaries_validation) / k

    return (summary_train[['Abs % Error of ATE', 'MSE', 'KL Divergence']],
            summary_validation[['Abs % Error of ATE', 'MSE', 'KL Divergence']])
[ "def", "get_synthetic_summary_holdout", "(", "synthetic_data_func", ",", "n", "=", "1000", ",", "valid_size", "=", "0.2", ",", "k", "=", "1", ")", ":", "summaries_train", "=", "[", "]", "summaries_validation", "=", "[", "]", "for", "i", "in", "range", "(", "k", ")", ":", "preds_dict_train", ",", "preds_dict_valid", "=", "get_synthetic_preds_holdout", "(", "synthetic_data_func", ",", "n", "=", "n", ",", "valid_size", "=", "valid_size", ")", "actuals_train", "=", "preds_dict_train", "[", "KEY_ACTUAL", "]", "actuals_validation", "=", "preds_dict_valid", "[", "KEY_ACTUAL", "]", "synthetic_summary_train", "=", "pd", ".", "DataFrame", "(", "{", "label", ":", "[", "preds", ".", "mean", "(", ")", ",", "mse", "(", "preds", ",", "actuals_train", ")", "]", "for", "label", ",", "preds", "in", "preds_dict_train", ".", "items", "(", ")", "if", "KEY_GENERATED_DATA", "not", "in", "label", ".", "lower", "(", ")", "}", ",", "index", "=", "[", "'ATE'", ",", "'MSE'", "]", ")", ".", "T", "synthetic_summary_train", "[", "'Abs % Error of ATE'", "]", "=", "np", ".", "abs", "(", "(", "synthetic_summary_train", "[", "'ATE'", "]", "/", "synthetic_summary_train", ".", "loc", "[", "KEY_ACTUAL", ",", "'ATE'", "]", ")", "-", "1", ")", "synthetic_summary_validation", "=", "pd", ".", "DataFrame", "(", "{", "label", ":", "[", "preds", ".", "mean", "(", ")", ",", "mse", "(", "preds", ",", "actuals_validation", ")", "]", "for", "label", ",", "preds", "in", "preds_dict_valid", ".", "items", "(", ")", "if", "KEY_GENERATED_DATA", "not", "in", "label", ".", "lower", "(", ")", "}", ",", "index", "=", "[", "'ATE'", ",", "'MSE'", "]", ")", ".", "T", "synthetic_summary_validation", "[", "'Abs % Error of ATE'", "]", "=", "np", ".", "abs", "(", "(", "synthetic_summary_validation", "[", "'ATE'", "]", "/", "synthetic_summary_validation", ".", "loc", "[", "KEY_ACTUAL", ",", "'ATE'", "]", ")", "-", "1", ")", "# calculate kl divergence for training", "for", "label", "in", "synthetic_summary_train", ".", "index", ":", "stacked_values", "=", "np", ".", "hstack", "(", "(", "preds_dict_train", "[", "label", "]", ",", "actuals_train", ")", ")", "stacked_low", "=", "np", ".", "percentile", "(", "stacked_values", ",", "0.1", ")", "stacked_high", "=", "np", ".", "percentile", "(", "stacked_values", ",", "99.9", ")", "bins", "=", "np", ".", "linspace", "(", "stacked_low", ",", "stacked_high", ",", "100", ")", "distr", "=", "np", ".", "histogram", "(", "preds_dict_train", "[", "label", "]", ",", "bins", "=", "bins", ")", "[", "0", "]", "distr", "=", "np", ".", "clip", "(", "distr", "/", "distr", ".", "sum", "(", ")", ",", "0.001", ",", "0.999", ")", "true_distr", "=", "np", ".", "histogram", "(", "actuals_train", ",", "bins", "=", "bins", ")", "[", "0", "]", "true_distr", "=", "np", ".", "clip", "(", "true_distr", "/", "true_distr", ".", "sum", "(", ")", ",", "0.001", ",", "0.999", ")", "kl", "=", "entropy", "(", "distr", ",", "true_distr", ")", "synthetic_summary_train", ".", "loc", "[", "label", ",", "'KL Divergence'", "]", "=", "kl", "# calculate kl divergence for validation", "for", "label", "in", "synthetic_summary_validation", ".", "index", ":", "stacked_values", "=", "np", ".", "hstack", "(", "(", "preds_dict_valid", "[", "label", "]", ",", "actuals_validation", ")", ")", "stacked_low", "=", "np", ".", "percentile", "(", "stacked_values", ",", "0.1", ")", "stacked_high", "=", "np", ".", "percentile", "(", "stacked_values", ",", "99.9", ")", "bins", "=", "np", ".", "linspace", "(", "stacked_low", ",", "stacked_high", ",", "100", ")", 
"distr", "=", "np", ".", "histogram", "(", "preds_dict_valid", "[", "label", "]", ",", "bins", "=", "bins", ")", "[", "0", "]", "distr", "=", "np", ".", "clip", "(", "distr", "/", "distr", ".", "sum", "(", ")", ",", "0.001", ",", "0.999", ")", "true_distr", "=", "np", ".", "histogram", "(", "actuals_validation", ",", "bins", "=", "bins", ")", "[", "0", "]", "true_distr", "=", "np", ".", "clip", "(", "true_distr", "/", "true_distr", ".", "sum", "(", ")", ",", "0.001", ",", "0.999", ")", "kl", "=", "entropy", "(", "distr", ",", "true_distr", ")", "synthetic_summary_validation", ".", "loc", "[", "label", ",", "'KL Divergence'", "]", "=", "kl", "summaries_train", ".", "append", "(", "synthetic_summary_train", ")", "summaries_validation", ".", "append", "(", "synthetic_summary_validation", ")", "summary_train", "=", "sum", "(", "summaries_train", ")", "/", "k", "summary_validation", "=", "sum", "(", "summaries_validation", ")", "/", "k", "return", "(", "summary_train", "[", "[", "'Abs % Error of ATE'", ",", "'MSE'", ",", "'KL Divergence'", "]", "]", ",", "summary_validation", "[", "[", "'Abs % Error of ATE'", ",", "'MSE'", ",", "'KL Divergence'", "]", "]", ")" ]
[ 308, 0 ]
[ 383, 79 ]
python
en
['en', 'en', 'en']
True
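The KL-divergence step inside the loop above is easy to exercise in isolation. Below is a self-contained sketch of the same technique (shared bins clipped to the 0.1/99.9 percentiles, normalized and clipped histograms, scipy's entropy); all data here is a synthetic stand-in.

import numpy as np
from scipy.stats import entropy

rng = np.random.default_rng(42)
preds = rng.normal(0.5, 1.0, 1000)    # stand-in for one learner's CATE predictions
actuals = rng.normal(0.5, 0.8, 1000)  # stand-in for the true treatment effects

# Shared support, trimmed at the 0.1 and 99.9 percentiles as in the function above.
stacked = np.hstack((preds, actuals))
bins = np.linspace(np.percentile(stacked, 0.1), np.percentile(stacked, 99.9), 100)

# Normalized histograms, clipped so entropy() never sees an empty bin.
p = np.histogram(preds, bins=bins)[0]
p = np.clip(p / p.sum(), 0.001, 0.999)
q = np.histogram(actuals, bins=bins)[0]
q = np.clip(q / q.sum(), 0.001, 0.999)

print('KL(preds || actuals) =', entropy(p, q))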
scatter_plot_summary_holdout
(train_summary, validation_summary, k, label=['Train', 'Validation'], drop_learners=[], drop_cols=[])
Generates a scatter plot comparing learner performance by training and validation.

    Args:
        train_summary (pd.DataFrame): summary for training synthetic data generated by get_synthetic_summary_holdout()
        validation_summary (pd.DataFrame): summary for validation synthetic data generated by get_synthetic_summary_holdout()
        k (int): number of simulations (used only for plot title text)
        label (list, optional): legend labels for plot
        drop_learners (list, optional): list of learners (str) to omit when plotting
        drop_cols (list, optional): list of metrics (str) to omit when plotting
Generates a scatter plot comparing learner performance by training and validation.
def scatter_plot_summary_holdout(train_summary, validation_summary, k, label=['Train', 'Validation'],
                                 drop_learners=[], drop_cols=[]):
    """Generates a scatter plot comparing learner performance by training and validation.

    Args:
        train_summary (pd.DataFrame): summary for training synthetic data generated by get_synthetic_summary_holdout()
        validation_summary (pd.DataFrame): summary for validation synthetic data generated by get_synthetic_summary_holdout()
        k (int): number of simulations (used only for plot title text)
        label (list, optional): legend labels for plot
        drop_learners (list, optional): list of learners (str) to omit when plotting
        drop_cols (list, optional): list of metrics (str) to omit when plotting
    """
    train_summary = train_summary.drop(drop_learners).drop(drop_cols, axis=1)
    validation_summary = validation_summary.drop(drop_learners).drop(drop_cols, axis=1)

    plot_data = pd.concat([train_summary, validation_summary])
    plot_data['label'] = [i.replace('Train', '') for i in plot_data.index]
    plot_data['label'] = [i.replace('Validation', '') for i in plot_data.label]

    fig, ax = plt.subplots()
    fig.set_size_inches(12, 8)
    xs = plot_data['Abs % Error of ATE']
    ys = plot_data['MSE']
    group = np.array([label[0]] * train_summary.shape[0] + [label[1]] * validation_summary.shape[0])
    cdict = {label[0]: 'red', label[1]: 'blue'}

    for g in np.unique(group):
        ix = np.where(group == g)[0].tolist()
        ax.scatter(xs[ix], ys[ix], c=cdict[g], label=g, s=100)

    for i, txt in enumerate(plot_data.label[:10]):
        ax.annotate(txt, (xs[i] + 0.005, ys[i]))

    ax.set_xlabel('Abs % Error of ATE')
    ax.set_ylabel('MSE')
    ax.set_title('Learner Performance (averaged over k={} simulations)'.format(k))
    ax.legend(loc='center left', bbox_to_anchor=(1.1, 0.5))
    plt.show()
[ "def", "scatter_plot_summary_holdout", "(", "train_summary", ",", "validation_summary", ",", "k", ",", "label", "=", "[", "'Train'", ",", "'Validation'", "]", ",", "drop_learners", "=", "[", "]", ",", "drop_cols", "=", "[", "]", ")", ":", "train_summary", "=", "train_summary", ".", "drop", "(", "drop_learners", ")", ".", "drop", "(", "drop_cols", ",", "axis", "=", "1", ")", "validation_summary", "=", "validation_summary", ".", "drop", "(", "drop_learners", ")", ".", "drop", "(", "drop_cols", ",", "axis", "=", "1", ")", "plot_data", "=", "pd", ".", "concat", "(", "[", "train_summary", ",", "validation_summary", "]", ")", "plot_data", "[", "'label'", "]", "=", "[", "i", ".", "replace", "(", "'Train'", ",", "''", ")", "for", "i", "in", "plot_data", ".", "index", "]", "plot_data", "[", "'label'", "]", "=", "[", "i", ".", "replace", "(", "'Validation'", ",", "''", ")", "for", "i", "in", "plot_data", ".", "label", "]", "fig", ",", "ax", "=", "plt", ".", "subplots", "(", ")", "fig", ".", "set_size_inches", "(", "12", ",", "8", ")", "xs", "=", "plot_data", "[", "'Abs % Error of ATE'", "]", "ys", "=", "plot_data", "[", "'MSE'", "]", "group", "=", "np", ".", "array", "(", "[", "label", "[", "0", "]", "]", "*", "train_summary", ".", "shape", "[", "0", "]", "+", "[", "label", "[", "1", "]", "]", "*", "validation_summary", ".", "shape", "[", "0", "]", ")", "cdict", "=", "{", "label", "[", "0", "]", ":", "'red'", ",", "label", "[", "1", "]", ":", "'blue'", "}", "for", "g", "in", "np", ".", "unique", "(", "group", ")", ":", "ix", "=", "np", ".", "where", "(", "group", "==", "g", ")", "[", "0", "]", ".", "tolist", "(", ")", "ax", ".", "scatter", "(", "xs", "[", "ix", "]", ",", "ys", "[", "ix", "]", ",", "c", "=", "cdict", "[", "g", "]", ",", "label", "=", "g", ",", "s", "=", "100", ")", "for", "i", ",", "txt", "in", "enumerate", "(", "plot_data", ".", "label", "[", ":", "10", "]", ")", ":", "ax", ".", "annotate", "(", "txt", ",", "(", "xs", "[", "i", "]", "+", "0.005", ",", "ys", "[", "i", "]", ")", ")", "ax", ".", "set_xlabel", "(", "'Abs % Error of ATE'", ")", "ax", ".", "set_ylabel", "(", "'MSE'", ")", "ax", ".", "set_title", "(", "'Learner Performance (averaged over k={} simulations)'", ".", "format", "(", "k", ")", ")", "ax", ".", "legend", "(", "loc", "=", "'center left'", ",", "bbox_to_anchor", "=", "(", "1.1", ",", "0.5", ")", ")", "plt", ".", "show", "(", ")" ]
[ 386, 0 ]
[ 424, 14 ]
python
en
['en', 'en', 'en']
True
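A short usage sketch chaining the two rows above, assuming the causalml helpers are importable as before; k is passed through only for the plot title.

# Hedged sketch: five simulations, then the train-vs-validation scatter.
summary_train, summary_valid = get_synthetic_summary_holdout(
    simulate_nuisance_and_easy_treatment, n=1000, valid_size=0.2, k=5)
scatter_plot_summary_holdout(summary_train, summary_valid, k=5,
                             drop_learners=[], drop_cols=[])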
bar_plot_summary_holdout
(train_summary, validation_summary, k, drop_learners=[], drop_cols=[])
Generates a bar plot comparing learner performance by training and validation

    Args:
        train_summary (pd.DataFrame): summary for training synthetic data generated by get_synthetic_summary_holdout()
        validation_summary (pd.DataFrame): summary for validation synthetic data generated by get_synthetic_summary_holdout()
        k (int): number of simulations (used only for plot title text)
        drop_learners (list, optional): list of learners (str) to omit when plotting
        drop_cols (list, optional): list of metrics (str) to omit when plotting
Generates a bar plot comparing learner performance by training and validation
def bar_plot_summary_holdout(train_summary, validation_summary, k, drop_learners=[], drop_cols=[]):
    """Generates a bar plot comparing learner performance by training and validation

    Args:
        train_summary (pd.DataFrame): summary for training synthetic data generated by get_synthetic_summary_holdout()
        validation_summary (pd.DataFrame): summary for validation synthetic data generated by get_synthetic_summary_holdout()
        k (int): number of simulations (used only for plot title text)
        drop_learners (list, optional): list of learners (str) to omit when plotting
        drop_cols (list, optional): list of metrics (str) to omit when plotting
    """
    train_summary = train_summary.drop([KEY_ACTUAL])
    train_summary['Learner'] = train_summary.index

    validation_summary = validation_summary.drop([KEY_ACTUAL])
    validation_summary['Learner'] = validation_summary.index

    for metric in ['Abs % Error of ATE', 'MSE', 'KL Divergence']:
        plot_data_sub = pd.DataFrame(train_summary.Learner).reset_index(drop=True)
        plot_data_sub['train'] = train_summary[metric].values
        plot_data_sub['validation'] = validation_summary[metric].values
        plot_data_sub = plot_data_sub.set_index('Learner')
        plot_data_sub = plot_data_sub.drop(drop_learners).drop(drop_cols, axis=1)
        plot_data_sub = plot_data_sub.sort_values('train', ascending=True)

        plot_data_sub.plot(kind='bar', color=['red', 'blue'], figsize=(12, 8))
        plt.xticks(rotation=30)
        plt.title('Learner Performance of {} (averaged over k={} simulations)'.format(metric, k))
[ "def", "bar_plot_summary_holdout", "(", "train_summary", ",", "validation_summary", ",", "k", ",", "drop_learners", "=", "[", "]", ",", "drop_cols", "=", "[", "]", ")", ":", "train_summary", "=", "train_summary", ".", "drop", "(", "[", "KEY_ACTUAL", "]", ")", "train_summary", "[", "'Learner'", "]", "=", "train_summary", ".", "index", "validation_summary", "=", "validation_summary", ".", "drop", "(", "[", "KEY_ACTUAL", "]", ")", "validation_summary", "[", "'Learner'", "]", "=", "validation_summary", ".", "index", "for", "metric", "in", "[", "'Abs % Error of ATE'", ",", "'MSE'", ",", "'KL Divergence'", "]", ":", "plot_data_sub", "=", "pd", ".", "DataFrame", "(", "train_summary", ".", "Learner", ")", ".", "reset_index", "(", "drop", "=", "True", ")", "plot_data_sub", "[", "'train'", "]", "=", "train_summary", "[", "metric", "]", ".", "values", "plot_data_sub", "[", "'validation'", "]", "=", "validation_summary", "[", "metric", "]", ".", "values", "plot_data_sub", "=", "plot_data_sub", ".", "set_index", "(", "'Learner'", ")", "plot_data_sub", "=", "plot_data_sub", ".", "drop", "(", "drop_learners", ")", ".", "drop", "(", "drop_cols", ",", "axis", "=", "1", ")", "plot_data_sub", "=", "plot_data_sub", ".", "sort_values", "(", "'train'", ",", "ascending", "=", "True", ")", "plot_data_sub", ".", "plot", "(", "kind", "=", "'bar'", ",", "color", "=", "[", "'red'", ",", "'blue'", "]", ",", "figsize", "=", "(", "12", ",", "8", ")", ")", "plt", ".", "xticks", "(", "rotation", "=", "30", ")", "plt", ".", "title", "(", "'Learner Performance of {} (averaged over k={} simulations)'", ".", "format", "(", "metric", ",", "k", ")", ")" ]
[ 427, 0 ]
[ 454, 97 ]
python
en
['en', 'en', 'en']
True
get_synthetic_auuc
(synthetic_preds, drop_learners=[], outcome_col='y', treatment_col='w', treatment_effect_col='tau', plot=True)
Get auuc values for cumulative gains of model estimates in quantiles.

    For details, reference get_cumgain() and plot_gain()

    Args:
        synthetic_preds (dict): dictionary of predictions generated by get_synthetic_preds() or get_synthetic_preds_holdout()
        drop_learners (list, optional): list of learners (str) to omit when computing the AUUC
        outcome_col (str, optional): the column name for the actual outcome
        treatment_col (str, optional): the column name for the treatment indicator (0 or 1)
        treatment_effect_col (str, optional): the column name for the true treatment effect
        plot (boolean, optional): plot the cumulative gain chart or not

    Returns:
        (pandas.DataFrame): auuc values by learner for cumulative gains of model estimates
Get auuc values for cumulative gains of model estimates in quantiles.
def get_synthetic_auuc(synthetic_preds, drop_learners=[], outcome_col='y', treatment_col='w',
                       treatment_effect_col='tau', plot=True):
    """Get auuc values for cumulative gains of model estimates in quantiles.

    For details, reference get_cumgain() and plot_gain()

    Args:
        synthetic_preds (dict): dictionary of predictions generated by get_synthetic_preds()
            or get_synthetic_preds_holdout()
        drop_learners (list, optional): list of learners (str) to omit when computing the AUUC
        outcome_col (str, optional): the column name for the actual outcome
        treatment_col (str, optional): the column name for the treatment indicator (0 or 1)
        treatment_effect_col (str, optional): the column name for the true treatment effect
        plot (boolean, optional): plot the cumulative gain chart or not

    Returns:
        (pandas.DataFrame): auuc values by learner for cumulative gains of model estimates
    """
    synthetic_preds_df = synthetic_preds.copy()
    generated_data = synthetic_preds_df.pop(KEY_GENERATED_DATA)
    synthetic_preds_df = pd.DataFrame(synthetic_preds_df)
    synthetic_preds_df = synthetic_preds_df.drop(drop_learners, axis=1)

    synthetic_preds_df['y'] = generated_data[outcome_col]
    synthetic_preds_df['w'] = generated_data[treatment_col]
    if treatment_effect_col in generated_data.keys():
        synthetic_preds_df['tau'] = generated_data[treatment_effect_col]

    assert ((outcome_col in synthetic_preds_df.columns) and
            (treatment_col in synthetic_preds_df.columns) or
            treatment_effect_col in synthetic_preds_df.columns)

    cumlift = get_cumgain(synthetic_preds_df, outcome_col='y', treatment_col='w',
                          treatment_effect_col='tau')
    auuc_df = pd.DataFrame(cumlift.columns)
    auuc_df.columns = ['Learner']
    auuc_df['cum_gain_auuc'] = [auc(cumlift.index.values / 100, cumlift[learner].values)
                                for learner in cumlift.columns]
    auuc_df = auuc_df.sort_values('cum_gain_auuc', ascending=False)

    if plot:
        plot_gain(synthetic_preds_df, outcome_col=outcome_col,
                  treatment_col=treatment_col, treatment_effect_col=treatment_effect_col)

    return auuc_df
[ "def", "get_synthetic_auuc", "(", "synthetic_preds", ",", "drop_learners", "=", "[", "]", ",", "outcome_col", "=", "'y'", ",", "treatment_col", "=", "'w'", ",", "treatment_effect_col", "=", "'tau'", ",", "plot", "=", "True", ")", ":", "synthetic_preds_df", "=", "synthetic_preds", ".", "copy", "(", ")", "generated_data", "=", "synthetic_preds_df", ".", "pop", "(", "KEY_GENERATED_DATA", ")", "synthetic_preds_df", "=", "pd", ".", "DataFrame", "(", "synthetic_preds_df", ")", "synthetic_preds_df", "=", "synthetic_preds_df", ".", "drop", "(", "drop_learners", ",", "axis", "=", "1", ")", "synthetic_preds_df", "[", "'y'", "]", "=", "generated_data", "[", "outcome_col", "]", "synthetic_preds_df", "[", "'w'", "]", "=", "generated_data", "[", "treatment_col", "]", "if", "treatment_effect_col", "in", "generated_data", ".", "keys", "(", ")", ":", "synthetic_preds_df", "[", "'tau'", "]", "=", "generated_data", "[", "treatment_effect_col", "]", "assert", "(", "(", "outcome_col", "in", "synthetic_preds_df", ".", "columns", ")", "and", "(", "treatment_col", "in", "synthetic_preds_df", ".", "columns", ")", "or", "treatment_effect_col", "in", "synthetic_preds_df", ".", "columns", ")", "cumlift", "=", "get_cumgain", "(", "synthetic_preds_df", ",", "outcome_col", "=", "'y'", ",", "treatment_col", "=", "'w'", ",", "treatment_effect_col", "=", "'tau'", ")", "auuc_df", "=", "pd", ".", "DataFrame", "(", "cumlift", ".", "columns", ")", "auuc_df", ".", "columns", "=", "[", "'Learner'", "]", "auuc_df", "[", "'cum_gain_auuc'", "]", "=", "[", "auc", "(", "cumlift", ".", "index", ".", "values", "/", "100", ",", "cumlift", "[", "learner", "]", ".", "values", ")", "for", "learner", "in", "cumlift", ".", "columns", "]", "auuc_df", "=", "auuc_df", ".", "sort_values", "(", "'cum_gain_auuc'", ",", "ascending", "=", "False", ")", "if", "plot", ":", "plot_gain", "(", "synthetic_preds_df", ",", "outcome_col", "=", "outcome_col", ",", "treatment_col", "=", "treatment_col", ",", "treatment_effect_col", "=", "treatment_effect_col", ")", "return", "auuc_df" ]
[ 457, 0 ]
[ 498, 18 ]
python
en
['en', 'la', 'en']
True
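The AUUC number above reduces to sklearn's trapezoidal auc over a cumulative-gain curve whose index is a population percentile. A toy sketch of just that step, with a made-up gain series:

import numpy as np
from sklearn.metrics import auc

# Made-up cumulative gain evaluated at population percentiles 1..100.
percentiles = np.arange(1, 101)
cum_gain = 0.4 * np.log1p(percentiles)  # illustrative, monotone curve

# Same normalization as above: divide the index by 100 so x runs over (0, 1].
print('AUUC =', auc(percentiles / 100, cum_gain))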
generate_key
(dict_data, daily=True)
generate key from a dictionary
generate key from a dictionary
def generate_key(dict_data, daily=True):
    """generate key from a dictionary"""
    cache_dict = copy.copy(dict_data)
    json_data = json.dumps(cache_dict)
    return hashlib.md5(json_data.encode('utf-8')).hexdigest()
[ "def", "generate_key", "(", "dict_data", ",", "daily", "=", "True", ")", ":", "cache_dict", "=", "copy", ".", "copy", "(", "dict_data", ")", "json_data", "=", "json", ".", "dumps", "(", "cache_dict", ")", "return", "hashlib", ".", "md5", "(", "json_data", ".", "encode", "(", "'utf-8'", ")", ")", ".", "hexdigest", "(", ")" ]
[ 48, 0 ]
[ 52, 61 ]
python
en
['en', 'en', 'en']
True
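Two caveats for the row above: json.dumps preserves dict insertion order, so equal dicts built in different orders can hash differently, and the daily argument is accepted but never used. A hedged variant (new name, not from the source) that sorts keys for a stable digest:

import copy
import hashlib
import json

def generate_stable_key(dict_data):
    # sort_keys=True makes the digest independent of key insertion order.
    json_data = json.dumps(copy.copy(dict_data), sort_keys=True)
    return hashlib.md5(json_data.encode('utf-8')).hexdigest()

assert generate_stable_key({'a': 1, 'b': 2}) == generate_stable_key({'b': 2, 'a': 1})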
parse_location
(ip)
country_code = Column(String(10))
    country_name = Column(String(10))
    city = Column(String(10))
    latitude = Column(Float)
    longitude = Column(Float)
country_code = Column(String(10)) country_name = Column(String(10)) city = Column(String(10)) latitude = Column(Float) longitude = Column(Float)
def parse_location(ip):
    """country_code = Column(String(10))
    country_name = Column(String(10))
    city = Column(String(10))
    latitude = Column(Float)
    longitude = Column(Float)"""
    data = {}
    keys = ["country_code", "country_name", "city", "latitude", "longitude"]
    try:
        url = f"https://geolocation-db.com/json/{ip}&position=true"
        res = requests.get(url)
        payload = res.json()
        for key in keys:
            data[key] = payload[key]
    except Exception:
        pass
    finally:
        return data
[ "def", "parse_location", "(", "ip", ")", ":", "data", "=", "{", "}", "keys", "=", "[", "\"country_code\"", ",", "\"country_name\"", ",", "\"city\"", ",", "\"latitude\"", ",", "\"longitude\"", "]", "try", ":", "url", "=", "f\"https://geolocation-db.com/json/{ip}&position=true\"", "res", "=", "requests", ".", "get", "(", "url", ")", "payload", "=", "res", ".", "json", "(", ")", "for", "key", "in", "keys", ":", "data", "[", "key", "]", "=", "payload", "[", "key", "]", "except", ":", "pass", "finally", ":", "return", "data" ]
[ 109, 0 ]
[ 127, 19 ]
python
en
['en', 'error', 'th']
False
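A usage sketch for parse_location above; geolocation-db.com is an external service, so the call can fail or return partial data, in which case the function yields an empty dict (requests must be imported where the function is defined):

location = parse_location('8.8.8.8')  # hypothetical lookup
# Keys present only when the lookup succeeds; .get() avoids KeyError otherwise.
print(location.get('country_name'), location.get('latitude'), location.get('longitude'))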
isprime
(n)
check if integer n is a prime
check if integer n is a prime
def isprime(n):  # https://stackoverflow.com/questions/18833759/python-prime-number-checker
    """check if integer n is a prime"""

    # make sure n is a positive integer
    n = abs(int(n))

    # 0 and 1 are not primes
    if n < 2:
        return False

    # 2 is the only even prime number
    if n == 2:
        return True

    # all other even numbers are not primes
    if not n & 1:
        return False

    # range starts with 3 and only needs to go up
    # the square root of n for all odd numbers
    for x in range(3, int(n ** 0.5) + 1, 2):
        if n % x == 0:
            return False

    return True
[ "def", "isprime", "(", "n", ")", ":", "# https://stackoverflow.com/questions/18833759/python-prime-number-checker", "# make sure n is a positive integer", "n", "=", "abs", "(", "int", "(", "n", ")", ")", "# 0 and 1 are not primes", "if", "n", "<", "2", ":", "return", "False", "# 2 is the only even prime number", "if", "n", "==", "2", ":", "return", "True", "# all other even numbers are not primes", "if", "not", "n", "&", "1", ":", "return", "False", "# range starts with 3 and only needs to go up", "# the square root of n for all odd numbers", "for", "x", "in", "range", "(", "3", ",", "int", "(", "n", "**", "0.5", ")", "+", "1", ",", "2", ")", ":", "if", "n", "%", "x", "==", "0", ":", "return", "False", "return", "True" ]
[ 35, 0 ]
[ 60, 15 ]
python
en
['en', 'en', 'en']
True
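A quick sanity check of the trial-division routine above. The square-root bound works because any composite n = a*b with a <= b has a <= sqrt(n), so a divisor must appear by then:

# First few primes via the isprime() shown above.
primes = [n for n in range(2, 30) if isprime(n)]
print(primes)  # [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]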
TestIO.test_read_parquet
(self)
This test is unusual, because on travis (but only on travis), we have observed problems importing pyarrow, which breaks this test (since it requires pyarrow available). The issue seems to be related to a binary compatibility issue with the installed/available version of numpy: pyarrow 0.10 requires numpy >= 1.14. Since pyarrow is not in our actual requirements, we are not going to adjust up the required numpy version.
This test is unusual, because on travis (but only on travis), we have observed problems importing pyarrow, which breaks this test (since it requires pyarrow available).
def test_read_parquet(self):
    """
    This test is unusual, because on travis (but only on travis), we have observed problems
    importing pyarrow, which breaks this test (since it requires pyarrow available).

    The issue seems to be related to a binary compatibility issue with the installed/available
    version of numpy: pyarrow 0.10 requires numpy >= 1.14.

    Since pyarrow is not in our actual requirements, we are not going to adjust up the
    required numpy version.
    """
    # Pass this test if the available version of pandas is less than 0.21.0, because prior
    # versions of pandas did not include the read_parquet function.
    pandas_version = re.match(r"(\d+)\.(\d+)\..+", pd.__version__)
    if pandas_version is None:
        raise ValueError("Unrecognized pandas version!")
    else:
        pandas_major_version = int(pandas_version.group(1))
        pandas_minor_version = int(pandas_version.group(2))
        if pandas_major_version == 0 and pandas_minor_version < 23:
            pytest.skip("Pandas version < 23 is no longer compatible with pyarrow")

    script_path = os.path.dirname(os.path.realpath(__file__))
    df = ge.read_parquet(script_path + "/test_sets/Titanic.parquet")
    assert df["Name"][1] == "Allen, Miss Elisabeth Walton"
    assert isinstance(df, PandasDataset)
[ "def", "test_read_parquet", "(", "self", ")", ":", "# Pass this test if the available version of pandas is less than 0.21.0, because prior", "# versions of pandas did not include the read_parquet function.", "pandas_version", "=", "re", ".", "match", "(", "r\"(\\d+)\\.(\\d+)\\..+\"", ",", "pd", ".", "__version__", ")", "if", "pandas_version", "is", "None", ":", "raise", "ValueError", "(", "\"Unrecognized pandas version!\"", ")", "else", ":", "pandas_major_version", "=", "int", "(", "pandas_version", ".", "group", "(", "1", ")", ")", "pandas_minor_version", "=", "int", "(", "pandas_version", ".", "group", "(", "2", ")", ")", "if", "pandas_major_version", "==", "0", "and", "pandas_minor_version", "<", "23", ":", "pytest", ".", "skip", "(", "\"Pandas version < 23 is no longer compatible with pyarrow\"", ")", "script_path", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "realpath", "(", "__file__", ")", ")", "df", "=", "ge", ".", "read_parquet", "(", "script_path", "+", "\"/test_sets/Titanic.parquet\"", ")", "assert", "df", "[", "\"Name\"", "]", "[", "1", "]", "==", "\"Allen, Miss Elisabeth Walton\"", "assert", "isinstance", "(", "df", ",", "PandasDataset", ")" ]
[ 1067, 4 ]
[ 1092, 44 ]
python
en
['en', 'error', 'th']
False
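The version gate in the test above is a reusable pattern. Below is a standalone sketch of the same parse (the helper name is illustrative; the 0.23 threshold is the test's, not a general recommendation):

import re

def pandas_older_than(version_string, major=0, minor=23):
    # Matches 'MAJOR.MINOR.<rest>' strings such as '0.22.0' or '1.5.3'.
    m = re.match(r"(\d+)\.(\d+)\..+", version_string)
    if m is None:
        raise ValueError("Unrecognized pandas version!")
    return (int(m.group(1)), int(m.group(2))) < (major, minor)

print(pandas_older_than('0.22.0'))  # True  -> the parquet test would be skipped
print(pandas_older_than('1.5.3'))   # False -> the test runs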
PerfCost._run_configuration
( self, run_type: "PerfCost.RunType", settings: dict, invocations: int, repetitions: int, suffix: str = "", )
Cold experiment: schedule all invocations in parallel.
Cold experiment: schedule all invocations in parallel.
def _run_configuration(
    self,
    run_type: "PerfCost.RunType",
    settings: dict,
    invocations: int,
    repetitions: int,
    suffix: str = "",
):

    # Randomize starting value to ensure that it's not the same
    # as in the previous run.
    # Otherwise we could not change anything and containers won't be killed.
    from random import randrange

    self._deployment_client.cold_start_counter = randrange(100)

    """
        Cold experiment: schedule all invocations in parallel.
    """
    file_name = (
        f"{run_type.str()}_results_{suffix}.json"
        if suffix
        else f"{run_type.str()}_results.json"
    )
    self.logging.info(f"Begin {run_type.str()} experiments")
    incorrect_executions = []
    error_executions = []
    error_count = 0
    incorrect_count = 0
    colds_count = 0
    with open(os.path.join(self._out_dir, file_name), "w") as out_f:
        samples_gathered = 0
        client_times = []
        with ThreadPool(invocations) as pool:
            result = ExperimentResult(self.config, self._deployment_client.config)
            result.begin()
            samples_generated = 0

            # Warm up container
            # For "warm" runs, we do it automatically by pruning cold results
            if run_type == PerfCost.RunType.SEQUENTIAL:
                self._trigger.sync_invoke(self._benchmark_input)

            first_iteration = True
            while samples_gathered < repetitions:

                if run_type == PerfCost.RunType.COLD or run_type == PerfCost.RunType.BURST:
                    self._deployment_client.enforce_cold_start(
                        [self._function], self._benchmark
                    )
                    time.sleep(5)

                results = []
                for i in range(0, invocations):
                    results.append(
                        pool.apply_async(
                            self._trigger.sync_invoke, args=(self._benchmark_input,)
                        )
                    )

                incorrect = []
                for res in results:
                    try:
                        ret = res.get()
                        if first_iteration:
                            continue
                        if (run_type == PerfCost.RunType.COLD and not ret.stats.cold_start) or (
                            run_type == PerfCost.RunType.WARM and ret.stats.cold_start
                        ):
                            self.logging.info(
                                f"Invocation {ret.request_id} "
                                f"cold: {ret.stats.cold_start} "
                                f"on experiment {run_type.str()}!"
                            )
                            incorrect.append(ret)
                        else:
                            result.add_invocation(self._function, ret)
                            colds_count += ret.stats.cold_start
                            client_times.append(ret.times.client / 1000.0)
                            samples_gathered += 1
                    except Exception as e:
                        error_count += 1
                        error_executions.append(str(e))
                self.logging.info(
                    f"Processed {samples_gathered} samples out of {repetitions},"
                    f"{error_count} errors"
                )
                samples_generated += invocations
                if first_iteration:
                    self.logging.info(
                        f"Processed {samples_gathered} warm-up samples, ignore results."
                    )
                first_iteration = False

                if len(incorrect) > 0:
                    incorrect_executions.extend(incorrect)
                    incorrect_count += len(incorrect)

                time.sleep(5)

            result.end()
            self.compute_statistics(client_times)
            out_f.write(
                serialize(
                    {
                        **json.loads(serialize(result)),
                        "statistics": {
                            "samples_generated": samples_gathered,
                            "failures": error_executions,
                            "failures_count": error_count,
                            "incorrect": incorrect_executions,
                            "incorrect_count": incorrect_count,
                            "cold_count": colds_count,
                        },
                    }
                )
            )
[ "def", "_run_configuration", "(", "self", ",", "run_type", ":", "\"PerfCost.RunType\"", ",", "settings", ":", "dict", ",", "invocations", ":", "int", ",", "repetitions", ":", "int", ",", "suffix", ":", "str", "=", "\"\"", ",", ")", ":", "# Randomize starting value to ensure that it's not the same", "# as in the previous run.", "# Otherwise we could not change anything and containers won't be killed.", "from", "random", "import", "randrange", "self", ".", "_deployment_client", ".", "cold_start_counter", "=", "randrange", "(", "100", ")", "file_name", "=", "(", "f\"{run_type.str()}_results_{suffix}.json\"", "if", "suffix", "else", "f\"{run_type.str()}_results.json\"", ")", "self", ".", "logging", ".", "info", "(", "f\"Begin {run_type.str()} experiments\"", ")", "incorrect_executions", "=", "[", "]", "error_executions", "=", "[", "]", "error_count", "=", "0", "incorrect_count", "=", "0", "colds_count", "=", "0", "with", "open", "(", "os", ".", "path", ".", "join", "(", "self", ".", "_out_dir", ",", "file_name", ")", ",", "\"w\"", ")", "as", "out_f", ":", "samples_gathered", "=", "0", "client_times", "=", "[", "]", "with", "ThreadPool", "(", "invocations", ")", "as", "pool", ":", "result", "=", "ExperimentResult", "(", "self", ".", "config", ",", "self", ".", "_deployment_client", ".", "config", ")", "result", ".", "begin", "(", ")", "samples_generated", "=", "0", "# Warm up container", "# For \"warm\" runs, we do it automatically by pruning cold results", "if", "run_type", "==", "PerfCost", ".", "RunType", ".", "SEQUENTIAL", ":", "self", ".", "_trigger", ".", "sync_invoke", "(", "self", ".", "_benchmark_input", ")", "first_iteration", "=", "True", "while", "samples_gathered", "<", "repetitions", ":", "if", "run_type", "==", "PerfCost", ".", "RunType", ".", "COLD", "or", "run_type", "==", "PerfCost", ".", "RunType", ".", "BURST", ":", "self", ".", "_deployment_client", ".", "enforce_cold_start", "(", "[", "self", ".", "_function", "]", ",", "self", ".", "_benchmark", ")", "time", ".", "sleep", "(", "5", ")", "results", "=", "[", "]", "for", "i", "in", "range", "(", "0", ",", "invocations", ")", ":", "results", ".", "append", "(", "pool", ".", "apply_async", "(", "self", ".", "_trigger", ".", "sync_invoke", ",", "args", "=", "(", "self", ".", "_benchmark_input", ",", ")", ")", ")", "incorrect", "=", "[", "]", "for", "res", "in", "results", ":", "try", ":", "ret", "=", "res", ".", "get", "(", ")", "if", "first_iteration", ":", "continue", "if", "(", "run_type", "==", "PerfCost", ".", "RunType", ".", "COLD", "and", "not", "ret", ".", "stats", ".", "cold_start", ")", "or", "(", "run_type", "==", "PerfCost", ".", "RunType", ".", "WARM", "and", "ret", ".", "stats", ".", "cold_start", ")", ":", "self", ".", "logging", ".", "info", "(", "f\"Invocation {ret.request_id} \"", "f\"cold: {ret.stats.cold_start} \"", "f\"on experiment {run_type.str()}!\"", ")", "incorrect", ".", "append", "(", "ret", ")", "else", ":", "result", ".", "add_invocation", "(", "self", ".", "_function", ",", "ret", ")", "colds_count", "+=", "ret", ".", "stats", ".", "cold_start", "client_times", ".", "append", "(", "ret", ".", "times", ".", "client", "/", "1000.0", ")", "samples_gathered", "+=", "1", "except", "Exception", "as", "e", ":", "error_count", "+=", "1", "error_executions", ".", "append", "(", "str", "(", "e", ")", ")", "self", ".", "logging", ".", "info", "(", "f\"Processed {samples_gathered} samples out of {repetitions},\"", "f\"{error_count} errors\"", ")", "samples_generated", "+=", "invocations", "if", 
"first_iteration", ":", "self", ".", "logging", ".", "info", "(", "f\"Processed {samples_gathered} warm-up samples, ignore results.\"", ")", "first_iteration", "=", "False", "if", "len", "(", "incorrect", ")", ">", "0", ":", "incorrect_executions", ".", "extend", "(", "incorrect", ")", "incorrect_count", "+=", "len", "(", "incorrect", ")", "time", ".", "sleep", "(", "5", ")", "result", ".", "end", "(", ")", "self", ".", "compute_statistics", "(", "client_times", ")", "out_f", ".", "write", "(", "serialize", "(", "{", "*", "*", "json", ".", "loads", "(", "serialize", "(", "result", ")", ")", ",", "\"statistics\"", ":", "{", "\"samples_generated\"", ":", "samples_gathered", ",", "\"failures\"", ":", "error_executions", ",", "\"failures_count\"", ":", "error_count", ",", "\"incorrect\"", ":", "incorrect_executions", ",", "\"incorrect_count\"", ":", "incorrect_count", ",", "\"cold_count\"", ":", "colds_count", ",", "}", ",", "}", ")", ")" ]
[ 111, 4 ]
[ 229, 17 ]
python
en
['en', 'error', 'th']
False
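At its core the loop above is a fan-out/fan-in over multiprocessing.pool.ThreadPool: schedule every invocation with apply_async, then drain the futures with .get(), tallying failures instead of aborting. A stripped-down sketch with a stand-in task (nothing here comes from the sebs codebase):

import random
import time
from multiprocessing.pool import ThreadPool

def invoke(payload):
    # Stand-in for trigger.sync_invoke; fails ~20% of the time.
    time.sleep(0.01)
    if random.random() < 0.2:
        raise RuntimeError('invocation failed')
    return {'payload': payload, 'client_time_ms': random.uniform(50, 150)}

invocations, errors, samples = 8, 0, []
with ThreadPool(invocations) as pool:
    futures = [pool.apply_async(invoke, args=(i,)) for i in range(invocations)]
    for fut in futures:
        try:
            samples.append(fut.get())
        except Exception:
            errors += 1  # errors are counted, not fatal, as in the loop above
print(f'{len(samples)} samples, {errors} errors')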
UserSerializer.create
(self, validated_data)
Create a new user with encrypted password and return it
Create a new user with encrypted password and return it
def create(self, validated_data):
    """Create a new user with encrypted password and return it"""
    return get_user_model().objects.create_user(**validated_data)
[ "def", "create", "(", "self", ",", "validted_data", ")", ":", "return", "get_user_model", "(", ")", ".", "objects", ".", "create_user", "(", "*", "*", "validted_data", ")" ]
[ 13, 4 ]
[ 15, 68 ]
python
en
['en', 'en', 'en']
True
UserSerializer.update
(self, instance, validated_data)
Update a user, setting the password correctly and return it
Update a user, setting the password correctly and return it
def update(self, instance, validated_data):
    """Update a user, setting the password correctly and return it"""
    password = validated_data.pop('password', None)
    user = super().update(instance, validated_data)

    if password:
        user.set_password(password)
        user.save()

    return user
[ "def", "update", "(", "self", ",", "instance", ",", "validated_data", ")", ":", "password", "=", "validated_data", ".", "pop", "(", "'password'", ",", "None", ")", "user", "=", "super", "(", ")", ".", "update", "(", "instance", ",", "validated_data", ")", "if", "password", ":", "user", ".", "set_password", "(", "password", ")", "user", ".", "save", "(", ")", "return", "user" ]
[ 17, 4 ]
[ 26, 19 ]
python
en
['en', 'en', 'en']
True
AuthTokenSerializer.validate
(self, attrs)
Validate and authenticate the user
Validate and authenticate the user
def validate(self, attrs):
    """Validate and authenticate the user"""
    email = attrs.get('email')
    password = attrs.get('password')

    user = authenticate(
        request=self.context.get('request'),
        username=email,
        password=password
    )
    if not user:
        msg = _('Unable to authenticate with provided credentials')
        raise serializers.ValidationError(msg, code='authentication')

    attrs['user'] = user
    return attrs
[ "def", "validate", "(", "self", ",", "attrs", ")", ":", "email", "=", "attrs", ".", "get", "(", "'email'", ")", "password", "=", "attrs", ".", "get", "(", "'password'", ")", "user", "=", "authenticate", "(", "request", "=", "self", ".", "context", ".", "get", "(", "'request'", ")", ",", "username", "=", "email", ",", "password", "=", "password", ")", "if", "not", "user", ":", "msg", "=", "_", "(", "'Unable to authenticate with provided credentials'", ")", "raise", "serializers", ".", "ValidationError", "(", "msg", ",", "code", "=", "'authentication'", ")", "attrs", "[", "'user'", "]", "=", "user", "return", "attrs" ]
[ 37, 4 ]
[ 52, 20 ]
python
en
['en', 'en', 'en']
True
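For context, a sketch of how a serializer like the one above is typically wired into a DRF token endpoint; the view name is illustrative, while ObtainAuthToken and api_settings are standard rest_framework imports:

from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings

class CreateTokenView(ObtainAuthToken):
    """Create a new auth token for the user."""
    serializer_class = AuthTokenSerializer  # the serializer from the row above
    renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES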
CalculateGeneratorInputInfo
(params)
Calculate the generator specific info that gets fed to input (called by gyp).
Calculate the generator specific info that gets fed to input (called by gyp).
def CalculateGeneratorInputInfo(params):
  """Calculate the generator specific info that gets fed to input (called by
  gyp)."""
  generator_flags = params.get('generator_flags', {})
  if generator_flags.get('adjust_static_libraries', False):
    global generator_wants_static_library_dependencies_adjusted
    generator_wants_static_library_dependencies_adjusted = True
[ "def", "CalculateGeneratorInputInfo", "(", "params", ")", ":", "generator_flags", "=", "params", ".", "get", "(", "'generator_flags'", ",", "{", "}", ")", "if", "generator_flags", ".", "get", "(", "'adjust_static_libraries'", ",", "False", ")", ":", "global", "generator_wants_static_library_dependencies_adjusted", "generator_wants_static_library_dependencies_adjusted", "=", "True" ]
[ 70, 0 ]
[ 76, 63 ]
python
en
['en', 'en', 'en']
True
GetAllIncludeDirectories
(target_list, target_dicts, shared_intermediate_dirs, config_name, params, compiler_path)
Calculate the set of include directories to be used.

  Returns:
    A list including all the include_dir's specified for every target followed
    by any include directories that were added as cflag compiler options.
Calculate the set of include directories to be used.
def GetAllIncludeDirectories(target_list, target_dicts,
                             shared_intermediate_dirs, config_name, params,
                             compiler_path):
  """Calculate the set of include directories to be used.

  Returns:
    A list including all the include_dir's specified for every target followed
    by any include directories that were added as cflag compiler options.
  """

  gyp_includes_set = set()
  compiler_includes_list = []

  # Find compiler's default include dirs.
  if compiler_path:
    command = shlex.split(compiler_path)
    command.extend(['-E', '-xc++', '-v', '-'])
    proc = subprocess.Popen(args=command, stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    output = proc.communicate()[1]
    # Extract the list of include dirs from the output, which has this format:
    #   ...
    #   #include "..." search starts here:
    #   #include <...> search starts here:
    #    /usr/include/c++/4.6
    #    /usr/local/include
    #   End of search list.
    #   ...
    in_include_list = False
    for line in output.splitlines():
      if line.startswith('#include'):
        in_include_list = True
        continue
      if line.startswith('End of search list.'):
        break
      if in_include_list:
        include_dir = line.strip()
        if include_dir not in compiler_includes_list:
          compiler_includes_list.append(include_dir)

  flavor = gyp.common.GetFlavor(params)
  if flavor == 'win':
    generator_flags = params.get('generator_flags', {})
  for target_name in target_list:
    target = target_dicts[target_name]
    if config_name in target['configurations']:
      config = target['configurations'][config_name]

      # Look for any include dirs that were explicitly added via cflags. This
      # may be done in gyp files to force certain includes to come at the end.
      # TODO(jgreenwald): Change the gyp files to not abuse cflags for this, and
      # remove this.
      if flavor == 'win':
        msvs_settings = gyp.msvs_emulation.MsvsSettings(target, generator_flags)
        cflags = msvs_settings.GetCflags(config_name)
      else:
        cflags = config['cflags']
      for cflag in cflags:
        if cflag.startswith('-I'):
          include_dir = cflag[2:]
          if include_dir not in compiler_includes_list:
            compiler_includes_list.append(include_dir)

      # Find standard gyp include dirs.
      if config.has_key('include_dirs'):
        include_dirs = config['include_dirs']
        for shared_intermediate_dir in shared_intermediate_dirs:
          for include_dir in include_dirs:
            include_dir = include_dir.replace('$SHARED_INTERMEDIATE_DIR',
                                              shared_intermediate_dir)
            if not os.path.isabs(include_dir):
              base_dir = os.path.dirname(target_name)

              include_dir = base_dir + '/' + include_dir
              include_dir = os.path.abspath(include_dir)

            gyp_includes_set.add(include_dir)

  # Generate a list that has all the include dirs.
  all_includes_list = list(gyp_includes_set)
  all_includes_list.sort()
  for compiler_include in compiler_includes_list:
    if not compiler_include in gyp_includes_set:
      all_includes_list.append(compiler_include)

  # All done.
  return all_includes_list
[ "def", "GetAllIncludeDirectories", "(", "target_list", ",", "target_dicts", ",", "shared_intermediate_dirs", ",", "config_name", ",", "params", ",", "compiler_path", ")", ":", "gyp_includes_set", "=", "set", "(", ")", "compiler_includes_list", "=", "[", "]", "# Find compiler's default include dirs.", "if", "compiler_path", ":", "command", "=", "shlex", ".", "split", "(", "compiler_path", ")", "command", ".", "extend", "(", "[", "'-E'", ",", "'-xc++'", ",", "'-v'", ",", "'-'", "]", ")", "proc", "=", "subprocess", ".", "Popen", "(", "args", "=", "command", ",", "stdin", "=", "subprocess", ".", "PIPE", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ")", "output", "=", "proc", ".", "communicate", "(", ")", "[", "1", "]", "# Extract the list of include dirs from the output, which has this format:", "# ...", "# #include \"...\" search starts here:", "# #include <...> search starts here:", "# /usr/include/c++/4.6", "# /usr/local/include", "# End of search list.", "# ...", "in_include_list", "=", "False", "for", "line", "in", "output", ".", "splitlines", "(", ")", ":", "if", "line", ".", "startswith", "(", "'#include'", ")", ":", "in_include_list", "=", "True", "continue", "if", "line", ".", "startswith", "(", "'End of search list.'", ")", ":", "break", "if", "in_include_list", ":", "include_dir", "=", "line", ".", "strip", "(", ")", "if", "include_dir", "not", "in", "compiler_includes_list", ":", "compiler_includes_list", ".", "append", "(", "include_dir", ")", "flavor", "=", "gyp", ".", "common", ".", "GetFlavor", "(", "params", ")", "if", "flavor", "==", "'win'", ":", "generator_flags", "=", "params", ".", "get", "(", "'generator_flags'", ",", "{", "}", ")", "for", "target_name", "in", "target_list", ":", "target", "=", "target_dicts", "[", "target_name", "]", "if", "config_name", "in", "target", "[", "'configurations'", "]", ":", "config", "=", "target", "[", "'configurations'", "]", "[", "config_name", "]", "# Look for any include dirs that were explicitly added via cflags. 
This", "# may be done in gyp files to force certain includes to come at the end.", "# TODO(jgreenwald): Change the gyp files to not abuse cflags for this, and", "# remove this.", "if", "flavor", "==", "'win'", ":", "msvs_settings", "=", "gyp", ".", "msvs_emulation", ".", "MsvsSettings", "(", "target", ",", "generator_flags", ")", "cflags", "=", "msvs_settings", ".", "GetCflags", "(", "config_name", ")", "else", ":", "cflags", "=", "config", "[", "'cflags'", "]", "for", "cflag", "in", "cflags", ":", "if", "cflag", ".", "startswith", "(", "'-I'", ")", ":", "include_dir", "=", "cflag", "[", "2", ":", "]", "if", "include_dir", "not", "in", "compiler_includes_list", ":", "compiler_includes_list", ".", "append", "(", "include_dir", ")", "# Find standard gyp include dirs.", "if", "config", ".", "has_key", "(", "'include_dirs'", ")", ":", "include_dirs", "=", "config", "[", "'include_dirs'", "]", "for", "shared_intermediate_dir", "in", "shared_intermediate_dirs", ":", "for", "include_dir", "in", "include_dirs", ":", "include_dir", "=", "include_dir", ".", "replace", "(", "'$SHARED_INTERMEDIATE_DIR'", ",", "shared_intermediate_dir", ")", "if", "not", "os", ".", "path", ".", "isabs", "(", "include_dir", ")", ":", "base_dir", "=", "os", ".", "path", ".", "dirname", "(", "target_name", ")", "include_dir", "=", "base_dir", "+", "'/'", "+", "include_dir", "include_dir", "=", "os", ".", "path", ".", "abspath", "(", "include_dir", ")", "gyp_includes_set", ".", "add", "(", "include_dir", ")", "# Generate a list that has all the include dirs.", "all_includes_list", "=", "list", "(", "gyp_includes_set", ")", "all_includes_list", ".", "sort", "(", ")", "for", "compiler_include", "in", "compiler_includes_list", ":", "if", "not", "compiler_include", "in", "gyp_includes_set", ":", "all_includes_list", ".", "append", "(", "compiler_include", ")", "# All done.", "return", "all_includes_list" ]
[ 79, 0 ]
[ 165, 26 ]
python
en
['en', 'en', 'en']
True
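The compiler probe above (run the driver with -E -xc++ -v on empty input and scrape stderr between the '#include' banners and 'End of search list.') can be exercised on its own. A Python 3 sketch, assuming a gcc/clang-compatible 'c++' driver on PATH:

import shlex
import subprocess

def default_include_dirs(compiler='c++'):
    # Preprocess empty input verbosely; the search list is printed on stderr.
    command = shlex.split(compiler) + ['-E', '-xc++', '-v', '-']
    proc = subprocess.Popen(command, stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stderr = proc.communicate(input=b'')[1].decode('utf-8', 'replace')

    dirs, in_list = [], False
    for line in stderr.splitlines():
        if line.startswith('#include'):
            in_list = True
            continue
        if line.startswith('End of search list.'):
            break
        if in_list and line.strip() not in dirs:
            dirs.append(line.strip())
    return dirs

print(default_include_dirs())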
GetCompilerPath
(target_list, data, options)
Determine a command that can be used to invoke the compiler.

  Returns:
    If this is a gyp project that has explicit make settings, try to determine
    the compiler from that.  Otherwise, see if a compiler was specified via the
    CC_target environment variable.
Determine a command that can be used to invoke the compiler.
def GetCompilerPath(target_list, data, options):
  """Determine a command that can be used to invoke the compiler.

  Returns:
    If this is a gyp project that has explicit make settings, try to determine
    the compiler from that.  Otherwise, see if a compiler was specified via the
    CC_target environment variable.
  """
  # First, see if the compiler is configured in make's settings.
  build_file, _, _ = gyp.common.ParseQualifiedTarget(target_list[0])
  make_global_settings_dict = data[build_file].get('make_global_settings', {})
  for key, value in make_global_settings_dict:
    if key in ['CC', 'CXX']:
      return os.path.join(options.toplevel_dir, value)

  # Check to see if the compiler was specified as an environment variable.
  for key in ['CC_target', 'CC', 'CXX']:
    compiler = os.environ.get(key)
    if compiler:
      return compiler

  return 'gcc'
[ "def", "GetCompilerPath", "(", "target_list", ",", "data", ",", "options", ")", ":", "# First, see if the compiler is configured in make's settings.", "build_file", ",", "_", ",", "_", "=", "gyp", ".", "common", ".", "ParseQualifiedTarget", "(", "target_list", "[", "0", "]", ")", "make_global_settings_dict", "=", "data", "[", "build_file", "]", ".", "get", "(", "'make_global_settings'", ",", "{", "}", ")", "for", "key", ",", "value", "in", "make_global_settings_dict", ":", "if", "key", "in", "[", "'CC'", ",", "'CXX'", "]", ":", "return", "os", ".", "path", ".", "join", "(", "options", ".", "toplevel_dir", ",", "value", ")", "# Check to see if the compiler was specified as an environment variable.", "for", "key", "in", "[", "'CC_target'", ",", "'CC'", ",", "'CXX'", "]", ":", "compiler", "=", "os", ".", "environ", ".", "get", "(", "key", ")", "if", "compiler", ":", "return", "compiler", "return", "'gcc'" ]
[ 168, 0 ]
[ 189, 14 ]
python
en
['en', 'en', 'en']
True
GetAllDefines
(target_list, target_dicts, data, config_name, params, compiler_path)
Calculate the defines for a project.

  Returns:
    A dict that includes explicit defines declared in gyp files along with all
    of the default defines that the compiler uses.
Calculate the defines for a project.
def GetAllDefines(target_list, target_dicts, data, config_name, params,
                  compiler_path):
  """Calculate the defines for a project.

  Returns:
    A dict that includes explicit defines declared in gyp files along with all
    of the default defines that the compiler uses.
  """
  # Get defines declared in the gyp files.
  all_defines = {}
  flavor = gyp.common.GetFlavor(params)
  if flavor == 'win':
    generator_flags = params.get('generator_flags', {})
  for target_name in target_list:
    target = target_dicts[target_name]

    if flavor == 'win':
      msvs_settings = gyp.msvs_emulation.MsvsSettings(target, generator_flags)
      extra_defines = msvs_settings.GetComputedDefines(config_name)
    else:
      extra_defines = []
    if config_name in target['configurations']:
      config = target['configurations'][config_name]
      target_defines = config['defines']
    else:
      target_defines = []
    for define in target_defines + extra_defines:
      split_define = define.split('=', 1)
      if len(split_define) == 1:
        split_define.append('1')
      if split_define[0].strip() in all_defines:
        # Already defined
        continue
      all_defines[split_define[0].strip()] = split_define[1].strip()

  # Get default compiler defines (if possible).
  if flavor == 'win':
    return all_defines  # Default defines already processed in the loop above.
  if compiler_path:
    command = shlex.split(compiler_path)
    command.extend(['-E', '-dM', '-'])
    cpp_proc = subprocess.Popen(args=command, cwd='.',
                                stdin=subprocess.PIPE, stdout=subprocess.PIPE)
    cpp_output = cpp_proc.communicate()[0]
    cpp_lines = cpp_output.split('\n')
    for cpp_line in cpp_lines:
      if not cpp_line.strip():
        continue
      cpp_line_parts = cpp_line.split(' ', 2)
      key = cpp_line_parts[1]
      if len(cpp_line_parts) >= 3:
        val = cpp_line_parts[2]
      else:
        val = '1'
      all_defines[key] = val

  return all_defines
[ "def", "GetAllDefines", "(", "target_list", ",", "target_dicts", ",", "data", ",", "config_name", ",", "params", ",", "compiler_path", ")", ":", "# Get defines declared in the gyp files.", "all_defines", "=", "{", "}", "flavor", "=", "gyp", ".", "common", ".", "GetFlavor", "(", "params", ")", "if", "flavor", "==", "'win'", ":", "generator_flags", "=", "params", ".", "get", "(", "'generator_flags'", ",", "{", "}", ")", "for", "target_name", "in", "target_list", ":", "target", "=", "target_dicts", "[", "target_name", "]", "if", "flavor", "==", "'win'", ":", "msvs_settings", "=", "gyp", ".", "msvs_emulation", ".", "MsvsSettings", "(", "target", ",", "generator_flags", ")", "extra_defines", "=", "msvs_settings", ".", "GetComputedDefines", "(", "config_name", ")", "else", ":", "extra_defines", "=", "[", "]", "if", "config_name", "in", "target", "[", "'configurations'", "]", ":", "config", "=", "target", "[", "'configurations'", "]", "[", "config_name", "]", "target_defines", "=", "config", "[", "'defines'", "]", "else", ":", "target_defines", "=", "[", "]", "for", "define", "in", "target_defines", "+", "extra_defines", ":", "split_define", "=", "define", ".", "split", "(", "'='", ",", "1", ")", "if", "len", "(", "split_define", ")", "==", "1", ":", "split_define", ".", "append", "(", "'1'", ")", "if", "split_define", "[", "0", "]", ".", "strip", "(", ")", "in", "all_defines", ":", "# Already defined", "continue", "all_defines", "[", "split_define", "[", "0", "]", ".", "strip", "(", ")", "]", "=", "split_define", "[", "1", "]", ".", "strip", "(", ")", "# Get default compiler defines (if possible).", "if", "flavor", "==", "'win'", ":", "return", "all_defines", "# Default defines already processed in the loop above.", "if", "compiler_path", ":", "command", "=", "shlex", ".", "split", "(", "compiler_path", ")", "command", ".", "extend", "(", "[", "'-E'", ",", "'-dM'", ",", "'-'", "]", ")", "cpp_proc", "=", "subprocess", ".", "Popen", "(", "args", "=", "command", ",", "cwd", "=", "'.'", ",", "stdin", "=", "subprocess", ".", "PIPE", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", "cpp_output", "=", "cpp_proc", ".", "communicate", "(", ")", "[", "0", "]", "cpp_lines", "=", "cpp_output", ".", "split", "(", "'\\n'", ")", "for", "cpp_line", "in", "cpp_lines", ":", "if", "not", "cpp_line", ".", "strip", "(", ")", ":", "continue", "cpp_line_parts", "=", "cpp_line", ".", "split", "(", "' '", ",", "2", ")", "key", "=", "cpp_line_parts", "[", "1", "]", "if", "len", "(", "cpp_line_parts", ")", ">=", "3", ":", "val", "=", "cpp_line_parts", "[", "2", "]", "else", ":", "val", "=", "'1'", "all_defines", "[", "key", "]", "=", "val", "return", "all_defines" ]
[ 192, 0 ]
[ 248, 20 ]
python
en
['en', 'en', 'en']
True
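The interesting trick in GetAllDefines is harvesting the compiler's built-in macros by preprocessing empty input with -E -dM. A minimal standalone sketch of that technique, assuming a GCC-compatible compiler on PATH and using Python 3's subprocess.run rather than the Popen call in the recorded (Python 2 era) function:

import shlex
import subprocess

def get_compiler_defines(compiler_path='cc'):
    # -E runs only the preprocessor; -dM dumps every '#define NAME VALUE'
    # in effect, and '-' reads the (empty) source from stdin.
    command = shlex.split(compiler_path) + ['-E', '-dM', '-']
    output = subprocess.run(command, input='', capture_output=True,
                            text=True, check=True).stdout
    defines = {}
    for line in output.splitlines():
        parts = line.split(' ', 2)  # '#define', name, optional value
        if len(parts) < 2 or parts[0] != '#define':
            continue
        defines[parts[1]] = parts[2] if len(parts) >= 3 else '1'
    return defines

# e.g. get_compiler_defines('gcc').get('__GNUC__') -> the compiler's major version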
WriteIncludePaths
(out, eclipse_langs, include_dirs)
Write the includes section of a CDT settings export file.
Write the includes section of a CDT settings export file.
def WriteIncludePaths(out, eclipse_langs, include_dirs): """Write the includes section of a CDT settings export file.""" out.write(' <section name="org.eclipse.cdt.internal.ui.wizards.' \ 'settingswizards.IncludePaths">\n') out.write(' <language name="holder for library settings"></language>\n') for lang in eclipse_langs: out.write(' <language name="%s">\n' % lang) for include_dir in include_dirs: out.write(' <includepath workspace_path="false">%s</includepath>\n' % include_dir) out.write(' </language>\n') out.write(' </section>\n')
[ "def", "WriteIncludePaths", "(", "out", ",", "eclipse_langs", ",", "include_dirs", ")", ":", "out", ".", "write", "(", "' <section name=\"org.eclipse.cdt.internal.ui.wizards.'", "'settingswizards.IncludePaths\">\\n'", ")", "out", ".", "write", "(", "' <language name=\"holder for library settings\"></language>\\n'", ")", "for", "lang", "in", "eclipse_langs", ":", "out", ".", "write", "(", "' <language name=\"%s\">\\n'", "%", "lang", ")", "for", "include_dir", "in", "include_dirs", ":", "out", ".", "write", "(", "' <includepath workspace_path=\"false\">%s</includepath>\\n'", "%", "include_dir", ")", "out", ".", "write", "(", "' </language>\\n'", ")", "out", ".", "write", "(", "' </section>\\n'", ")" ]
[ 251, 0 ]
[ 263, 29 ]
python
en
['en', 'en', 'en']
True
WriteMacros
(out, eclipse_langs, defines)
Write the macros section of a CDT settings export file.
Write the macros section of a CDT settings export file.
def WriteMacros(out, eclipse_langs, defines): """Write the macros section of a CDT settings export file.""" out.write(' <section name="org.eclipse.cdt.internal.ui.wizards.' \ 'settingswizards.Macros">\n') out.write(' <language name="holder for library settings"></language>\n') for lang in eclipse_langs: out.write(' <language name="%s">\n' % lang) for key in sorted(defines.iterkeys()): out.write(' <macro><name>%s</name><value>%s</value></macro>\n' % (escape(key), escape(defines[key]))) out.write(' </language>\n') out.write(' </section>\n')
[ "def", "WriteMacros", "(", "out", ",", "eclipse_langs", ",", "defines", ")", ":", "out", ".", "write", "(", "' <section name=\"org.eclipse.cdt.internal.ui.wizards.'", "'settingswizards.Macros\">\\n'", ")", "out", ".", "write", "(", "' <language name=\"holder for library settings\"></language>\\n'", ")", "for", "lang", "in", "eclipse_langs", ":", "out", ".", "write", "(", "' <language name=\"%s\">\\n'", "%", "lang", ")", "for", "key", "in", "sorted", "(", "defines", ".", "iterkeys", "(", ")", ")", ":", "out", ".", "write", "(", "' <macro><name>%s</name><value>%s</value></macro>\\n'", "%", "(", "escape", "(", "key", ")", ",", "escape", "(", "defines", "[", "key", "]", ")", ")", ")", "out", ".", "write", "(", "' </language>\\n'", ")", "out", ".", "write", "(", "' </section>\\n'", ")" ]
[ 266, 0 ]
[ 278, 29 ]
python
en
['en', 'en', 'en']
True
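WriteIncludePaths and WriteMacros emit fragments of the same CDT settings-export XML, so the easiest way to see their output shape is to drive them with an in-memory buffer. A sketch assuming Python 2 (the recorded WriteMacros calls defines.iterkeys(), which does not exist on Python 3 dicts); language and path values are illustrative:

import StringIO  # Python 2 stdlib; io.StringIO on Python 3 once iterkeys() is ported

out = StringIO.StringIO()
WriteIncludePaths(out, ['C / C++ Source File'], ['/src/include'])
WriteMacros(out, ['C / C++ Source File'], {'DEBUG': '1'})
print out.getvalue()
# Produces nested <section>/<language> elements, roughly (indentation approximate):
#   <section name="org.eclipse.cdt.internal.ui.wizards.settingswizards.IncludePaths">
#     <language name="holder for library settings"></language>
#     <language name="C / C++ Source File">
#       <includepath workspace_path="false">/src/include</includepath>
#     </language>
#   </section>
# ...followed by the equivalent Macros section containing <macro> entries.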
GenerateClasspathFile
(target_list, target_dicts, toplevel_dir, toplevel_build, out_name)
Generates a classpath file suitable for symbol navigation and code completion of Java code (such as in Android projects) by finding all .java and .jar files used as action inputs.
Generates a classpath file suitable for symbol navigation and code completion of Java code (such as in Android projects) by finding all .java and .jar files used as action inputs.
def GenerateClasspathFile(target_list, target_dicts, toplevel_dir, toplevel_build, out_name): '''Generates a classpath file suitable for symbol navigation and code completion of Java code (such as in Android projects) by finding all .java and .jar files used as action inputs.''' gyp.common.EnsureDirExists(out_name) result = ET.Element('classpath') def AddElements(kind, paths): # First, we need to normalize the paths so they are all relative to the # toplevel dir. rel_paths = set() for path in paths: if os.path.isabs(path): rel_paths.add(os.path.relpath(path, toplevel_dir)) else: rel_paths.add(path) for path in sorted(rel_paths): entry_element = ET.SubElement(result, 'classpathentry') entry_element.set('kind', kind) entry_element.set('path', path) AddElements('lib', GetJavaJars(target_list, target_dicts, toplevel_dir)) AddElements('src', GetJavaSourceDirs(target_list, target_dicts, toplevel_dir)) # Include the standard JRE container and a dummy out folder AddElements('con', ['org.eclipse.jdt.launching.JRE_CONTAINER']) # Include a dummy out folder so that Eclipse doesn't use the default /bin # folder in the root of the project. AddElements('output', [os.path.join(toplevel_build, '.eclipse-java-build')]) ET.ElementTree(result).write(out_name)
[ "def", "GenerateClasspathFile", "(", "target_list", ",", "target_dicts", ",", "toplevel_dir", ",", "toplevel_build", ",", "out_name", ")", ":", "gyp", ".", "common", ".", "EnsureDirExists", "(", "out_name", ")", "result", "=", "ET", ".", "Element", "(", "'classpath'", ")", "def", "AddElements", "(", "kind", ",", "paths", ")", ":", "# First, we need to normalize the paths so they are all relative to the", "# toplevel dir.", "rel_paths", "=", "set", "(", ")", "for", "path", "in", "paths", ":", "if", "os", ".", "path", ".", "isabs", "(", "path", ")", ":", "rel_paths", ".", "add", "(", "os", ".", "path", ".", "relpath", "(", "path", ",", "toplevel_dir", ")", ")", "else", ":", "rel_paths", ".", "add", "(", "path", ")", "for", "path", "in", "sorted", "(", "rel_paths", ")", ":", "entry_element", "=", "ET", ".", "SubElement", "(", "result", ",", "'classpathentry'", ")", "entry_element", ".", "set", "(", "'kind'", ",", "kind", ")", "entry_element", ".", "set", "(", "'path'", ",", "path", ")", "AddElements", "(", "'lib'", ",", "GetJavaJars", "(", "target_list", ",", "target_dicts", ",", "toplevel_dir", ")", ")", "AddElements", "(", "'src'", ",", "GetJavaSourceDirs", "(", "target_list", ",", "target_dicts", ",", "toplevel_dir", ")", ")", "# Include the standard JRE container and a dummy out folder", "AddElements", "(", "'con'", ",", "[", "'org.eclipse.jdt.launching.JRE_CONTAINER'", "]", ")", "# Include a dummy out folder so that Eclipse doesn't use the default /bin", "# folder in the root of the project.", "AddElements", "(", "'output'", ",", "[", "os", ".", "path", ".", "join", "(", "toplevel_build", ",", "'.eclipse-java-build'", ")", "]", ")", "ET", ".", "ElementTree", "(", "result", ")", ".", "write", "(", "out_name", ")" ]
[ 336, 0 ]
[ 367, 40 ]
python
en
['en', 'en', 'en']
True
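The element tree assembled by GenerateClasspathFile serializes to a flat Eclipse .classpath document. A self-contained sketch of the same construction (paths are illustrative; the real entries come from GetJavaJars/GetJavaSourceDirs below):

import xml.etree.ElementTree as ET

result = ET.Element('classpath')
for kind, path in [('lib', 'third_party/foo.jar'),
                   ('src', 'java/src'),
                   ('con', 'org.eclipse.jdt.launching.JRE_CONTAINER'),
                   ('output', 'out/Debug/.eclipse-java-build')]:
    entry = ET.SubElement(result, 'classpathentry')
    entry.set('kind', kind)
    entry.set('path', path)
print(ET.tostring(result).decode())
# <classpath><classpathentry kind="lib" path="third_party/foo.jar" /> ... </classpath>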
GetJavaJars
(target_list, target_dicts, toplevel_dir)
Generates a sequence of all .jars used as inputs.
Generates a sequence of all .jars used as inputs.
def GetJavaJars(target_list, target_dicts, toplevel_dir): '''Generates a sequence of all .jars used as inputs.''' for target_name in target_list: target = target_dicts[target_name] for action in target.get('actions', []): for input_ in action['inputs']: if os.path.splitext(input_)[1] == '.jar' and not input_.startswith('$'): if os.path.isabs(input_): yield input_ else: yield os.path.join(os.path.dirname(target_name), input_)
[ "def", "GetJavaJars", "(", "target_list", ",", "target_dicts", ",", "toplevel_dir", ")", ":", "for", "target_name", "in", "target_list", ":", "target", "=", "target_dicts", "[", "target_name", "]", "for", "action", "in", "target", ".", "get", "(", "'actions'", ",", "[", "]", ")", ":", "for", "input_", "in", "action", "[", "'inputs'", "]", ":", "if", "os", ".", "path", ".", "splitext", "(", "input_", ")", "[", "1", "]", "==", "'.jar'", "and", "not", "input_", ".", "startswith", "(", "'$'", ")", ":", "if", "os", ".", "path", ".", "isabs", "(", "input_", ")", ":", "yield", "input_", "else", ":", "yield", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "target_name", ")", ",", "input_", ")" ]
[ 370, 0 ]
[ 380, 68 ]
python
en
['en', 'en', 'en']
True
GetJavaSourceDirs
(target_list, target_dicts, toplevel_dir)
Generates a sequence of all likely java package root directories.
Generates a sequence of all likely java package root directories.
def GetJavaSourceDirs(target_list, target_dicts, toplevel_dir): '''Generates a sequence of all likely java package root directories.''' for target_name in target_list: target = target_dicts[target_name] for action in target.get('actions', []): for input_ in action['inputs']: if (os.path.splitext(input_)[1] == '.java' and not input_.startswith('$')): dir_ = os.path.dirname(os.path.join(os.path.dirname(target_name), input_)) # If there is a parent 'src' or 'java' folder, navigate up to it - # these are canonical package root names in Chromium. This will # break if 'src' or 'java' exists in the package structure. This # could be further improved by inspecting the java file for the # package name if this proves to be too fragile in practice. parent_search = dir_ while os.path.basename(parent_search) not in ['src', 'java']: parent_search, _ = os.path.split(parent_search) if not parent_search or parent_search == toplevel_dir: # Didn't find a known root, just return the original path yield dir_ break else: yield parent_search
[ "def", "GetJavaSourceDirs", "(", "target_list", ",", "target_dicts", ",", "toplevel_dir", ")", ":", "for", "target_name", "in", "target_list", ":", "target", "=", "target_dicts", "[", "target_name", "]", "for", "action", "in", "target", ".", "get", "(", "'actions'", ",", "[", "]", ")", ":", "for", "input_", "in", "action", "[", "'inputs'", "]", ":", "if", "(", "os", ".", "path", ".", "splitext", "(", "input_", ")", "[", "1", "]", "==", "'.java'", "and", "not", "input_", ".", "startswith", "(", "'$'", ")", ")", ":", "dir_", "=", "os", ".", "path", ".", "dirname", "(", "os", ".", "path", ".", "join", "(", "os", ".", "path", ".", "dirname", "(", "target_name", ")", ",", "input_", ")", ")", "# If there is a parent 'src' or 'java' folder, navigate up to it -", "# these are canonical package root names in Chromium. This will", "# break if 'src' or 'java' exists in the package structure. This", "# could be further improved by inspecting the java file for the", "# package name if this proves to be too fragile in practice.", "parent_search", "=", "dir_", "while", "os", ".", "path", ".", "basename", "(", "parent_search", ")", "not", "in", "[", "'src'", ",", "'java'", "]", ":", "parent_search", ",", "_", "=", "os", ".", "path", ".", "split", "(", "parent_search", ")", "if", "not", "parent_search", "or", "parent_search", "==", "toplevel_dir", ":", "# Didn't find a known root, just return the original path", "yield", "dir_", "break", "else", ":", "yield", "parent_search" ]
[ 383, 0 ]
[ 406, 31 ]
python
en
['en', 'en', 'en']
True
GenerateOutput
(target_list, target_dicts, data, params)
Generate an XML settings file that can be imported into a CDT project.
Generate an XML settings file that can be imported into a CDT project.
def GenerateOutput(target_list, target_dicts, data, params): """Generate an XML settings file that can be imported into a CDT project.""" if params['options'].generator_output: raise NotImplementedError("--generator_output not implemented for eclipse") user_config = params.get('generator_flags', {}).get('config', None) if user_config: GenerateOutputForConfig(target_list, target_dicts, data, params, user_config) else: config_names = target_dicts[target_list[0]]['configurations'].keys() for config_name in config_names: GenerateOutputForConfig(target_list, target_dicts, data, params, config_name)
[ "def", "GenerateOutput", "(", "target_list", ",", "target_dicts", ",", "data", ",", "params", ")", ":", "if", "params", "[", "'options'", "]", ".", "generator_output", ":", "raise", "NotImplementedError", "(", "\"--generator_output not implemented for eclipse\"", ")", "user_config", "=", "params", ".", "get", "(", "'generator_flags'", ",", "{", "}", ")", ".", "get", "(", "'config'", ",", "None", ")", "if", "user_config", ":", "GenerateOutputForConfig", "(", "target_list", ",", "target_dicts", ",", "data", ",", "params", ",", "user_config", ")", "else", ":", "config_names", "=", "target_dicts", "[", "target_list", "[", "0", "]", "]", "[", "'configurations'", "]", ".", "keys", "(", ")", "for", "config_name", "in", "config_names", ":", "GenerateOutputForConfig", "(", "target_list", ",", "target_dicts", ",", "data", ",", "params", ",", "config_name", ")" ]
[ 409, 0 ]
[ 423, 42 ]
python
en
['en', 'en', 'en']
True
Scanner.__init__
(self, text, flags=0)
:param text: The text which should be scanned :param flags: default regular expression flags
:param text: The text which should be scanned :param flags: default regular expression flags
def __init__(self, text, flags=0): """ :param text: The text which should be scanned :param flags: default regular expression flags """ self.data = text self.data_length = len(text) self.start_pos = 0 self.pos = 0 self.flags = flags self.last = None self.match = None self._re_cache = {}
[ "def", "__init__", "(", "self", ",", "text", ",", "flags", "=", "0", ")", ":", "self", ".", "data", "=", "text", "self", ".", "data_length", "=", "len", "(", "text", ")", "self", ".", "start_pos", "=", "0", "self", ".", "pos", "=", "0", "self", ".", "flags", "=", "flags", "self", ".", "last", "=", "None", "self", ".", "match", "=", "None", "self", ".", "_re_cache", "=", "{", "}" ]
[ 35, 4 ]
[ 47, 27 ]
python
en
['en', 'error', 'th']
False
Scanner.eos
(self)
`True` if the scanner reached the end of text.
`True` if the scanner reached the end of text.
def eos(self): """`True` if the scanner reached the end of text.""" return self.pos >= self.data_length
[ "def", "eos", "(", "self", ")", ":", "return", "self", ".", "pos", ">=", "self", ".", "data_length" ]
[ 49, 4 ]
[ 51, 43 ]
python
en
['en', 'en', 'en']
True
Scanner.check
(self, pattern)
Apply `pattern` on the current position and return the match object. (Doesn't touch pos). Use this for lookahead.
Apply `pattern` on the current position and return the match object. (Doesn't touch pos). Use this for lookahead.
def check(self, pattern): """ Apply `pattern` on the current position and return the match object. (Doesn't touch pos). Use this for lookahead. """ if self.eos: raise EndOfText() if pattern not in self._re_cache: self._re_cache[pattern] = re.compile(pattern, self.flags) return self._re_cache[pattern].match(self.data, self.pos)
[ "def", "check", "(", "self", ",", "pattern", ")", ":", "if", "self", ".", "eos", ":", "raise", "EndOfText", "(", ")", "if", "pattern", "not", "in", "self", ".", "_re_cache", ":", "self", ".", "_re_cache", "[", "pattern", "]", "=", "re", ".", "compile", "(", "pattern", ",", "self", ".", "flags", ")", "return", "self", ".", "_re_cache", "[", "pattern", "]", ".", "match", "(", "self", ".", "data", ",", "self", ".", "pos", ")" ]
[ 54, 4 ]
[ 64, 65 ]
python
en
['en', 'error', 'th']
False
Scanner.test
(self, pattern)
Apply a pattern on the current position and check if it matches. Doesn't touch pos.
Apply a pattern on the current position and check if it matches. Doesn't touch pos.
def test(self, pattern):
    """Apply a pattern on the current position and check
    if it matches. Doesn't touch pos."""
    return self.check(pattern) is not None
[ "def", "test", "(", "self", ",", "pattern", ")", ":", "return", "self", ".", "check", "(", "pattern", ")", "is", "not", "None" ]
[ 66, 4 ]
[ 69, 46 ]
python
en
['en', 'en', 'en']
True
Scanner.scan
(self, pattern)
Scan the text for the given pattern and update pos/match and related fields. The return value is a boolean that indicates if the pattern matched. The matched value is stored on the instance as ``match``, the last value is stored as ``last``. ``start_pos`` is the position of the pointer before the pattern was matched, ``pos`` is the end position.
Scan the text for the given pattern and update pos/match and related fields. The return value is a boolean that indicates if the pattern matched. The matched value is stored on the instance as ``match``, the last value is stored as ``last``. ``start_pos`` is the position of the pointer before the pattern was matched, ``pos`` is the end position.
def scan(self, pattern):
    """
    Scan the text for the given pattern and update pos/match
    and related fields. The return value is a boolean that
    indicates if the pattern matched. The matched value is
    stored on the instance as ``match``, the last value is
    stored as ``last``. ``start_pos`` is the position of the
    pointer before the pattern was matched, ``pos`` is the
    end position.
    """
    if self.eos:
        raise EndOfText()
    if pattern not in self._re_cache:
        self._re_cache[pattern] = re.compile(pattern, self.flags)
    self.last = self.match
    m = self._re_cache[pattern].match(self.data, self.pos)
    if m is None:
        return False
    self.start_pos = m.start()
    self.pos = m.end()
    self.match = m.group()
    return True
[ "def", "scan", "(", "self", ",", "pattern", ")", ":", "if", "self", ".", "eos", ":", "raise", "EndOfText", "(", ")", "if", "pattern", "not", "in", "self", ".", "_re_cache", ":", "self", ".", "_re_cache", "[", "pattern", "]", "=", "re", ".", "compile", "(", "pattern", ",", "self", ".", "flags", ")", "self", ".", "last", "=", "self", ".", "match", "m", "=", "self", ".", "_re_cache", "[", "pattern", "]", ".", "match", "(", "self", ".", "data", ",", "self", ".", "pos", ")", "if", "m", "is", "None", ":", "return", "False", "self", ".", "start_pos", "=", "m", ".", "start", "(", ")", "self", ".", "pos", "=", "m", ".", "end", "(", ")", "self", ".", "match", "=", "m", ".", "group", "(", ")", "return", "True" ]
[ 71, 4 ]
[ 92, 19 ]
python
en
['en', 'error', 'th']
False
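Taken together, the Scanner methods support a simple tokenizing loop. A usage sketch, assuming the enclosing class exposes eos as a property (as the Pygments-style scanner this appears to come from does) and that EndOfText is defined alongside it:

s = Scanner('width: 42px')
s.scan(r'\w+')     # consumes 'width'; s.match == 'width', s.pos == 5
s.scan(r':\s*')    # consumes ': '; s.last is now 'width'
s.scan(r'\d+')     # consumes '42'
print(s.last, s.match)   # previous and current matches: ': ' and '42'
while not s.eos:
    s.get_char()   # drain the rest one character at a time (see get_char below)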
Scanner.get_char
(self)
Scan exactly one char.
Scan exactly one char.
def get_char(self): """Scan exactly one char.""" self.scan('.')
[ "def", "get_char", "(", "self", ")", ":", "self", ".", "scan", "(", "'.'", ")" ]
[ 94, 4 ]
[ 96, 22 ]
python
en
['en', 'en', 'en']
True
make_pipe
()
Makes a new pair of pipes.
Makes a new pair of pipes.
async def make_pipe() -> "Tuple[PipeSendStream, PipeReceiveStream]": """Makes a new pair of pipes.""" (r, w) = pipe() return PipeSendStream(w), PipeReceiveStream(r)
[ "async", "def", "make_pipe", "(", ")", "->", "\"Tuple[PipeSendStream, PipeReceiveStream]\"", ":", "(", "r", ",", "w", ")", "=", "pipe", "(", ")", "return", "PipeSendStream", "(", "w", ")", ",", "PipeReceiveStream", "(", "r", ")" ]
[ 22, 0 ]
[ 25, 50 ]
python
en
['en', 'en', 'en']
True
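A usage sketch for make_pipe, assuming it runs inside a Trio event loop and that PipeSendStream/PipeReceiveStream are the usual Trio pipe-stream wrappers (this is an internal helper, so no import path is shown):

import trio

async def demo():
    send, recv = await make_pipe()
    async with send, recv:             # both streams are AsyncResources
        await send.send_all(b'hello')  # small write: fits within the pipe buffer
        assert await recv.receive_some(1024) == b'hello'

trio.run(demo)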
run
(args: List[str])
run is like "subprocess.run(args)", but with helpful settings and obeys "with capture_output(out)".
run is like "subprocess.run(args)", but with helpful settings and obeys "with capture_output(out)".
def run(args: List[str]) -> None: """run is like "subprocess.run(args)", but with helpful settings and obeys "with capture_output(out)". """ if _capturing: try: subprocess.run(args, check=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, text=True) except subprocess.CalledProcessError as err: raise Exception(f"{err.stdout}{err}") from err else: subprocess.run(args, check=True)
[ "def", "run", "(", "args", ":", "List", "[", "str", "]", ")", "->", "None", ":", "if", "_capturing", ":", "try", ":", "subprocess", ".", "run", "(", "args", ",", "check", "=", "True", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "STDOUT", ",", "text", "=", "True", ")", "except", "subprocess", ".", "CalledProcessError", "as", "err", ":", "raise", "Exception", "(", "f\"{err.stdout}{err}\"", ")", "from", "err", "else", ":", "subprocess", ".", "run", "(", "args", ",", "check", "=", "True", ")" ]
[ 43, 0 ]
[ 53, 40 ]
python
en
['en', 'en', 'en']
True
run_bincapture
(args: List[str])
run is like "subprocess.run(args, capture_output=True, text=False)", but with helpful settings and obeys "with capture_output(out)".
run is like "subprocess.run(args, capture_output=True, text=False)", but with helpful settings and obeys "with capture_output(out)".
def run_bincapture(args: List[str]) -> bytes:
    """run is like "subprocess.run(args, capture_output=True, text=False)", but with
    helpful settings and obeys "with capture_output(out)".
    """
    if _capturing:
        try:
            return subprocess.run(args, check=True, capture_output=True).stdout
        except subprocess.CalledProcessError as err:
            raise Exception(f"{err.stderr.decode('UTF-8')}{err}") from err
    else:
        return subprocess.run(args, check=True, stdout=subprocess.PIPE).stdout
[ "def", "run_bincapture", "(", "args", ":", "List", "[", "str", "]", ")", "->", "bytes", ":", "if", "_capturing", ":", "try", ":", "return", "subprocess", ".", "run", "(", "args", ",", "check", "=", "True", ",", "capture_output", "=", "True", ")", ".", "stdout", "except", "subprocess", ".", "CalledProcessError", "as", "err", ":", "raise", "Exception", "(", "f\"{err.stderr.decode('UTF-8')}{err}\"", ")", "from", "err", "else", ":", "return", "subprocess", ".", "run", "(", "args", ",", "check", "=", "True", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", ".", "stdout" ]
[ 56, 0 ]
[ 66, 78 ]
python
en
['en', 'en', 'en']
True
run_txtcapture
(args: List[str])
run is like "subprocess.run(args, capture_output=True, text=True)", but with helpful settings and obeys "with capture_output(out)".
run is like "subprocess.run(args, capture_output=True, text=True)", but with helpful settings and obeys "with capture_output(out)".
def run_txtcapture(args: List[str]) -> str:
    """run is like "subprocess.run(args, capture_output=True, text=True)", but with
    helpful settings and obeys "with capture_output(out)".
    """
    if _capturing:
        try:
            out = subprocess.run(args, check=True, capture_output=True, text=True).stdout
        except subprocess.CalledProcessError as err:
            raise Exception(f"{err.stderr}{err}") from err
    else:
        out = subprocess.run(args, check=True, stdout=subprocess.PIPE, text=True).stdout
    if out.endswith("\n"):
        out = out[:-1]
    return out
[ "def", "run_txtcapture", "(", "args", ":", "List", "[", "str", "]", ")", "->", "str", ":", "if", "_capturing", ":", "try", ":", "out", "=", "subprocess", ".", "run", "(", "args", ",", "check", "=", "True", ",", "capture_output", "=", "True", ",", "text", "=", "True", ")", ".", "stdout", "except", "subprocess", ".", "CalledProcessError", "as", "err", ":", "raise", "Exception", "(", "f\"{err.stderr}{err}\"", ")", "from", "err", "else", ":", "out", "=", "subprocess", ".", "run", "(", "args", ",", "check", "=", "True", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "text", "=", "True", ")", ".", "stdout", "if", "out", ".", "endswith", "(", "\"\\n\"", ")", ":", "out", "=", "out", "[", ":", "-", "1", "]", "return", "out" ]
[ 69, 0 ]
[ 82, 14 ]
python
en
['en', 'en', 'en']
True
capture_output
(log: io.StringIO)
capture_output is like contextlib.redirect_stdout but also redirects stderr, and also does some extra stuff so that we can have run/run_bincapture/run_txtcapture functions that obey it.
capture_output is like contextlib.redirect_stdout but also redirects stderr, and also does some extra stuff so that we can have run/run_bincapture/run_txtcapture functions that obey it.
def capture_output(log: io.StringIO) -> Generator[None, None, None]: """capture_output is like contextlib.redirect_stdout but also redirects stderr, and also does some extra stuff so that we can have run/run_bincapture/run_txtcapture functions that obey it. """ global _capturing saved_capturing = _capturing saved_stdout = sys.stdout saved_stderr = sys.stderr _capturing = True sys.stdout = sys.stderr = log try: yield finally: _capturing = saved_capturing sys.stdout = saved_stdout sys.stderr = saved_stderr
[ "def", "capture_output", "(", "log", ":", "io", ".", "StringIO", ")", "->", "Generator", "[", "None", ",", "None", ",", "None", "]", ":", "global", "_capturing", "saved_capturing", "=", "_capturing", "saved_stdout", "=", "sys", ".", "stdout", "saved_stderr", "=", "sys", ".", "stderr", "_capturing", "=", "True", "sys", ".", "stdout", "=", "sys", ".", "stderr", "=", "log", "try", ":", "yield", "finally", ":", "_capturing", "=", "saved_capturing", "sys", ".", "stdout", "=", "saved_stdout", "sys", ".", "stderr", "=", "saved_stderr" ]
[ 86, 0 ]
[ 104, 33 ]
python
en
['en', 'en', 'en']
True
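These helpers are designed to compose: capture_output() flips the module-global _capturing flag, and the run* wrappers switch behavior based on it. A usage sketch assuming a POSIX system with true/false on PATH:

import io

log = io.StringIO()
with capture_output(log):
    print('starting')    # redirected into log, not the terminal
    run(['true'])        # succeeds; in capturing mode the child's output is swallowed
try:
    with capture_output(log):
        run(['false'])   # non-zero exit raises, with the child's output in the message
except Exception as err:
    pass                 # the failure detail is in str(err)
print(log.getvalue())    # back on the real stdout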
_lex_char_or_cs
(text: str)
Look at the beginning of the given text and trim either a byte, or an ANSI control sequence from the beginning, returning a tuple ("char-or-cs", "remaining-text"). If it looks like the text is a truncated control sequence, then it doesn't trim anything, and returns ("", "original"); signaling that it needs to wait for more input before successfully lexing anything.
Look at the beginning of the given text and trim either a byte, or an ANSI control sequence from the beginning, returning a tuple ("char-or-cs", "remaining-text"). If it looks like the text is a truncated control sequence, then it doesn't trim anything, and returns ("", "original"); signaling that it needs to wait for more input before successfully lexing anything.
def _lex_char_or_cs(text: str) -> Tuple[str, str]:
    """Look at the beginning of the given text and trim either a byte, or an ANSI
    control sequence from the beginning, returning a tuple
    ("char-or-cs", "remaining-text"). If it looks like the text is a truncated
    control sequence, then it doesn't trim anything, and returns
    ("", "original"); signaling that it needs to wait for more input before
    successfully lexing anything.
    """
    if text == '\033':
        # wait to see if this is a control sequence
        return '', text
    i = 1
    if text.startswith('\033['):
        try:
            i = len('\033[')
            while text[i] not in string.ascii_letters:
                i += 1
            i += 1
        except IndexError:
            # wait for a complete control sequence
            return '', text
    return text[:i], text[i:]
[ "def", "_lex_char_or_cs", "(", "text", ":", "str", ")", "->", "Tuple", "[", "str", ",", "str", "]", ":", "if", "text", "==", "'\\033'", ":", "# wait to see if this is a control sequence", "return", "''", ",", "text", "i", "=", "1", "if", "text", ".", "startswith", "(", "'\\033['", ")", ":", "try", ":", "i", "=", "len", "(", "'\\033['", ")", "while", "text", "[", "i", "]", "not", "in", "string", ".", "ascii_letters", ":", "i", "+=", "1", "i", "+=", "1", "except", "IndexError", ":", "# wait for a complete control sequence", "return", "''", ",", "text", "return", "text", "[", ":", "i", "]", ",", "text", "[", "i", ":", "]" ]
[ 107, 0 ]
[ 128, 29 ]
python
en
['en', 'en', 'en']
True
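The three return shapes of _lex_char_or_cs are easiest to pin down with concrete inputs; these assertions follow directly from the code above:

assert _lex_char_or_cs('x rest') == ('x', ' rest')            # ordinary character
assert _lex_char_or_cs('\033[1mbold') == ('\033[1m', 'bold')  # complete SGR sequence
assert _lex_char_or_cs('\033[3') == ('', '\033[3')            # truncated: wait for more input
assert _lex_char_or_cs('\033') == ('', '\033')                # lone ESC: wait as well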
Indent.__init__
(self, indent: str = "", output: Optional[TextIO] = None, columns: Optional[int] = None)
Arguments: indent: str: The string to indent with. output: Optional[TextIO]: A TextIO to write to, instead of building an in-memory buffer. columns: Optional[int]: How wide the terminal is; this is important because a line wrap needs to trigger an indent. If not given, then 'output.columns' is used if 'output' is set and has a 'columns' attribute, otherwise shutil.get_terminal_size() is used. Use a value <= 0 to explicitly disable wrapping. The 'columns' attribute on the resulting object is set to the number of usable columns; "arg_columns - len(indent)". This allows Indent objects to be nested. Indent understands "\r" and "\n", but not "\t" or ANSI control sequences that move the cursor; it assumes that all ANSI control sequences do not move the cursor.
Arguments: indent: str: The string to indent with. output: Optional[TextIO]: A TextIO to write to, instead of building an in-memory buffer. columns: Optional[int]: How wide the terminal is; this is important because a line wrap needs to trigger an indent. If not given, then 'output.columns' is used if 'output' is set and has a 'columns' attribute, otherwise shutil.get_terminal_size() is used. Use a value <= 0 to explicitly disable wrapping. The 'columns' attribute on the resulting object is set to the number of usable columns; "arg_columns - len(indent)". This allows Indent objects to be nested. Indent understands "\r" and "\n", but not "\t" or ANSI control sequences that move the cursor; it assumes that all ANSI control sequences do not move the cursor.
def __init__(self, indent: str = "", output: Optional[TextIO] = None, columns: Optional[int] = None) -> None:
    """Arguments:
      indent: str: The string to indent with.
      output: Optional[TextIO]: A TextIO to write to, instead of building
        an in-memory buffer.
      columns: Optional[int]: How wide the terminal is; this is important
        because a line wrap needs to trigger an indent. If not given, then
        'output.columns' is used if 'output' is set and has a 'columns'
        attribute, otherwise shutil.get_terminal_size() is used. Use a
        value <= 0 to explicitly disable wrapping.

    The 'columns' attribute on the resulting object is set to the number of
    usable columns; "arg_columns - len(indent)". This allows Indent objects
    to be nested.

    Indent understands "\r" and "\n", but not "\t" or ANSI control sequences
    that move the cursor; it assumes that all ANSI control sequences do not
    move the cursor.
    """
    super().__init__()
    self._indent = indent
    self._output = output
    if columns is None:
        if output and hasattr(output, 'columns'):
            columns = output.columns  # type: ignore
        else:
            columns = shutil.get_terminal_size().columns
    self.columns = columns - len(self._indent)
[ "def", "__init__", "(", "self", ",", "indent", ":", "str", "=", "\"\"", ",", "output", ":", "Optional", "[", "TextIO", "]", "=", "None", ",", "columns", ":", "Optional", "[", "int", "]", "=", "None", ")", "->", "None", ":", "super", "(", ")", ".", "__init__", "(", ")", "self", ".", "_indent", "=", "indent", "self", ".", "_output", "=", "output", "if", "columns", "is", "None", ":", "if", "output", "and", "hasattr", "(", "output", ",", "'columns'", ")", ":", "columns", "=", "output", ".", "columns", "# type: ignore", "else", ":", "columns", "=", "shutil", ".", "get_terminal_size", "(", ")", ".", "columns", "self", ".", "columns", "=", "columns", "-", "len", "(", "self", ".", "_indent", ")" ]
[ 135, 4 ]
[ 163, 50 ]
python
en
['en', 'fr', 'en']
False
Indent.input
(self)
Use "myindent.input()" instead of "input()" in order to nest well with LineTrackers.
Use "myindent.input()" instead of "input()" in order to nest well with LineTrackers.
def input(self) -> str: """Use "myindent.input()" instead of "input()" in order to nest well with LineTrackers. """ if hasattr(self._output, 'input'): text: str = self._output.input() # type: ignore else: text = input() return text
[ "def", "input", "(", "self", ")", "->", "str", ":", "if", "hasattr", "(", "self", ".", "_output", ",", "'input'", ")", ":", "text", ":", "str", "=", "self", ".", "_output", ".", "input", "(", ")", "# type: ignore", "else", ":", "text", "=", "input", "(", ")", "return", "text" ]
[ 219, 4 ]
[ 227, 19 ]
python
en
['en', 'en', 'en']
True
LineTracker.input
(self)
Use "mylinetracker.input()" instead of "input()" to avoid the LineTracker not seeing any newlines input by the user.
Use "mylinetracker.input()" instead of "input()" to avoid the LineTracker not seeing any newlines input by the user.
def input(self) -> str: """Use "mylinetracker.input()" instead of "input()" to avoid the LineTracker not seeing any newlines input by the user. """ if hasattr(self._output, 'input'): text: str = self._output.input() # type: ignore else: text = input() self._handle(text + "\n") return text
[ "def", "input", "(", "self", ")", "->", "str", ":", "if", "hasattr", "(", "self", ".", "_output", ",", "'input'", ")", ":", "text", ":", "str", "=", "self", ".", "_output", ".", "input", "(", ")", "# type: ignore", "else", ":", "text", "=", "input", "(", ")", "self", ".", "_handle", "(", "text", "+", "\"\\n\"", ")", "return", "text" ]
[ 259, 4 ]
[ 268, 19 ]
python
en
['en', 'fi', 'en']
True
LineTracker.goto_line
(self, line: int)
goto_line moves the cursor to the beginning of the given line; where line 1 is the line that the LineTracker started on, line 0 is the line above that, and line 2 is the line below that.
goto_line moves the cursor to the beginning of the given line; where line 1 is the line that the LineTracker started on, line 0 is the line above that, and line 2 is the line below that.
def goto_line(self, line: int) -> None:
    """goto_line moves the cursor to the beginning of the given line; where
    line 1 is the line that the LineTracker started on, line 0 is the line
    above that, and line 2 is the line below that.
    """
    self.write("\r")
    if line < self.cur_line:
        total_lines = shutil.get_terminal_size().lines
        if (self.cur_line - line) >= total_lines:
            raise Exception(f"cannot go back {self.cur_line - line} lines (limit={total_lines - 1})")
        self.write(ansiterm.cursor_up(self.cur_line - line))
    else:
        self.write("\n" * (line - self.cur_line))
[ "def", "goto_line", "(", "self", ",", "line", ":", "int", ")", "->", "None", ":", "self", ".", "write", "(", "\"\\r\"", ")", "if", "line", "<", "self", ".", "cur_line", ":", "total_lines", "=", "shutil", ".", "get_terminal_size", "(", ")", ".", "lines", "if", "(", "self", ".", "cur_line", "-", "line", ")", ">=", "total_lines", ":", "raise", "Exception", "(", "f\"cannot go back {self.cur_line - line} lines (limit={total_lines - 1})\"", ")", "self", ".", "write", "(", "ansiterm", ".", "cursor_up", "(", "self", ".", "cur_line", "-", "line", ")", ")", "else", ":", "self", ".", "write", "(", "\"\\n\"", "*", "(", "line", "-", "self", ".", "cur_line", ")", ")" ]
[ 270, 4 ]
[ 283, 53 ]
python
en
['en', 'en', 'en']
True
Checker.check
(self, name: str, clear_on_success: bool = True)
check returns a context manager that handles printing a '[....]' / '[ OK ]' / '[FAIL]' check. While the check is running, it will stream whatever you write to stdout/stderr. If clear_on_success is True, then once the check finishes, if the check passed then it will erase that stdout/stderr output, since you probably only want diagnostic output if the check fails. You can provide a (1-line) textual check result that will be shown on both success and failure by writing to "mycheck.result". You may cause a check to fail by either raising an Exception, or by setting "mycheck.ok = False". If you do neither of these, then the check will be considered to pass. The mycheck.subcheck method returns a context manager for a nested child check.
check returns a context manager that handles printing a '[....]' / '[ OK ]' / '[FAIL]' check. While the check is running, it will stream whatever you write to stdout/stderr. If clear_on_success is True, then once the check finishes, if the check passed then it will erase that stdout/stderr output, since you probably only want diagnostic output if the check fails. You can provide a (1-line) textual check result that will be shown on both success and failure by writing to "mycheck.result". You may cause a check to fail by either raising an Exception, or by setting "mycheck.ok = False". If you do neither of these, then the check will be considered to pass. The mycheck.subcheck method returns a context manager for a nested child check.
def check(self, name: str, clear_on_success: bool = True) -> Generator['CheckResult', None, None]: """check returns a context manager that handles printing a '[....]' / '[ OK ]' / '[FAIL]' check. While the check is running, it will stream whatever you write to stdout/stderr. If clear_on_success is True, then once the check finishes, if the check passed then it will erase that stdout/stderr output, since you probably only want diagnostic output if the check fails. You can provide a (1-line) textual check result that will be shown on both success and failure by writing to "mycheck.result". You may cause a check to fail by either raising an Exception, or by setting "mycheck.ok = False". If you do neither of these, then the check will be considered to pass. The mycheck.subcheck method returns a context manager for a nested child check. """ def line(status: str, rest: Optional[str] = None) -> str: txt = name if rest: txt = f'{txt}: {rest}' return f" {status}{ansiterm.sgr} {txt}" output = LineTracker(output=sys.stdout) output.write(line(status=f'{ansiterm.sgr.bold.fg_blu}[....]') + "\n") check = CheckResult() with capture_output(Indent(output=output, indent=" > ")): try: yield check except Exception as err: if str(err).strip(): print(err) check.ok = False end = output.cur_line output.goto_line(1) if check.ok: output.write(line(status=f'{ansiterm.sgr.bold.fg_grn}[ OK ]', rest=check.result)) else: output.write(line(status=f'{ansiterm.sgr.bold.fg_red}[FAIL]', rest=check.result)) if check.ok and clear_on_success: output.write(ansiterm.clear_rest_of_screen + "\n") else: output.write(ansiterm.clear_rest_of_line) output.goto_line(end) self.ok &= check.ok
[ "def", "check", "(", "self", ",", "name", ":", "str", ",", "clear_on_success", ":", "bool", "=", "True", ")", "->", "Generator", "[", "'CheckResult'", ",", "None", ",", "None", "]", ":", "def", "line", "(", "status", ":", "str", ",", "rest", ":", "Optional", "[", "str", "]", "=", "None", ")", "->", "str", ":", "txt", "=", "name", "if", "rest", ":", "txt", "=", "f'{txt}: {rest}'", "return", "f\" {status}{ansiterm.sgr} {txt}\"", "output", "=", "LineTracker", "(", "output", "=", "sys", ".", "stdout", ")", "output", ".", "write", "(", "line", "(", "status", "=", "f'{ansiterm.sgr.bold.fg_blu}[....]'", ")", "+", "\"\\n\"", ")", "check", "=", "CheckResult", "(", ")", "with", "capture_output", "(", "Indent", "(", "output", "=", "output", ",", "indent", "=", "\" > \"", ")", ")", ":", "try", ":", "yield", "check", "except", "Exception", "as", "err", ":", "if", "str", "(", "err", ")", ".", "strip", "(", ")", ":", "print", "(", "err", ")", "check", ".", "ok", "=", "False", "end", "=", "output", ".", "cur_line", "output", ".", "goto_line", "(", "1", ")", "if", "check", ".", "ok", ":", "output", ".", "write", "(", "line", "(", "status", "=", "f'{ansiterm.sgr.bold.fg_grn}[ OK ]'", ",", "rest", "=", "check", ".", "result", ")", ")", "else", ":", "output", ".", "write", "(", "line", "(", "status", "=", "f'{ansiterm.sgr.bold.fg_red}[FAIL]'", ",", "rest", "=", "check", ".", "result", ")", ")", "if", "check", ".", "ok", "and", "clear_on_success", ":", "output", ".", "write", "(", "ansiterm", ".", "clear_rest_of_screen", "+", "\"\\n\"", ")", "else", ":", "output", ".", "write", "(", "ansiterm", ".", "clear_rest_of_line", ")", "output", ".", "goto_line", "(", "end", ")", "self", ".", "ok", "&=", "check", ".", "ok" ]
[ 309, 4 ]
[ 356, 27 ]
python
en
['en', 'en', 'en']
True
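Reading the docstring as a contract, typical call sites would look like the sketch below; Checker (with its ok attribute), CheckResult, and the context-manager wrapping of check are assumed from the surrounding module:

checker = Checker()
with checker.check('fetch manifests') as c:
    print('downloading...')    # streamed (indented) while the check runs
    c.result = '3 files'       # one-line note shown next to [ OK ] / [FAIL]
with checker.check('verify checksums') as c:
    c.ok = False               # mark the check failed without raising
print('all passed:', checker.ok)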
currently_ki_protected
()
r"""Check whether the calling code has :exc:`KeyboardInterrupt` protection enabled. It's surprisingly easy to think that one's :exc:`KeyboardInterrupt` protection is enabled when it isn't, or vice-versa. This function tells you what Trio thinks of the matter, which makes it useful for ``assert``\s and unit tests. Returns: bool: True if protection is enabled, and False otherwise.
r"""Check whether the calling code has :exc:`KeyboardInterrupt` protection enabled.
def currently_ki_protected(): r"""Check whether the calling code has :exc:`KeyboardInterrupt` protection enabled. It's surprisingly easy to think that one's :exc:`KeyboardInterrupt` protection is enabled when it isn't, or vice-versa. This function tells you what Trio thinks of the matter, which makes it useful for ``assert``\s and unit tests. Returns: bool: True if protection is enabled, and False otherwise. """ return ki_protection_enabled(sys._getframe())
[ "def", "currently_ki_protected", "(", ")", ":", "return", "ki_protection_enabled", "(", "sys", ".", "_getframe", "(", ")", ")" ]
[ 95, 0 ]
[ 108, 49 ]
python
en
['en', 'en', 'en']
True
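A usage sketch, assuming a recent Trio where this helper is re-exported as trio.lowlevel.currently_ki_protected:

import trio

async def main():
    # Ordinary task code runs unprotected, so Ctrl-C can interrupt it;
    # Trio's own internals run with protection enabled.
    assert not trio.lowlevel.currently_ki_protected()

trio.run(main)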
IRTCPMappingGroup.finalize
(self, ir: 'IR', aconf: Config)
Finalize a MappingGroup based on the attributes of its Mappings. Core elements get lifted into the Group so we can more easily build Envoy routes; host-redirect and shadow get handled, etc. :param ir: the IR we're working from :param aconf: the Config we're working from :return: a list of the IRClusters this Group uses
Finalize a MappingGroup based on the attributes of its Mappings. Core elements get lifted into the Group so we can more easily build Envoy routes; host-redirect and shadow get handled, etc.
def finalize(self, ir: 'IR', aconf: Config) -> List[IRCluster]: """ Finalize a MappingGroup based on the attributes of its Mappings. Core elements get lifted into the Group so we can more easily build Envoy routes; host-redirect and shadow get handled, etc. :param ir: the IR we're working from :param aconf: the Config we're working from :return: a list of the IRClusters this Group uses """ metadata_labels: Dict[str, str] = {} for mapping in sorted(self.mappings, key=lambda m: m.route_weight): self.ir.logger.debug("%s mapping %s" % (self, mapping.as_json())) for k in mapping.keys(): if k.startswith('_') or mapping.skip_key(k) or (k in IRTCPMappingGroup.DoNotFlattenKeys): # self.ir.logger.debug("%s: don't flatten %s" % (self, k)) continue # self.ir.logger.debug("%s: flatten %s" % (self, k)) self[k] = mapping[k] # Should we have higher weights win over lower if there are conflicts? # Should we disallow conflicts? metadata_labels.update(mapping.get('metadata_labels') or {}) if metadata_labels: self.metadata_labels = metadata_labels # self.ir.logger.debug("%s after flattening %s" % (self, self.as_json())) total_weight = 0.0 unspecified_mappings = 0 # # OK. Save some typing with local variables for default labels and our labels... # labels: Dict[str, Any] = self.get('labels', None) # # if not labels: # # No labels. Use the default label domain to see if we have some valid defaults. # defaults = ir.ambassador_module.get_default_labels() # # if defaults: # domain = ir.ambassador_module.get_default_label_domain() # # self.labels = { # domain: [ # { # 'defaults': defaults # } # ] # } # else: # # Walk all the domains in our labels, and prepend the defaults, if any. # # ir.logger.info("%s: labels %s" % (self.as_json(), labels)) # # for domain in labels.keys(): # defaults = ir.ambassador_module.get_default_labels(domain) # ir.logger.debug("%s: defaults %s" % (domain, defaults)) # # if defaults: # ir.logger.debug("%s: labels %s" % (domain, labels[domain])) # # for label in labels[domain]: # ir.logger.debug("%s: label %s" % (domain, label)) # # lkeys = label.keys() # if len(lkeys) > 1: # err = RichStatus.fromError("label has multiple entries (%s) instead of just one" % # lkeys) # aconf.post_error(err, self) # # lkey = list(lkeys)[0] # # if lkey.startswith('v0_ratelimit_'): # # Don't prepend defaults, as this was imported from a V0 rate_limit. # continue # # label[lkey] = defaults + label[lkey] for mapping in self.mappings: mapping.cluster = self.add_cluster_for_mapping(mapping, mapping.cluster_tag) self.logger.debug(f"Normalizing weights in mappings now...") if not self.normalize_weights_in_mappings(): self.post_error(f"Could not normalize mapping weights, ignoring...") return [] return list([ mapping.cluster for mapping in self.mappings ])
[ "def", "finalize", "(", "self", ",", "ir", ":", "'IR'", ",", "aconf", ":", "Config", ")", "->", "List", "[", "IRCluster", "]", ":", "metadata_labels", ":", "Dict", "[", "str", ",", "str", "]", "=", "{", "}", "for", "mapping", "in", "sorted", "(", "self", ".", "mappings", ",", "key", "=", "lambda", "m", ":", "m", ".", "route_weight", ")", ":", "self", ".", "ir", ".", "logger", ".", "debug", "(", "\"%s mapping %s\"", "%", "(", "self", ",", "mapping", ".", "as_json", "(", ")", ")", ")", "for", "k", "in", "mapping", ".", "keys", "(", ")", ":", "if", "k", ".", "startswith", "(", "'_'", ")", "or", "mapping", ".", "skip_key", "(", "k", ")", "or", "(", "k", "in", "IRTCPMappingGroup", ".", "DoNotFlattenKeys", ")", ":", "# self.ir.logger.debug(\"%s: don't flatten %s\" % (self, k))", "continue", "# self.ir.logger.debug(\"%s: flatten %s\" % (self, k))", "self", "[", "k", "]", "=", "mapping", "[", "k", "]", "# Should we have higher weights win over lower if there are conflicts?", "# Should we disallow conflicts?", "metadata_labels", ".", "update", "(", "mapping", ".", "get", "(", "'metadata_labels'", ")", "or", "{", "}", ")", "if", "metadata_labels", ":", "self", ".", "metadata_labels", "=", "metadata_labels", "# self.ir.logger.debug(\"%s after flattening %s\" % (self, self.as_json()))", "total_weight", "=", "0.0", "unspecified_mappings", "=", "0", "# # OK. Save some typing with local variables for default labels and our labels...", "# labels: Dict[str, Any] = self.get('labels', None)", "#", "# if not labels:", "# # No labels. Use the default label domain to see if we have some valid defaults.", "# defaults = ir.ambassador_module.get_default_labels()", "#", "# if defaults:", "# domain = ir.ambassador_module.get_default_label_domain()", "#", "# self.labels = {", "# domain: [", "# {", "# 'defaults': defaults", "# }", "# ]", "# }", "# else:", "# # Walk all the domains in our labels, and prepend the defaults, if any.", "# # ir.logger.info(\"%s: labels %s\" % (self.as_json(), labels))", "#", "# for domain in labels.keys():", "# defaults = ir.ambassador_module.get_default_labels(domain)", "# ir.logger.debug(\"%s: defaults %s\" % (domain, defaults))", "#", "# if defaults:", "# ir.logger.debug(\"%s: labels %s\" % (domain, labels[domain]))", "#", "# for label in labels[domain]:", "# ir.logger.debug(\"%s: label %s\" % (domain, label))", "#", "# lkeys = label.keys()", "# if len(lkeys) > 1:", "# err = RichStatus.fromError(\"label has multiple entries (%s) instead of just one\" %", "# lkeys)", "# aconf.post_error(err, self)", "#", "# lkey = list(lkeys)[0]", "#", "# if lkey.startswith('v0_ratelimit_'):", "# # Don't prepend defaults, as this was imported from a V0 rate_limit.", "# continue", "#", "# label[lkey] = defaults + label[lkey]", "for", "mapping", "in", "self", ".", "mappings", ":", "mapping", ".", "cluster", "=", "self", ".", "add_cluster_for_mapping", "(", "mapping", ",", "mapping", ".", "cluster_tag", ")", "self", ".", "logger", ".", "debug", "(", "f\"Normalizing weights in mappings now...\"", ")", "if", "not", "self", ".", "normalize_weights_in_mappings", "(", ")", ":", "self", ".", "post_error", "(", "f\"Could not normalize mapping weights, ignoring...\"", ")", "return", "[", "]", "return", "list", "(", "[", "mapping", ".", "cluster", "for", "mapping", "in", "self", ".", "mappings", "]", ")" ]
[ 168, 4 ]
[ 257, 69 ]
python
en
['en', 'error', 'th']
False
wait_child_exiting
(process: "_subprocess.Process")
Block until the child process managed by ``process`` is exiting. It is invalid to call this function if the process has already been waited on; that is, ``process.returncode`` must be None. When this function returns, it indicates that a call to :meth:`subprocess.Popen.wait` will immediately be able to return the process's exit status. The actual exit status is not consumed by this call, since :class:`~subprocess.Popen` wants to be able to do that itself.
Block until the child process managed by ``process`` is exiting.
async def wait_child_exiting(process: "_subprocess.Process") -> None: """Block until the child process managed by ``process`` is exiting. It is invalid to call this function if the process has already been waited on; that is, ``process.returncode`` must be None. When this function returns, it indicates that a call to :meth:`subprocess.Popen.wait` will immediately be able to return the process's exit status. The actual exit status is not consumed by this call, since :class:`~subprocess.Popen` wants to be able to do that itself. """ raise NotImplementedError from _wait_child_exiting_error
[ "async", "def", "wait_child_exiting", "(", "process", ":", "\"_subprocess.Process\"", ")", "->", "None", ":", "raise", "NotImplementedError", "from", "_wait_child_exiting_error" ]
[ 16, 0 ]
[ 28, 60 ]
python
en
['en', 'en', 'en']
True
create_pipe_to_child_stdin
()
Create a new pipe suitable for sending data from this process to the standard input of a child we're about to spawn. Returns: A pair ``(trio_end, subprocess_end)`` where ``trio_end`` is a :class:`~trio.abc.SendStream` and ``subprocess_end`` is something suitable for passing as the ``stdin`` argument of :class:`subprocess.Popen`.
Create a new pipe suitable for sending data from this process to the standard input of a child we're about to spawn.
def create_pipe_to_child_stdin() -> Tuple[SendStream, int]: """Create a new pipe suitable for sending data from this process to the standard input of a child we're about to spawn. Returns: A pair ``(trio_end, subprocess_end)`` where ``trio_end`` is a :class:`~trio.abc.SendStream` and ``subprocess_end`` is something suitable for passing as the ``stdin`` argument of :class:`subprocess.Popen`. """ raise NotImplementedError from _create_child_pipe_error
[ "def", "create_pipe_to_child_stdin", "(", ")", "->", "Tuple", "[", "SendStream", ",", "int", "]", ":", "raise", "NotImplementedError", "from", "_create_child_pipe_error" ]
[ 31, 0 ]
[ 41, 59 ]
python
en
['en', 'en', 'en']
True
create_pipe_from_child_output
()
Create a new pipe suitable for receiving data into this process from the standard output or error stream of a child we're about to spawn. Returns: A pair ``(trio_end, subprocess_end)`` where ``trio_end`` is a :class:`~trio.abc.ReceiveStream` and ``subprocess_end`` is something suitable for passing as the ``stdout`` or ``stderr`` argument of :class:`subprocess.Popen`.
Create a new pipe suitable for receiving data into this process from the standard output or error stream of a child we're about to spawn.
def create_pipe_from_child_output() -> Tuple[ReceiveStream, int]:
    """Create a new pipe suitable for receiving data into this
    process from the standard output or error stream of a child
    we're about to spawn.

    Returns:
      A pair ``(trio_end, subprocess_end)`` where ``trio_end`` is a
      :class:`~trio.abc.ReceiveStream` and ``subprocess_end`` is
      something suitable for passing as the ``stdout`` or ``stderr``
      argument of :class:`subprocess.Popen`.
    """
    raise NotImplementedError from _create_child_pipe_error
[ "def", "create_pipe_from_child_output", "(", ")", "->", "Tuple", "[", "ReceiveStream", ",", "int", "]", ":", "raise", "NotImplementedError", "from", "_create_child_pipe_error" ]
[ 44, 0 ]
[ 55, 59 ]
python
en
['en', 'en', 'en']
True
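On the POSIX backend the second element of the returned pair is a raw file descriptor, which slots straight into subprocess.Popen. A rough sketch under that assumption (error handling omitted; the blocking proc.wait() is acceptable only for a sketch):

import os
import subprocess

async def read_child_stdout(argv):
    stream, write_fd = create_pipe_from_child_output()
    proc = subprocess.Popen(argv, stdout=write_fd)
    os.close(write_fd)           # the child holds its own copy; drop ours so EOF arrives
    chunks = []
    while True:
        data = await stream.receive_some(4096)
        if not data:             # b'' means the child closed its end
            break
        chunks.append(data)
    proc.wait()
    return b''.join(chunks)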
sample_user
(email='[email protected]', password='testpass')
Create a sample user
Create a sample user
def sample_user(email='[email protected]', password='testpass'): """Create a sample user""" return get_user_model().objects.create_user(email, password)
[ "def", "sample_user", "(", "email", "=", "'[email protected]'", ",", "password", "=", "'testpass'", ")", ":", "return", "get_user_model", "(", ")", ".", "objects", ".", "create_user", "(", "email", ",", "password", ")" ]
[ 8, 0 ]
[ 10, 64 ]
python
en
['en', 'co', 'en']
True
ModelTests.test_create_user_with_email_successful
(self)
Test creating a new user with an email is successful
Test creating a new user with an email is successful
def test_create_user_with_email_successful(self): """Test creating a new user with an email is successful""" email = '[email protected]' password = 'testpass123' user = get_user_model().objects.create_user( email=email, password=password ) self.assertEqual(user.email, email) self.assertTrue(user.check_password(password))
[ "def", "test_create_user_with_email_successful", "(", "self", ")", ":", "email", "=", "'[email protected]'", "password", "=", "'testpass123'", "user", "=", "get_user_model", "(", ")", ".", "objects", ".", "create_user", "(", "email", "=", "email", ",", "password", "=", "password", ")", "self", ".", "assertEqual", "(", "user", ".", "email", ",", "email", ")", "self", ".", "assertTrue", "(", "user", ".", "check_password", "(", "password", ")", ")" ]
[ 15, 4 ]
[ 25, 54 ]
python
en
['en', 'en', 'en']
True
ModelTests.test_new_user_email_normalize
(self)
Test the email for a new user is normalized
Test the email for a new user is normalized
def test_new_user_email_normalize(self):
    """Test the email for a new user is normalized"""
    email = '[email protected]'
    user = get_user_model().objects.create_user(email, 'testpass123')

    self.assertEqual(user.email, email.lower())
[ "def", "test_new_user_email_normalize", "(", "self", ")", ":", "email", "=", "'[email protected]'", "user", "=", "get_user_model", "(", ")", ".", "objects", ".", "create_user", "(", "email", ",", "'testpass123'", ")", "self", ".", "assertEqual", "(", "user", ".", "email", ",", "email", ".", "lower", "(", ")", ")" ]
[ 27, 4 ]
[ 32, 51 ]
python
en
['en', 'en', 'en']
True
ModelTests.test_new_user_invalid_email
(self)
Test creating user with no email raises error
Test creating user with no email raises error
def test_new_user_invalid_email(self): """Test creating user with no email raises error""" with self.assertRaises(ValueError): get_user_model().objects.create_user(None, 'testpass123')
[ "def", "test_new_user_invalid_email", "(", "self", ")", ":", "with", "self", ".", "assertRaises", "(", "ValueError", ")", ":", "get_user_model", "(", ")", ".", "objects", ".", "create_user", "(", "None", ",", "'testpass123'", ")" ]
[ 34, 4 ]
[ 37, 69 ]
python
en
['en', 'en', 'en']
True
ModelTests.test_create_new_superuser
(self)
Test creating a new superuser
Test creating a new superuser
def test_create_new_superuser(self): """Test creating a new superuser""" user = get_user_model().objects.create_superuser( '[email protected]', 'testpass123' ) self.assertTrue(user.is_superuser) self.assertTrue(user.is_staff)
[ "def", "test_create_new_superuser", "(", "self", ")", ":", "user", "=", "get_user_model", "(", ")", ".", "objects", ".", "create_superuser", "(", "'[email protected]'", ",", "'testpass123'", ")", "self", ".", "assertTrue", "(", "user", ".", "is_superuser", ")", "self", ".", "assertTrue", "(", "user", ".", "is_staff", ")" ]
[ 39, 4 ]
[ 47, 38 ]
python
en
['en', 'en', 'en']
True
ModelTests.test_tag_str
(self)
Test the tag string representation
Test the tag string representation
def test_tag_str(self): """Test the tag string representation""" tag = models.Tag.objects.create( user=sample_user(), name='Vegan' ) self.assertEqual(str(tag), tag.name)
[ "def", "test_tag_str", "(", "self", ")", ":", "tag", "=", "models", ".", "Tag", ".", "objects", ".", "create", "(", "user", "=", "sample_user", "(", ")", ",", "name", "=", "'Vegan'", ")", "self", ".", "assertEqual", "(", "str", "(", "tag", ")", ",", "tag", ".", "name", ")" ]
[ 49, 4 ]
[ 56, 44 ]
python
en
['en', 'en', 'en']
True
ModelTests.test_ingredient_str
(self)
Test the ingredient string representation
Test the ingredient string representation
def test_ingredient_str(self): """Test the ingredient string representation""" ingredient = models.Ingredient.objects.create( user=sample_user(), name='Cucumber' ) self.assertEqual(str(ingredient), ingredient.name)
[ "def", "test_ingredient_str", "(", "self", ")", ":", "ingredient", "=", "models", ".", "Ingredient", ".", "objects", ".", "create", "(", "user", "=", "sample_user", "(", ")", ",", "name", "=", "'Cucumber'", ")", "self", ".", "assertEqual", "(", "str", "(", "ingredient", ")", ",", "ingredient", ".", "name", ")" ]
[ 58, 4 ]
[ 65, 58 ]
python
en
['en', 'en', 'en']
True
ModelTests.test_recipe_str
(self)
Test the recipe string representation
Test the recipe string representation
def test_recipe_str(self): """Test the recipe string representation""" recipe = models.Recipe.objects.create( user=sample_user(), title='Steak and mashroom sauce', time_minutes=5, price=5.00 ) self.assertEqual(str(recipe), recipe.title)
[ "def", "test_recipe_str", "(", "self", ")", ":", "recipe", "=", "models", ".", "Recipe", ".", "objects", ".", "create", "(", "user", "=", "sample_user", "(", ")", ",", "title", "=", "'Steak and mashroom sauce'", ",", "time_minutes", "=", "5", ",", "price", "=", "5.00", ")", "self", ".", "assertEqual", "(", "str", "(", "recipe", ")", ",", "recipe", ".", "title", ")" ]
[ 67, 4 ]
[ 76, 51 ]
python
en
['en', 'en', 'en']
True
ModelTests.test_recipe_file_name_uuid
(self, mock_uuid)
Test that image is saved in the correct location
Test that image is saved in the correct location
def test_recipe_file_name_uuid(self, mock_uuid): """Test that image is saved in the correct location""" uuid = 'test-uuid' mock_uuid.return_value = uuid file_path = models.recipe_image_file_path(None, 'myimage.jpg') exp_path = f'upload/recipe/{uuid}.jpg' self.assertEqual(file_path, exp_path)
[ "def", "test_recipe_file_name_uuid", "(", "self", ",", "mock_uuid", ")", ":", "uuid", "=", "'test-uuid'", "mock_uuid", ".", "return_value", "=", "uuid", "file_path", "=", "models", ".", "recipe_image_file_path", "(", "None", ",", "'myimage.jpg'", ")", "exp_path", "=", "f'upload/recipe/{uuid}.jpg'", "self", ".", "assertEqual", "(", "file_path", ",", "exp_path", ")" ]
[ 79, 4 ]
[ 86, 45 ]
python
en
['en', 'en', 'en']
True
_WriteWorkspace
(main_gyp, sources_gyp, params)
Create a workspace to wrap main and sources gyp paths.
Create a workspace to wrap main and sources gyp paths.
def _WriteWorkspace(main_gyp, sources_gyp, params): """ Create a workspace to wrap main and sources gyp paths. """ (build_file_root, build_file_ext) = os.path.splitext(main_gyp) workspace_path = build_file_root + '.xcworkspace' options = params['options'] if options.generator_output: workspace_path = os.path.join(options.generator_output, workspace_path) try: os.makedirs(workspace_path) except OSError, e: if e.errno != errno.EEXIST: raise output_string = '<?xml version="1.0" encoding="UTF-8"?>\n' + \ '<Workspace version = "1.0">\n' for gyp_name in [main_gyp, sources_gyp]: name = os.path.splitext(os.path.basename(gyp_name))[0] + '.xcodeproj' name = xml.sax.saxutils.quoteattr("group:" + name) output_string += ' <FileRef location = %s></FileRef>\n' % name output_string += '</Workspace>\n' workspace_file = os.path.join(workspace_path, "contents.xcworkspacedata") try: with open(workspace_file, 'r') as input_file: input_string = input_file.read() if input_string == output_string: return except IOError: # Ignore errors if the file doesn't exist. pass with open(workspace_file, 'w') as output_file: output_file.write(output_string)
[ "def", "_WriteWorkspace", "(", "main_gyp", ",", "sources_gyp", ",", "params", ")", ":", "(", "build_file_root", ",", "build_file_ext", ")", "=", "os", ".", "path", ".", "splitext", "(", "main_gyp", ")", "workspace_path", "=", "build_file_root", "+", "'.xcworkspace'", "options", "=", "params", "[", "'options'", "]", "if", "options", ".", "generator_output", ":", "workspace_path", "=", "os", ".", "path", ".", "join", "(", "options", ".", "generator_output", ",", "workspace_path", ")", "try", ":", "os", ".", "makedirs", "(", "workspace_path", ")", "except", "OSError", ",", "e", ":", "if", "e", ".", "errno", "!=", "errno", ".", "EEXIST", ":", "raise", "output_string", "=", "'<?xml version=\"1.0\" encoding=\"UTF-8\"?>\\n'", "+", "'<Workspace version = \"1.0\">\\n'", "for", "gyp_name", "in", "[", "main_gyp", ",", "sources_gyp", "]", ":", "name", "=", "os", ".", "path", ".", "splitext", "(", "os", ".", "path", ".", "basename", "(", "gyp_name", ")", ")", "[", "0", "]", "+", "'.xcodeproj'", "name", "=", "xml", ".", "sax", ".", "saxutils", ".", "quoteattr", "(", "\"group:\"", "+", "name", ")", "output_string", "+=", "' <FileRef location = %s></FileRef>\\n'", "%", "name", "output_string", "+=", "'</Workspace>\\n'", "workspace_file", "=", "os", ".", "path", ".", "join", "(", "workspace_path", ",", "\"contents.xcworkspacedata\"", ")", "try", ":", "with", "open", "(", "workspace_file", ",", "'r'", ")", "as", "input_file", ":", "input_string", "=", "input_file", ".", "read", "(", ")", "if", "input_string", "==", "output_string", ":", "return", "except", "IOError", ":", "# Ignore errors if the file doesn't exist.", "pass", "with", "open", "(", "workspace_file", ",", "'w'", ")", "as", "output_file", ":", "output_file", ".", "write", "(", "output_string", ")" ]
[ 21, 0 ]
[ 53, 36 ]
python
en
['en', 'en', 'en']
True
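The workspace file _WriteWorkspace produces is tiny; reconstructing its string-building in isolation makes the output concrete. Project names here are illustrative, and quoteattr supplies the surrounding quotes (indentation approximate):

import xml.sax.saxutils

lines = ['<?xml version="1.0" encoding="UTF-8"?>', '<Workspace version = "1.0">']
for name in ['all.xcodeproj', 'all_sources.xcodeproj']:
    lines.append('  <FileRef location = %s></FileRef>'
                 % xml.sax.saxutils.quoteattr('group:' + name))
lines.append('</Workspace>')
print('\n'.join(lines))
# <?xml version="1.0" encoding="UTF-8"?>
# <Workspace version = "1.0">
#   <FileRef location = "group:all.xcodeproj"></FileRef>
#   <FileRef location = "group:all_sources.xcodeproj"></FileRef>
# </Workspace>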