Dataset schema:
- identifier: string (length 1 to 155)
- parameters: string (length 2 to 6.09k)
- docstring: string (length 11 to 63.4k)
- docstring_summary: string (length 0 to 63.4k)
- function: string (length 29 to 99.8k)
- function_tokens: sequence
- start_point: sequence
- end_point: sequence
- language: string (1 class)
- docstring_language: string (length 2 to 7)
- docstring_language_predictions: string (length 18 to 23)
- is_langid_reliable: string (2 classes)
with_scopes
(credentials, scopes)
Scopes the credentials if necessary. Args: credentials (Union[ google.auth.credentials.Credentials, oauth2client.client.Credentials]): The credentials to scope. scopes (Sequence[str]): The list of scopes. Returns: Union[google.auth.credentials.Credentials, oauth2client.client.Credentials]: The scoped credentials.
Scopes the credentials if necessary.
def with_scopes(credentials, scopes):
    """Scopes the credentials if necessary.

    Args:
        credentials (Union[
            google.auth.credentials.Credentials,
            oauth2client.client.Credentials]): The credentials to scope.
        scopes (Sequence[str]): The list of scopes.

    Returns:
        Union[google.auth.credentials.Credentials,
            oauth2client.client.Credentials]: The scoped credentials.
    """
    if HAS_GOOGLE_AUTH and isinstance(
            credentials, google.auth.credentials.Credentials):
        return google.auth.credentials.with_scopes_if_required(
            credentials, scopes)
    else:
        try:
            if credentials.create_scoped_required():
                return credentials.create_scoped(scopes)
            else:
                return credentials
        except AttributeError:
            return credentials
[ "def", "with_scopes", "(", "credentials", ",", "scopes", ")", ":", "if", "HAS_GOOGLE_AUTH", "and", "isinstance", "(", "credentials", ",", "google", ".", "auth", ".", "credentials", ".", "Credentials", ")", ":", "return", "google", ".", "auth", ".", "credentials", ".", "with_scopes_if_required", "(", "credentials", ",", "scopes", ")", "else", ":", "try", ":", "if", "credentials", ".", "create_scoped_required", "(", ")", ":", "return", "credentials", ".", "create_scoped", "(", "scopes", ")", "else", ":", "return", "credentials", "except", "AttributeError", ":", "return", "credentials" ]
[ 47, 0 ]
[ 71, 30 ]
python
en
['en', 'en', 'en']
True
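A minimal usage sketch, assuming with_scopes is importable from googleapiclient._auth and google-auth is installed; the key-file path is hypothetical:

import google.auth.credentials
from google.oauth2 import service_account
from googleapiclient._auth import with_scopes

# Load unscoped credentials from a hypothetical service-account key file.
creds = service_account.Credentials.from_service_account_file('key.json')
scoped = with_scopes(creds, ['https://www.googleapis.com/auth/drive.readonly'])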
authorized_http
(credentials)
Returns an http client that is authorized with the given credentials. Args: credentials (Union[ google.auth.credentials.Credentials, oauth2client.client.Credentials]): The credentials to use. Returns: Union[httplib2.Http, google_auth_httplib2.AuthorizedHttp]: An authorized http client.
Returns an http client that is authorized with the given credentials.
def authorized_http(credentials):
    """Returns an http client that is authorized with the given credentials.

    Args:
        credentials (Union[
            google.auth.credentials.Credentials,
            oauth2client.client.Credentials]): The credentials to use.

    Returns:
        Union[httplib2.Http, google_auth_httplib2.AuthorizedHttp]: An
            authorized http client.
    """
    if HAS_GOOGLE_AUTH and isinstance(
            credentials, google.auth.credentials.Credentials):
        return google_auth_httplib2.AuthorizedHttp(credentials,
                                                   http=build_http())
    else:
        return credentials.authorize(build_http())
[ "def", "authorized_http", "(", "credentials", ")", ":", "if", "HAS_GOOGLE_AUTH", "and", "isinstance", "(", "credentials", ",", "google", ".", "auth", ".", "credentials", ".", "Credentials", ")", ":", "return", "google_auth_httplib2", ".", "AuthorizedHttp", "(", "credentials", ",", "http", "=", "build_http", "(", ")", ")", "else", ":", "return", "credentials", ".", "authorize", "(", "build_http", "(", ")", ")" ]
[ 74, 0 ]
[ 91, 50 ]
python
en
['en', 'en', 'en']
True
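A self-contained sketch under the same assumptions (hypothetical key file; the request URL is only illustrative):

from google.oauth2 import service_account
from googleapiclient._auth import authorized_http

creds = service_account.Credentials.from_service_account_file(
    'key.json',  # hypothetical key file
    scopes=['https://www.googleapis.com/auth/drive.readonly'],
)
http = authorized_http(creds)
response, content = http.request('https://www.googleapis.com/drive/v3/about?fields=user')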
Deserializer
(stream_or_string, **options)
Deserialize a stream or string of JSON data.
Deserialize a stream or string of JSON data.
def Deserializer(stream_or_string, **options):
    """Deserialize a stream or string of JSON data."""
    if not isinstance(stream_or_string, (bytes, str)):
        stream_or_string = stream_or_string.read()
    if isinstance(stream_or_string, bytes):
        stream_or_string = stream_or_string.decode()
    try:
        objects = json.loads(stream_or_string)
        yield from PythonDeserializer(objects, **options)
    except (GeneratorExit, DeserializationError):
        raise
    except Exception as exc:
        raise DeserializationError() from exc
[ "def", "Deserializer", "(", "stream_or_string", ",", "*", "*", "options", ")", ":", "if", "not", "isinstance", "(", "stream_or_string", ",", "(", "bytes", ",", "str", ")", ")", ":", "stream_or_string", "=", "stream_or_string", ".", "read", "(", ")", "if", "isinstance", "(", "stream_or_string", ",", "bytes", ")", ":", "stream_or_string", "=", "stream_or_string", ".", "decode", "(", ")", "try", ":", "objects", "=", "json", ".", "loads", "(", "stream_or_string", ")", "yield", "from", "PythonDeserializer", "(", "objects", ",", "*", "*", "options", ")", "except", "(", "GeneratorExit", ",", "DeserializationError", ")", ":", "raise", "except", "Exception", "as", "exc", ":", "raise", "DeserializationError", "(", ")", "from", "exc" ]
[ 61, 0 ]
[ 73, 45 ]
python
en
['en', 'en', 'en']
True
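A hedged usage sketch; inside a configured Django project this deserializer is normally reached through django.core.serializers:

from django.core import serializers

payload = '[{"model": "auth.user", "pk": 1, "fields": {"username": "alice"}}]'
for deserialized in serializers.deserialize('json', payload):
    print(deserialized.object.username)  # -> alice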
lookup
(tag, group=None)
:param tag: Integer tag number :returns: TagInfo namedtuple, from the TAGS_V2 info if possible, otherwise just populating the value and name from TAGS. If the tag is not recognized, "unknown" is returned for the name
:param tag: Integer tag number :returns: TagInfo namedtuple, from the TAGS_V2 info if possible, otherwise just populating the value and name from TAGS. If the tag is not recognized, "unknown" is returned for the name
def lookup(tag, group=None):
    """
    :param tag: Integer tag number
    :returns: TagInfo namedtuple, from the TAGS_V2 info if possible,
        otherwise just populating the value and name from TAGS.
        If the tag is not recognized, "unknown" is returned for the name
    """
    if group is not None:
        info = TAGS_V2_GROUPS[group].get(tag) if group in TAGS_V2_GROUPS else None
    else:
        info = TAGS_V2.get(tag)
    return info or TagInfo(tag, TAGS.get(tag, "unknown"))
[ "def", "lookup", "(", "tag", ",", "group", "=", "None", ")", ":", "if", "group", "is", "not", "None", ":", "info", "=", "TAGS_V2_GROUPS", "[", "group", "]", ".", "get", "(", "tag", ")", "if", "group", "in", "TAGS_V2_GROUPS", "else", "None", "else", ":", "info", "=", "TAGS_V2", ".", "get", "(", "tag", ")", "return", "info", "or", "TagInfo", "(", "tag", ",", "TAGS", ".", "get", "(", "tag", ",", "\"unknown\"", ")", ")" ]
[ 35, 0 ]
[ 48, 57 ]
python
en
['en', 'error', 'th']
False
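This matches Pillow's PIL.TiffTags.lookup; a small usage sketch, assuming Pillow is installed:

from PIL.TiffTags import lookup

info = lookup(271)            # 271 is the standard TIFF "Make" tag
print(info.value, info.name)  # -> 271 Make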
ItemKNN.__init__
(self, train_file=None, test_file=None, output_file=None, similarity_metric="cosine", k_neighbors=None, rank_length=10, as_binary=False, as_similar_first=True, sep='\t', output_sep='\t')
Item KNN for Item Recommendation This algorithm predicts a rank for each user based on the similar items that they consumed. Usage:: >> ItemKNN(train, test, as_similar_first=True).compute() >> ItemKNN(train, test, ranking_file, as_binary=True).compute() :param train_file: File which contains the train set. This file needs to have at least 3 columns (user item feedback_value). :type train_file: str :param test_file: File which contains the test set. This file needs to have at least 3 columns (user item feedback_value). :type test_file: str, default None :param output_file: File with dir to write the final predictions :type output_file: str, default None :param similarity_metric: Pairwise metric to compute the similarity between the items. Reference about distances: http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.spatial.distance.pdist.html :type similarity_metric: str, default cosine :param k_neighbors: Number of neighbors to use. If None, k_neighbor = int(sqrt(n_items)) :type k_neighbors: int, default None :param rank_length: Size of the rank that must be generated by the predictions of the recommender algorithm :type rank_length: int, default 10 :param as_binary: If True, the explicit feedback will be transformed to binary :type as_binary: bool, default False :param as_similar_first: If True, for each unknown item to be predicted, we first look for its k most similar items and then take the intersection with the items seen by the user. :type as_similar_first: bool, default True :param sep: Delimiter for input files :type sep: str, default '\t' :param output_sep: Delimiter for output file :type output_sep: str, default '\t'
Item KNN for Item Recommendation
def __init__(self, train_file=None, test_file=None, output_file=None, similarity_metric="cosine", k_neighbors=None,
             rank_length=10, as_binary=False, as_similar_first=True, sep='\t', output_sep='\t'):
    """
    Item KNN for Item Recommendation

    This algorithm predicts a rank for each user based on the similar items that they consumed.

    Usage::

        >> ItemKNN(train, test, as_similar_first=True).compute()

        >> ItemKNN(train, test, ranking_file, as_binary=True).compute()

    :param train_file: File which contains the train set. This file needs to have at least 3 columns
    (user item feedback_value).
    :type train_file: str

    :param test_file: File which contains the test set. This file needs to have at least 3 columns
    (user item feedback_value).
    :type test_file: str, default None

    :param output_file: File with dir to write the final predictions
    :type output_file: str, default None

    :param similarity_metric: Pairwise metric to compute the similarity between the items. Reference about
    distances: http://docs.scipy.org/doc/scipy-0.14.0/reference/generated/scipy.spatial.distance.pdist.html
    :type similarity_metric: str, default cosine

    :param k_neighbors: Number of neighbors to use. If None, k_neighbor = int(sqrt(n_items))
    :type k_neighbors: int, default None

    :param rank_length: Size of the rank that must be generated by the predictions of the recommender algorithm
    :type rank_length: int, default 10

    :param as_binary: If True, the explicit feedback will be transformed to binary
    :type as_binary: bool, default False

    :param as_similar_first: If True, for each unknown item to be predicted, we first look for its k most
    similar items and then take the intersection with the items seen by the user.
    :type as_similar_first: bool, default True

    :param sep: Delimiter for input files
    :type sep: str, default '\t'

    :param output_sep: Delimiter for output file
    :type output_sep: str, default '\t'
    """
    super(ItemKNN, self).__init__(train_file=train_file, test_file=test_file, output_file=output_file,
                                  as_binary=as_binary, rank_length=rank_length,
                                  similarity_metric=similarity_metric, sep=sep, output_sep=output_sep)

    self.recommender_name = 'ItemKNN Algorithm'

    self.as_similar_first = as_similar_first
    self.k_neighbors = k_neighbors

    # internal vars
    self.si_matrix = None
    self.similar_items = None
[ "def", "__init__", "(", "self", ",", "train_file", "=", "None", ",", "test_file", "=", "None", ",", "output_file", "=", "None", ",", "similarity_metric", "=", "\"cosine\"", ",", "k_neighbors", "=", "None", ",", "rank_length", "=", "10", ",", "as_binary", "=", "False", ",", "as_similar_first", "=", "True", ",", "sep", "=", "'\\t'", ",", "output_sep", "=", "'\\t'", ")", ":", "super", "(", "ItemKNN", ",", "self", ")", ".", "__init__", "(", "train_file", "=", "train_file", ",", "test_file", "=", "test_file", ",", "output_file", "=", "output_file", ",", "as_binary", "=", "as_binary", ",", "rank_length", "=", "rank_length", ",", "similarity_metric", "=", "similarity_metric", ",", "sep", "=", "sep", ",", "output_sep", "=", "output_sep", ")", "self", ".", "recommender_name", "=", "'ItemKNN Algorithm'", "self", ".", "as_similar_first", "=", "as_similar_first", "self", ".", "k_neighbors", "=", "k_neighbors", "# internal vars", "self", ".", "si_matrix", "=", "None", "self", ".", "similar_items", "=", "None" ]
[ 21, 4 ]
[ 82, 33 ]
python
en
['en', 'error', 'th']
False
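A hedged usage sketch mirroring the docstring's Usage section (file paths hypothetical; the import path assumes the CaseRecommender package layout):

from caserec.recommenders.item_recommendation.itemknn import ItemKNN

# Input files hold tab-separated triples: user \t item \t feedback_value
ItemKNN('train.dat', 'test.dat', output_file='rank.dat', as_binary=True).compute()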
ItemKNN.init_model
(self)
Method to initialize the model. Create and calculate a similarity matrix
Method to initialize the model. Create and calculate a similarity matrix
def init_model(self):
    """
    Method to initialize the model. Create and calculate a similarity matrix
    """
    self.similar_items = defaultdict(list)

    # Set the value for k
    if self.k_neighbors is None:
        self.k_neighbors = int(np.sqrt(len(self.items)))

    self.create_matrix()
    self.si_matrix = self.compute_similarity(transpose=True)

    for i_id, item in enumerate(self.items):
        self.similar_items[i_id] = sorted(range(len(self.si_matrix[i_id])),
                                          key=lambda k: -self.si_matrix[i_id][k])[1:self.k_neighbors + 1]
[ "def", "init_model", "(", "self", ")", ":", "self", ".", "similar_items", "=", "defaultdict", "(", "list", ")", "# Set the value for k", "if", "self", ".", "k_neighbors", "is", "None", ":", "self", ".", "k_neighbors", "=", "int", "(", "np", ".", "sqrt", "(", "len", "(", "self", ".", "items", ")", ")", ")", "self", ".", "create_matrix", "(", ")", "self", ".", "si_matrix", "=", "self", ".", "compute_similarity", "(", "transpose", "=", "True", ")", "for", "i_id", ",", "item", "in", "enumerate", "(", "self", ".", "items", ")", ":", "self", ".", "similar_items", "[", "i_id", "]", "=", "sorted", "(", "range", "(", "len", "(", "self", ".", "si_matrix", "[", "i_id", "]", ")", ")", ",", "key", "=", "lambda", "k", ":", "-", "self", ".", "si_matrix", "[", "i_id", "]", "[", "k", "]", ")", "[", "1", ":", "self", ".", "k_neighbors", "+", "1", "]" ]
[ 84, 4 ]
[ 100, 109 ]
python
en
['en', 'error', 'th']
False
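The neighbor selection amounts to sorting each row of the similarity matrix in descending order and dropping position 0 (the item itself). A self-contained numpy sketch with a toy matrix:

import numpy as np

si = np.array([[1.0, 0.3, 0.8],
               [0.3, 1.0, 0.5],
               [0.8, 0.5, 1.0]])
k = 1
# Descending argsort per row; skip the first entry (self-similarity).
neighbors = np.argsort(-si, axis=1)[:, 1:k + 1]
print(neighbors.tolist())  # [[2], [2], [0]]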
ItemKNN.predict
(self)
This method predicts a rank for each user.
This method predicts a rank for each user.
def predict(self):
    """
    This method predicts a rank for each user.
    """
    for u_id, user in enumerate(self.users):
        if len(self.train_set['feedback'].get(user, [])) != 0:
            if self.as_similar_first:
                self.ranking += self.predict_similar_first_scores(user, u_id)
            else:
                self.ranking += self.predict_scores(user, u_id)
        else:
            # Implement cold start user
            pass
[ "def", "predict", "(", "self", ")", ":", "for", "u_id", ",", "user", "in", "enumerate", "(", "self", ".", "users", ")", ":", "if", "len", "(", "self", ".", "train_set", "[", "'feedback'", "]", ".", "get", "(", "user", ",", "[", "]", ")", ")", "!=", "0", ":", "if", "self", ".", "as_similar_first", ":", "self", ".", "ranking", "+=", "self", ".", "predict_similar_first_scores", "(", "user", ",", "u_id", ")", "else", ":", "self", ".", "ranking", "+=", "self", ".", "predict_scores", "(", "user", ",", "u_id", ")", "else", ":", "# Implement cold start user", "pass" ]
[ 102, 4 ]
[ 117, 20 ]
python
en
['en', 'error', 'th']
False
ItemKNN.predict_similar_first_scores
(self, user, user_id)
In this implementation, for each unknown item to be predicted, we first look for its k most similar items and then take the intersection with the seen items of the user. Finally, the score of the unknown item will be the sum of the similarities of its k most similar items, taking into account only the items that the user has seen.
In this implementation, for each unknown item to be predicted, we first look for its k most similar items and then take the intersection with the seen items of the user. Finally, the score of the unknown item will be the sum of the similarities of its k most similar items, taking into account only the items that the user has seen.
def predict_similar_first_scores(self, user, user_id):
    """
    In this implementation, for each unknown item to be predicted, we first look for its k most similar items
    and then take the intersection with the seen items of the user. Finally, the score of the unknown item
    will be the sum of the similarities of its k most similar items, taking into account only the items that
    the user has seen.
    """
    predictions = []

    # Selects items that user has not interacted with.
    u_list = list(np.flatnonzero(self.matrix[user_id] == 0))
    seen_items_id = np.flatnonzero(self.matrix[user_id])

    # predict score for item_i
    for i_id in u_list:
        # s_id = list(filter(set(self.similar_items[i]).__contains__, seen_items_id))
        s_id = list(set(self.similar_items[i_id]).intersection(seen_items_id))
        sim_sum = np.take(self.si_matrix[i_id], s_id)
        predictions.append((user, self.items[i_id], sum(sim_sum)))

    return sorted(predictions, key=lambda x: -x[2])[:self.rank_length]
[ "def", "predict_similar_first_scores", "(", "self", ",", "user", ",", "user_id", ")", ":", "predictions", "=", "[", "]", "# Selects items that user has not interacted with.", "u_list", "=", "list", "(", "np", ".", "flatnonzero", "(", "self", ".", "matrix", "[", "user_id", "]", "==", "0", ")", ")", "seen_items_id", "=", "np", ".", "flatnonzero", "(", "self", ".", "matrix", "[", "user_id", "]", ")", "# predict score for item_i", "for", "i_id", "in", "u_list", ":", "# s_id = list(filter(set(self.similar_items[i]).__contains__, seen_items_id))", "s_id", "=", "list", "(", "set", "(", "self", ".", "similar_items", "[", "i_id", "]", ")", ".", "intersection", "(", "seen_items_id", ")", ")", "sim_sum", "=", "np", ".", "take", "(", "self", ".", "si_matrix", "[", "i_id", "]", ",", "s_id", ")", "predictions", ".", "append", "(", "(", "user", ",", "self", ".", "items", "[", "i_id", "]", ",", "sum", "(", "sim_sum", ")", ")", ")", "return", "sorted", "(", "predictions", ",", "key", "=", "lambda", "x", ":", "-", "x", "[", "2", "]", ")", "[", ":", "self", ".", "rank_length", "]" ]
[ 132, 4 ]
[ 154, 74 ]
python
en
['en', 'error', 'th']
False
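A self-contained sketch of the scoring rule with a toy similarity matrix (all indices hypothetical):

import numpy as np

si = np.array([[1.0, 0.3, 0.8],
               [0.3, 1.0, 0.5],
               [0.8, 0.5, 1.0]])
seen_items_id = np.array([0, 1])  # items the user has interacted with
i_id = 2                          # unknown item being scored
similar_items = [0, 1]            # its k most similar items
s_id = list(set(similar_items).intersection(seen_items_id))
score = np.take(si[i_id], s_id).sum()
print(score)  # 1.3 == 0.8 + 0.5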
ItemKNN.compute
(self, verbose=True, metrics=None, verbose_evaluation=True, as_table=False, table_sep='\t', n_ranks=None)
Extends compute method from BaseItemRecommendation. Method to run recommender algorithm :param verbose: Print recommender and database information :type verbose: bool, default True :param metrics: List of evaluation metrics :type metrics: list, default None :param verbose_evaluation: Print the evaluation results :type verbose_evaluation: bool, default True :param as_table: Print the evaluation results as table :type as_table: bool, default False :param table_sep: Delimiter for print results (only works with verbose=True and as_table=True) :type table_sep: str, default '\t' :param n_ranks: List of positions to evaluate the ranking :type n_ranks: list, default None
Extends compute method from BaseItemRecommendation. Method to run recommender algorithm
def compute(self, verbose=True, metrics=None, verbose_evaluation=True, as_table=False, table_sep='\t', n_ranks=None):
    """
    Extends compute method from BaseItemRecommendation. Method to run recommender algorithm

    :param verbose: Print recommender and database information
    :type verbose: bool, default True

    :param metrics: List of evaluation metrics
    :type metrics: list, default None

    :param verbose_evaluation: Print the evaluation results
    :type verbose_evaluation: bool, default True

    :param as_table: Print the evaluation results as table
    :type as_table: bool, default False

    :param table_sep: Delimiter for print results (only works with verbose=True and as_table=True)
    :type table_sep: str, default '\t'

    :param n_ranks: List of positions to evaluate the ranking
    :type n_ranks: list, default None
    """
    super(ItemKNN, self).compute(verbose=verbose)

    if verbose:
        print("training_time:: %4f sec" % timed(self.init_model))
        if self.extra_info_header is not None:
            print(self.extra_info_header)

        print("prediction_time:: %4f sec" % timed(self.predict))

        print('\n')
    else:
        self.init_model()
        self.predict()

    self.write_ranking()

    if self.test_file is not None:
        self.evaluate(metrics, verbose_evaluation, as_table=as_table, table_sep=table_sep, n_ranks=n_ranks)
[ "def", "compute", "(", "self", ",", "verbose", "=", "True", ",", "metrics", "=", "None", ",", "verbose_evaluation", "=", "True", ",", "as_table", "=", "False", ",", "table_sep", "=", "'\\t'", ",", "n_ranks", "=", "None", ")", ":", "super", "(", "ItemKNN", ",", "self", ")", ".", "compute", "(", "verbose", "=", "verbose", ")", "if", "verbose", ":", "print", "(", "\"training_time:: %4f sec\"", "%", "timed", "(", "self", ".", "init_model", ")", ")", "if", "self", ".", "extra_info_header", "is", "not", "None", ":", "print", "(", "self", ".", "extra_info_header", ")", "print", "(", "\"prediction_time:: %4f sec\"", "%", "timed", "(", "self", ".", "predict", ")", ")", "print", "(", "'\\n'", ")", "else", ":", "self", ".", "init_model", "(", ")", "self", ".", "predict", "(", ")", "self", ".", "write_ranking", "(", ")", "if", "self", ".", "test_file", "is", "not", "None", ":", "self", ".", "evaluate", "(", "metrics", ",", "verbose_evaluation", ",", "as_table", "=", "as_table", ",", "table_sep", "=", "table_sep", ",", "n_ranks", "=", "n_ranks", ")" ]
[ 156, 4 ]
[ 196, 111 ]
python
en
['en', 'error', 'th']
False
getInnerText
(node)
Get all the inner text of a DOM node (recursively).
Get all the inner text of a DOM node (recursively).
def getInnerText(node):
    """Get all the inner text of a DOM node (recursively)."""
    # inspired by https://mail.python.org/pipermail/xml-sig/2005-March/011022.html
    inner_text = []
    for child in node.childNodes:
        if child.nodeType == child.TEXT_NODE or child.nodeType == child.CDATA_SECTION_NODE:
            inner_text.append(child.data)
        elif child.nodeType == child.ELEMENT_NODE:
            inner_text.extend(getInnerText(child))
        else:
            pass
    return "".join(inner_text)
[ "def", "getInnerText", "(", "node", ")", ":", "# inspired by https://mail.python.org/pipermail/xml-sig/2005-March/011022.html", "inner_text", "=", "[", "]", "for", "child", "in", "node", ".", "childNodes", ":", "if", "child", ".", "nodeType", "==", "child", ".", "TEXT_NODE", "or", "child", ".", "nodeType", "==", "child", ".", "CDATA_SECTION_NODE", ":", "inner_text", ".", "append", "(", "child", ".", "data", ")", "elif", "child", ".", "nodeType", "==", "child", ".", "ELEMENT_NODE", ":", "inner_text", ".", "extend", "(", "getInnerText", "(", "child", ")", ")", "else", ":", "pass", "return", "\"\"", ".", "join", "(", "inner_text", ")" ]
[ 334, 0 ]
[ 345, 30 ]
python
en
['en', 'en', 'en']
True
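A quick check with xml.dom.minidom, assuming getInnerText as defined above:

from xml.dom import minidom

doc = minidom.parseString('<p>Hello <b>world</b>!</p>')
print(getInnerText(doc.documentElement))  # -> Hello world!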
Serializer.start_serialization
(self)
Start serialization -- open the XML document and the root element.
Start serialization -- open the XML document and the root element.
def start_serialization(self):
    """
    Start serialization -- open the XML document and the root element.
    """
    self.xml = SimplerXMLGenerator(self.stream, self.options.get("encoding", settings.DEFAULT_CHARSET))
    self.xml.startDocument()
    self.xml.startElement("django-objects", {"version": "1.0"})
[ "def", "start_serialization", "(", "self", ")", ":", "self", ".", "xml", "=", "SimplerXMLGenerator", "(", "self", ".", "stream", ",", "self", ".", "options", ".", "get", "(", "\"encoding\"", ",", "settings", ".", "DEFAULT_CHARSET", ")", ")", "self", ".", "xml", ".", "startDocument", "(", ")", "self", ".", "xml", ".", "startElement", "(", "\"django-objects\"", ",", "{", "\"version\"", ":", "\"1.0\"", "}", ")" ]
[ 25, 4 ]
[ 31, 67 ]
python
en
['en', 'error', 'th']
False
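These Serializer hooks back Django's XML serialization format; in a configured project the public entry point would look like this (MyModel is hypothetical):

from django.core import serializers

xml = serializers.serialize('xml', MyModel.objects.all())  # MyModel is hypothetical
# Produces a document shaped like:
# <?xml version="1.0" encoding="utf-8"?>
# <django-objects version="1.0">
#   <object model="app.mymodel" pk="1">...</object>
# </django-objects>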
Serializer.end_serialization
(self)
End serialization -- end the document.
End serialization -- end the document.
def end_serialization(self):
    """
    End serialization -- end the document.
    """
    self.indent(0)
    self.xml.endElement("django-objects")
    self.xml.endDocument()
[ "def", "end_serialization", "(", "self", ")", ":", "self", ".", "indent", "(", "0", ")", "self", ".", "xml", ".", "endElement", "(", "\"django-objects\"", ")", "self", ".", "xml", ".", "endDocument", "(", ")" ]
[ 33, 4 ]
[ 39, 30 ]
python
en
['en', 'error', 'th']
False
Serializer.start_object
(self, obj)
Called as each object is handled.
Called as each object is handled.
def start_object(self, obj):
    """
    Called as each object is handled.
    """
    if not hasattr(obj, "_meta"):
        raise base.SerializationError("Non-model object (%s) encountered during serialization" % type(obj))

    self.indent(1)
    attrs = {'model': str(obj._meta)}
    if not self.use_natural_primary_keys or not hasattr(obj, 'natural_key'):
        obj_pk = obj.pk
        if obj_pk is not None:
            attrs['pk'] = str(obj_pk)

    self.xml.startElement("object", attrs)
[ "def", "start_object", "(", "self", ",", "obj", ")", ":", "if", "not", "hasattr", "(", "obj", ",", "\"_meta\"", ")", ":", "raise", "base", ".", "SerializationError", "(", "\"Non-model object (%s) encountered during serialization\"", "%", "type", "(", "obj", ")", ")", "self", ".", "indent", "(", "1", ")", "attrs", "=", "{", "'model'", ":", "str", "(", "obj", ".", "_meta", ")", "}", "if", "not", "self", ".", "use_natural_primary_keys", "or", "not", "hasattr", "(", "obj", ",", "'natural_key'", ")", ":", "obj_pk", "=", "obj", ".", "pk", "if", "obj_pk", "is", "not", "None", ":", "attrs", "[", "'pk'", "]", "=", "str", "(", "obj_pk", ")", "self", ".", "xml", ".", "startElement", "(", "\"object\"", ",", "attrs", ")" ]
[ 41, 4 ]
[ 55, 46 ]
python
en
['en', 'error', 'th']
False
Serializer.end_object
(self, obj)
Called after handling all fields for an object.
Called after handling all fields for an object.
def end_object(self, obj):
    """
    Called after handling all fields for an object.
    """
    self.indent(1)
    self.xml.endElement("object")
[ "def", "end_object", "(", "self", ",", "obj", ")", ":", "self", ".", "indent", "(", "1", ")", "self", ".", "xml", ".", "endElement", "(", "\"object\"", ")" ]
[ 57, 4 ]
[ 62, 37 ]
python
en
['en', 'error', 'th']
False
Serializer.handle_field
(self, obj, field)
Handle each field on an object (except for ForeignKeys and ManyToManyFields).
Handle each field on an object (except for ForeignKeys and ManyToManyFields).
def handle_field(self, obj, field):
    """
    Handle each field on an object (except for ForeignKeys and
    ManyToManyFields).
    """
    self.indent(2)
    self.xml.startElement('field', {
        'name': field.name,
        'type': field.get_internal_type(),
    })

    # Get a "string version" of the object's data.
    if getattr(obj, field.name) is not None:
        value = field.value_to_string(obj)
        if field.get_internal_type() == 'JSONField':
            # Dump value since JSONField.value_to_string() doesn't output
            # strings.
            value = json.dumps(value, cls=field.encoder)
        try:
            self.xml.characters(value)
        except UnserializableContentError:
            raise ValueError("%s.%s (pk:%s) contains unserializable characters" % (
                obj.__class__.__name__, field.name, obj.pk))
    else:
        self.xml.addQuickElement("None")

    self.xml.endElement("field")
[ "def", "handle_field", "(", "self", ",", "obj", ",", "field", ")", ":", "self", ".", "indent", "(", "2", ")", "self", ".", "xml", ".", "startElement", "(", "'field'", ",", "{", "'name'", ":", "field", ".", "name", ",", "'type'", ":", "field", ".", "get_internal_type", "(", ")", ",", "}", ")", "# Get a \"string version\" of the object's data.", "if", "getattr", "(", "obj", ",", "field", ".", "name", ")", "is", "not", "None", ":", "value", "=", "field", ".", "value_to_string", "(", "obj", ")", "if", "field", ".", "get_internal_type", "(", ")", "==", "'JSONField'", ":", "# Dump value since JSONField.value_to_string() doesn't output", "# strings.", "value", "=", "json", ".", "dumps", "(", "value", ",", "cls", "=", "field", ".", "encoder", ")", "try", ":", "self", ".", "xml", ".", "characters", "(", "value", ")", "except", "UnserializableContentError", ":", "raise", "ValueError", "(", "\"%s.%s (pk:%s) contains unserializable characters\"", "%", "(", "obj", ".", "__class__", ".", "__name__", ",", "field", ".", "name", ",", "obj", ".", "pk", ")", ")", "else", ":", "self", ".", "xml", ".", "addQuickElement", "(", "\"None\"", ")", "self", ".", "xml", ".", "endElement", "(", "\"field\"", ")" ]
[ 64, 4 ]
[ 90, 36 ]
python
en
['en', 'error', 'th']
False
Serializer.handle_fk_field
(self, obj, field)
Handle a ForeignKey (they need to be treated slightly differently from regular fields).
Handle a ForeignKey (they need to be treated slightly differently from regular fields).
def handle_fk_field(self, obj, field):
    """
    Handle a ForeignKey (they need to be treated slightly differently from
    regular fields).
    """
    self._start_relational_field(field)
    related_att = getattr(obj, field.get_attname())
    if related_att is not None:
        if self.use_natural_foreign_keys and hasattr(field.remote_field.model, 'natural_key'):
            related = getattr(obj, field.name)
            # If related object has a natural key, use it
            related = related.natural_key()
            # Iterable natural keys are rolled out as subelements
            for key_value in related:
                self.xml.startElement("natural", {})
                self.xml.characters(str(key_value))
                self.xml.endElement("natural")
        else:
            self.xml.characters(str(related_att))
    else:
        self.xml.addQuickElement("None")
    self.xml.endElement("field")
[ "def", "handle_fk_field", "(", "self", ",", "obj", ",", "field", ")", ":", "self", ".", "_start_relational_field", "(", "field", ")", "related_att", "=", "getattr", "(", "obj", ",", "field", ".", "get_attname", "(", ")", ")", "if", "related_att", "is", "not", "None", ":", "if", "self", ".", "use_natural_foreign_keys", "and", "hasattr", "(", "field", ".", "remote_field", ".", "model", ",", "'natural_key'", ")", ":", "related", "=", "getattr", "(", "obj", ",", "field", ".", "name", ")", "# If related object has a natural key, use it", "related", "=", "related", ".", "natural_key", "(", ")", "# Iterable natural keys are rolled out as subelements", "for", "key_value", "in", "related", ":", "self", ".", "xml", ".", "startElement", "(", "\"natural\"", ",", "{", "}", ")", "self", ".", "xml", ".", "characters", "(", "str", "(", "key_value", ")", ")", "self", ".", "xml", ".", "endElement", "(", "\"natural\"", ")", "else", ":", "self", ".", "xml", ".", "characters", "(", "str", "(", "related_att", ")", ")", "else", ":", "self", ".", "xml", ".", "addQuickElement", "(", "\"None\"", ")", "self", ".", "xml", ".", "endElement", "(", "\"field\"", ")" ]
[ 92, 4 ]
[ 113, 36 ]
python
en
['en', 'error', 'th']
False
Serializer.handle_m2m_field
(self, obj, field)
Handle a ManyToManyField. Related objects are only serialized as references to the object's PK (i.e. the related *data* is not dumped, just the relation).
Handle a ManyToManyField. Related objects are only serialized as references to the object's PK (i.e. the related *data* is not dumped, just the relation).
def handle_m2m_field(self, obj, field):
    """
    Handle a ManyToManyField. Related objects are only serialized as
    references to the object's PK (i.e. the related *data* is not dumped,
    just the relation).
    """
    if field.remote_field.through._meta.auto_created:
        self._start_relational_field(field)
        if self.use_natural_foreign_keys and hasattr(field.remote_field.model, 'natural_key'):
            # If the objects in the m2m have a natural key, use it
            def handle_m2m(value):
                natural = value.natural_key()
                # Iterable natural keys are rolled out as subelements
                self.xml.startElement("object", {})
                for key_value in natural:
                    self.xml.startElement("natural", {})
                    self.xml.characters(str(key_value))
                    self.xml.endElement("natural")
                self.xml.endElement("object")
        else:
            def handle_m2m(value):
                self.xml.addQuickElement("object", attrs={
                    'pk': str(value.pk)
                })
        m2m_iter = getattr(obj, '_prefetched_objects_cache', {}).get(
            field.name,
            getattr(obj, field.name).iterator(),
        )
        for relobj in m2m_iter:
            handle_m2m(relobj)

        self.xml.endElement("field")
[ "def", "handle_m2m_field", "(", "self", ",", "obj", ",", "field", ")", ":", "if", "field", ".", "remote_field", ".", "through", ".", "_meta", ".", "auto_created", ":", "self", ".", "_start_relational_field", "(", "field", ")", "if", "self", ".", "use_natural_foreign_keys", "and", "hasattr", "(", "field", ".", "remote_field", ".", "model", ",", "'natural_key'", ")", ":", "# If the objects in the m2m have a natural key, use it", "def", "handle_m2m", "(", "value", ")", ":", "natural", "=", "value", ".", "natural_key", "(", ")", "# Iterable natural keys are rolled out as subelements", "self", ".", "xml", ".", "startElement", "(", "\"object\"", ",", "{", "}", ")", "for", "key_value", "in", "natural", ":", "self", ".", "xml", ".", "startElement", "(", "\"natural\"", ",", "{", "}", ")", "self", ".", "xml", ".", "characters", "(", "str", "(", "key_value", ")", ")", "self", ".", "xml", ".", "endElement", "(", "\"natural\"", ")", "self", ".", "xml", ".", "endElement", "(", "\"object\"", ")", "else", ":", "def", "handle_m2m", "(", "value", ")", ":", "self", ".", "xml", ".", "addQuickElement", "(", "\"object\"", ",", "attrs", "=", "{", "'pk'", ":", "str", "(", "value", ".", "pk", ")", "}", ")", "m2m_iter", "=", "getattr", "(", "obj", ",", "'_prefetched_objects_cache'", ",", "{", "}", ")", ".", "get", "(", "field", ".", "name", ",", "getattr", "(", "obj", ",", "field", ".", "name", ")", ".", "iterator", "(", ")", ",", ")", "for", "relobj", "in", "m2m_iter", ":", "handle_m2m", "(", "relobj", ")", "self", ".", "xml", ".", "endElement", "(", "\"field\"", ")" ]
[ 115, 4 ]
[ 146, 40 ]
python
en
['en', 'error', 'th']
False
Serializer._start_relational_field
(self, field)
Output the <field> element for relational fields.
Output the <field> element for relational fields.
def _start_relational_field(self, field):
    """Output the <field> element for relational fields."""
    self.indent(2)
    self.xml.startElement('field', {
        'name': field.name,
        'rel': field.remote_field.__class__.__name__,
        'to': str(field.remote_field.model._meta),
    })
[ "def", "_start_relational_field", "(", "self", ",", "field", ")", ":", "self", ".", "indent", "(", "2", ")", "self", ".", "xml", ".", "startElement", "(", "'field'", ",", "{", "'name'", ":", "field", ".", "name", ",", "'rel'", ":", "field", ".", "remote_field", ".", "__class__", ".", "__name__", ",", "'to'", ":", "str", "(", "field", ".", "remote_field", ".", "model", ".", "_meta", ")", ",", "}", ")" ]
[ 148, 4 ]
[ 155, 10 ]
python
en
['en', 'en', 'en']
True
Deserializer._make_parser
(self)
Create a hardened XML parser (no custom/external entities).
Create a hardened XML parser (no custom/external entities).
def _make_parser(self):
    """Create a hardened XML parser (no custom/external entities)."""
    return DefusedExpatParser()
[ "def", "_make_parser", "(", "self", ")", ":", "return", "DefusedExpatParser", "(", ")" ]
[ 168, 4 ]
[ 170, 35 ]
python
en
['en', 'af', 'en']
True
Deserializer._handle_object
(self, node)
Convert an <object> node to a DeserializedObject.
Convert an <object> node to a DeserializedObject.
def _handle_object(self, node):
    """Convert an <object> node to a DeserializedObject."""
    # Look up the model using the model loading mechanism. If this fails,
    # bail.
    Model = self._get_model_from_node(node, "model")

    # Start building a data dictionary from the object.
    data = {}
    if node.hasAttribute('pk'):
        data[Model._meta.pk.attname] = Model._meta.pk.to_python(
            node.getAttribute('pk'))

    # Also start building a dict of m2m data (this is saved as
    # {m2m_accessor_attribute : [list_of_related_objects]})
    m2m_data = {}
    deferred_fields = {}

    field_names = {f.name for f in Model._meta.get_fields()}
    # Deserialize each field.
    for field_node in node.getElementsByTagName("field"):
        # If the field is missing the name attribute, bail (are you
        # sensing a pattern here?)
        field_name = field_node.getAttribute("name")
        if not field_name:
            raise base.DeserializationError("<field> node is missing the 'name' attribute")

        # Get the field from the Model. This will raise a
        # FieldDoesNotExist if, well, the field doesn't exist, which will
        # be propagated correctly unless ignorenonexistent=True is used.
        if self.ignore and field_name not in field_names:
            continue
        field = Model._meta.get_field(field_name)

        # As is usually the case, relation fields get the special treatment.
        if field.remote_field and isinstance(field.remote_field, models.ManyToManyRel):
            value = self._handle_m2m_field_node(field_node, field)
            if value == base.DEFER_FIELD:
                deferred_fields[field] = [
                    [
                        getInnerText(nat_node).strip()
                        for nat_node in obj_node.getElementsByTagName('natural')
                    ]
                    for obj_node in field_node.getElementsByTagName('object')
                ]
            else:
                m2m_data[field.name] = value
        elif field.remote_field and isinstance(field.remote_field, models.ManyToOneRel):
            value = self._handle_fk_field_node(field_node, field)
            if value == base.DEFER_FIELD:
                deferred_fields[field] = [
                    getInnerText(k).strip()
                    for k in field_node.getElementsByTagName('natural')
                ]
            else:
                data[field.attname] = value
        else:
            if field_node.getElementsByTagName('None'):
                value = None
            else:
                value = field.to_python(getInnerText(field_node).strip())
                # Load value since JSONField.to_python() outputs strings.
                if field.get_internal_type() == 'JSONField':
                    value = json.loads(value, cls=field.decoder)
            data[field.name] = value

    obj = base.build_instance(Model, data, self.db)

    # Return a DeserializedObject so that the m2m data has a place to live.
    return base.DeserializedObject(obj, m2m_data, deferred_fields)
[ "def", "_handle_object", "(", "self", ",", "node", ")", ":", "# Look up the model using the model loading mechanism. If this fails,", "# bail.", "Model", "=", "self", ".", "_get_model_from_node", "(", "node", ",", "\"model\"", ")", "# Start building a data dictionary from the object.", "data", "=", "{", "}", "if", "node", ".", "hasAttribute", "(", "'pk'", ")", ":", "data", "[", "Model", ".", "_meta", ".", "pk", ".", "attname", "]", "=", "Model", ".", "_meta", ".", "pk", ".", "to_python", "(", "node", ".", "getAttribute", "(", "'pk'", ")", ")", "# Also start building a dict of m2m data (this is saved as", "# {m2m_accessor_attribute : [list_of_related_objects]})", "m2m_data", "=", "{", "}", "deferred_fields", "=", "{", "}", "field_names", "=", "{", "f", ".", "name", "for", "f", "in", "Model", ".", "_meta", ".", "get_fields", "(", ")", "}", "# Deserialize each field.", "for", "field_node", "in", "node", ".", "getElementsByTagName", "(", "\"field\"", ")", ":", "# If the field is missing the name attribute, bail (are you", "# sensing a pattern here?)", "field_name", "=", "field_node", ".", "getAttribute", "(", "\"name\"", ")", "if", "not", "field_name", ":", "raise", "base", ".", "DeserializationError", "(", "\"<field> node is missing the 'name' attribute\"", ")", "# Get the field from the Model. This will raise a", "# FieldDoesNotExist if, well, the field doesn't exist, which will", "# be propagated correctly unless ignorenonexistent=True is used.", "if", "self", ".", "ignore", "and", "field_name", "not", "in", "field_names", ":", "continue", "field", "=", "Model", ".", "_meta", ".", "get_field", "(", "field_name", ")", "# As is usually the case, relation fields get the special treatment.", "if", "field", ".", "remote_field", "and", "isinstance", "(", "field", ".", "remote_field", ",", "models", ".", "ManyToManyRel", ")", ":", "value", "=", "self", ".", "_handle_m2m_field_node", "(", "field_node", ",", "field", ")", "if", "value", "==", "base", ".", "DEFER_FIELD", ":", "deferred_fields", "[", "field", "]", "=", "[", "[", "getInnerText", "(", "nat_node", ")", ".", "strip", "(", ")", "for", "nat_node", "in", "obj_node", ".", "getElementsByTagName", "(", "'natural'", ")", "]", "for", "obj_node", "in", "field_node", ".", "getElementsByTagName", "(", "'object'", ")", "]", "else", ":", "m2m_data", "[", "field", ".", "name", "]", "=", "value", "elif", "field", ".", "remote_field", "and", "isinstance", "(", "field", ".", "remote_field", ",", "models", ".", "ManyToOneRel", ")", ":", "value", "=", "self", ".", "_handle_fk_field_node", "(", "field_node", ",", "field", ")", "if", "value", "==", "base", ".", "DEFER_FIELD", ":", "deferred_fields", "[", "field", "]", "=", "[", "getInnerText", "(", "k", ")", ".", "strip", "(", ")", "for", "k", "in", "field_node", ".", "getElementsByTagName", "(", "'natural'", ")", "]", "else", ":", "data", "[", "field", ".", "attname", "]", "=", "value", "else", ":", "if", "field_node", ".", "getElementsByTagName", "(", "'None'", ")", ":", "value", "=", "None", "else", ":", "value", "=", "field", ".", "to_python", "(", "getInnerText", "(", "field_node", ")", ".", "strip", "(", ")", ")", "# Load value since JSONField.to_python() outputs strings.", "if", "field", ".", "get_internal_type", "(", ")", "==", "'JSONField'", ":", "value", "=", "json", ".", "loads", "(", "value", ",", "cls", "=", "field", ".", "decoder", ")", "data", "[", "field", ".", "name", "]", "=", "value", "obj", "=", "base", ".", "build_instance", "(", "Model", ",", "data", ",", "self", ".", "db", ")", "# 
Return a DeserializedObject so that the m2m data has a place to live.", "return", "base", ".", "DeserializedObject", "(", "obj", ",", "m2m_data", ",", "deferred_fields", ")" ]
[ 179, 4 ]
[ 247, 70 ]
python
en
['en', 'en', 'en']
True
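The matching round trip, sketched for a configured Django project:

from django.core import serializers

payload = (
    '<?xml version="1.0" encoding="utf-8"?>'
    '<django-objects version="1.0">'
    '<object model="auth.user" pk="1">'
    '<field name="username" type="CharField">alice</field>'
    '</object></django-objects>'
)
for deserialized in serializers.deserialize('xml', payload):
    print(deserialized.object.username)  # -> alice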
Deserializer._handle_fk_field_node
(self, node, field)
Handle a <field> node for a ForeignKey
Handle a <field> node for a ForeignKey
def _handle_fk_field_node(self, node, field):
    """
    Handle a <field> node for a ForeignKey
    """
    # Check if there is a child node named 'None', returning None if so.
    if node.getElementsByTagName('None'):
        return None
    else:
        model = field.remote_field.model
        if hasattr(model._default_manager, 'get_by_natural_key'):
            keys = node.getElementsByTagName('natural')
            if keys:
                # If there are 'natural' subelements, it must be a natural key
                field_value = [getInnerText(k).strip() for k in keys]
                try:
                    obj = model._default_manager.db_manager(self.db).get_by_natural_key(*field_value)
                except ObjectDoesNotExist:
                    if self.handle_forward_references:
                        return base.DEFER_FIELD
                    else:
                        raise
                obj_pk = getattr(obj, field.remote_field.field_name)
                # If this is a natural foreign key to an object that
                # has a FK/O2O as the foreign key, use the FK value
                if field.remote_field.model._meta.pk.remote_field:
                    obj_pk = obj_pk.pk
            else:
                # Otherwise, treat like a normal PK
                field_value = getInnerText(node).strip()
                obj_pk = model._meta.get_field(field.remote_field.field_name).to_python(field_value)
            return obj_pk
        else:
            field_value = getInnerText(node).strip()
            return model._meta.get_field(field.remote_field.field_name).to_python(field_value)
[ "def", "_handle_fk_field_node", "(", "self", ",", "node", ",", "field", ")", ":", "# Check if there is a child node named 'None', returning None if so.", "if", "node", ".", "getElementsByTagName", "(", "'None'", ")", ":", "return", "None", "else", ":", "model", "=", "field", ".", "remote_field", ".", "model", "if", "hasattr", "(", "model", ".", "_default_manager", ",", "'get_by_natural_key'", ")", ":", "keys", "=", "node", ".", "getElementsByTagName", "(", "'natural'", ")", "if", "keys", ":", "# If there are 'natural' subelements, it must be a natural key", "field_value", "=", "[", "getInnerText", "(", "k", ")", ".", "strip", "(", ")", "for", "k", "in", "keys", "]", "try", ":", "obj", "=", "model", ".", "_default_manager", ".", "db_manager", "(", "self", ".", "db", ")", ".", "get_by_natural_key", "(", "*", "field_value", ")", "except", "ObjectDoesNotExist", ":", "if", "self", ".", "handle_forward_references", ":", "return", "base", ".", "DEFER_FIELD", "else", ":", "raise", "obj_pk", "=", "getattr", "(", "obj", ",", "field", ".", "remote_field", ".", "field_name", ")", "# If this is a natural foreign key to an object that", "# has a FK/O2O as the foreign key, use the FK value", "if", "field", ".", "remote_field", ".", "model", ".", "_meta", ".", "pk", ".", "remote_field", ":", "obj_pk", "=", "obj_pk", ".", "pk", "else", ":", "# Otherwise, treat like a normal PK", "field_value", "=", "getInnerText", "(", "node", ")", ".", "strip", "(", ")", "obj_pk", "=", "model", ".", "_meta", ".", "get_field", "(", "field", ".", "remote_field", ".", "field_name", ")", ".", "to_python", "(", "field_value", ")", "return", "obj_pk", "else", ":", "field_value", "=", "getInnerText", "(", "node", ")", ".", "strip", "(", ")", "return", "model", ".", "_meta", ".", "get_field", "(", "field", ".", "remote_field", ".", "field_name", ")", ".", "to_python", "(", "field_value", ")" ]
[ 249, 4 ]
[ 282, 98 ]
python
en
['en', 'error', 'th']
False
Deserializer._handle_m2m_field_node
(self, node, field)
Handle a <field> node for a ManyToManyField.
Handle a <field> node for a ManyToManyField.
def _handle_m2m_field_node(self, node, field):
    """
    Handle a <field> node for a ManyToManyField.
    """
    model = field.remote_field.model
    default_manager = model._default_manager
    if hasattr(default_manager, 'get_by_natural_key'):
        def m2m_convert(n):
            keys = n.getElementsByTagName('natural')
            if keys:
                # If there are 'natural' subelements, it must be a natural key
                field_value = [getInnerText(k).strip() for k in keys]
                obj_pk = default_manager.db_manager(self.db).get_by_natural_key(*field_value).pk
            else:
                # Otherwise, treat like a normal PK value.
                obj_pk = model._meta.pk.to_python(n.getAttribute('pk'))
            return obj_pk
    else:
        def m2m_convert(n):
            return model._meta.pk.to_python(n.getAttribute('pk'))
    values = []
    try:
        for c in node.getElementsByTagName('object'):
            values.append(m2m_convert(c))
    except Exception as e:
        if isinstance(e, ObjectDoesNotExist) and self.handle_forward_references:
            return base.DEFER_FIELD
        else:
            raise base.M2MDeserializationError(e, c)
    else:
        return values
[ "def", "_handle_m2m_field_node", "(", "self", ",", "node", ",", "field", ")", ":", "model", "=", "field", ".", "remote_field", ".", "model", "default_manager", "=", "model", ".", "_default_manager", "if", "hasattr", "(", "default_manager", ",", "'get_by_natural_key'", ")", ":", "def", "m2m_convert", "(", "n", ")", ":", "keys", "=", "n", ".", "getElementsByTagName", "(", "'natural'", ")", "if", "keys", ":", "# If there are 'natural' subelements, it must be a natural key", "field_value", "=", "[", "getInnerText", "(", "k", ")", ".", "strip", "(", ")", "for", "k", "in", "keys", "]", "obj_pk", "=", "default_manager", ".", "db_manager", "(", "self", ".", "db", ")", ".", "get_by_natural_key", "(", "*", "field_value", ")", ".", "pk", "else", ":", "# Otherwise, treat like a normal PK value.", "obj_pk", "=", "model", ".", "_meta", ".", "pk", ".", "to_python", "(", "n", ".", "getAttribute", "(", "'pk'", ")", ")", "return", "obj_pk", "else", ":", "def", "m2m_convert", "(", "n", ")", ":", "return", "model", ".", "_meta", ".", "pk", ".", "to_python", "(", "n", ".", "getAttribute", "(", "'pk'", ")", ")", "values", "=", "[", "]", "try", ":", "for", "c", "in", "node", ".", "getElementsByTagName", "(", "'object'", ")", ":", "values", ".", "append", "(", "m2m_convert", "(", "c", ")", ")", "except", "Exception", "as", "e", ":", "if", "isinstance", "(", "e", ",", "ObjectDoesNotExist", ")", "and", "self", ".", "handle_forward_references", ":", "return", "base", ".", "DEFER_FIELD", "else", ":", "raise", "base", ".", "M2MDeserializationError", "(", "e", ",", "c", ")", "else", ":", "return", "values" ]
[ 284, 4 ]
[ 314, 25 ]
python
en
['en', 'error', 'th']
False
Deserializer._get_model_from_node
(self, node, attr)
Look up a model from a <object model=...> or a <field rel=... to=...> node.
Look up a model from a <object model=...> or a <field rel=... to=...> node.
def _get_model_from_node(self, node, attr):
    """
    Look up a model from a <object model=...> or a <field rel=... to=...>
    node.
    """
    model_identifier = node.getAttribute(attr)
    if not model_identifier:
        raise base.DeserializationError(
            "<%s> node is missing the required '%s' attribute" % (node.nodeName, attr))
    try:
        return apps.get_model(model_identifier)
    except (LookupError, TypeError):
        raise base.DeserializationError(
            "<%s> node has invalid model identifier: '%s'" % (node.nodeName, model_identifier))
[ "def", "_get_model_from_node", "(", "self", ",", "node", ",", "attr", ")", ":", "model_identifier", "=", "node", ".", "getAttribute", "(", "attr", ")", "if", "not", "model_identifier", ":", "raise", "base", ".", "DeserializationError", "(", "\"<%s> node is missing the required '%s' attribute\"", "%", "(", "node", ".", "nodeName", ",", "attr", ")", ")", "try", ":", "return", "apps", ".", "get_model", "(", "model_identifier", ")", "except", "(", "LookupError", ",", "TypeError", ")", ":", "raise", "base", ".", "DeserializationError", "(", "\"<%s> node has invalid model identifier: '%s'\"", "%", "(", "node", ".", "nodeName", ",", "model_identifier", ")", ")" ]
[ 316, 4 ]
[ 331, 52 ]
python
en
['en', 'error', 'th']
False
Git.get_current_branch
(cls, location)
Return the current branch, or None if HEAD isn't at a branch (e.g. detached HEAD).
Return the current branch, or None if HEAD isn't at a branch (e.g. detached HEAD).
def get_current_branch(cls, location):
    # type: (str) -> Optional[str]
    """
    Return the current branch, or None if HEAD isn't at a branch
    (e.g. detached HEAD).
    """
    # git-symbolic-ref exits with empty stdout if "HEAD" is a detached
    # HEAD rather than a symbolic ref. In addition, the -q causes the
    # command to exit with status code 1 instead of 128 in this case
    # and to suppress the message to stderr.
    args = ['symbolic-ref', '-q', 'HEAD']
    output = cls.run_command(
        args,
        extra_ok_returncodes=(1, ),
        show_stdout=False,
        stdout_only=True,
        cwd=location,
    )
    ref = output.strip()

    if ref.startswith('refs/heads/'):
        return ref[len('refs/heads/'):]

    return None
[ "def", "get_current_branch", "(", "cls", ",", "location", ")", ":", "# type: (str) -> Optional[str]", "# git-symbolic-ref exits with empty stdout if \"HEAD\" is a detached", "# HEAD rather than a symbolic ref. In addition, the -q causes the", "# command to exit with status code 1 instead of 128 in this case", "# and to suppress the message to stderr.", "args", "=", "[", "'symbolic-ref'", ",", "'-q'", ",", "'HEAD'", "]", "output", "=", "cls", ".", "run_command", "(", "args", ",", "extra_ok_returncodes", "=", "(", "1", ",", ")", ",", "show_stdout", "=", "False", ",", "stdout_only", "=", "True", ",", "cwd", "=", "location", ",", ")", "ref", "=", "output", ".", "strip", "(", ")", "if", "ref", ".", "startswith", "(", "'refs/heads/'", ")", ":", "return", "ref", "[", "len", "(", "'refs/heads/'", ")", ":", "]", "return", "None" ]
[ 100, 4 ]
[ 123, 19 ]
python
en
['en', 'error', 'th']
False
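A standalone sketch of the same trick, using subprocess in place of pip's run_command wrapper:

import subprocess

def current_branch(location):
    # -q: exit status 1 (not 128) and no stderr noise when HEAD is detached
    proc = subprocess.run(
        ['git', 'symbolic-ref', '-q', 'HEAD'],
        cwd=location, capture_output=True, text=True,
    )
    ref = proc.stdout.strip()
    if ref.startswith('refs/heads/'):
        return ref[len('refs/heads/'):]
    return None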
Git.get_revision_sha
(cls, dest, rev)
Return (sha_or_none, is_branch), where sha_or_none is a commit hash if the revision names a remote branch or tag, otherwise None. Args: dest: the repository directory. rev: the revision name.
Return (sha_or_none, is_branch), where sha_or_none is a commit hash if the revision names a remote branch or tag, otherwise None.
def get_revision_sha(cls, dest, rev):
    # type: (str, str) -> Tuple[Optional[str], bool]
    """
    Return (sha_or_none, is_branch), where sha_or_none is a commit hash
    if the revision names a remote branch or tag, otherwise None.

    Args:
      dest: the repository directory.
      rev: the revision name.
    """
    # Pass rev to pre-filter the list.
    output = cls.run_command(
        ['show-ref', rev],
        cwd=dest,
        show_stdout=False,
        stdout_only=True,
        on_returncode='ignore',
    )
    refs = {}
    # NOTE: We do not use splitlines here since that would split on other
    #       unicode separators, which can be maliciously used to install a
    #       different revision.
    for line in output.strip().split("\n"):
        line = line.rstrip("\r")
        if not line:
            continue
        try:
            ref_sha, ref_name = line.split(" ", maxsplit=2)
        except ValueError:
            # Include the offending line to simplify troubleshooting if
            # this error ever occurs.
            raise ValueError(f'unexpected show-ref line: {line!r}')

        refs[ref_name] = ref_sha

    branch_ref = f'refs/remotes/origin/{rev}'
    tag_ref = f'refs/tags/{rev}'

    sha = refs.get(branch_ref)
    if sha is not None:
        return (sha, True)

    sha = refs.get(tag_ref)

    return (sha, False)
[ "def", "get_revision_sha", "(", "cls", ",", "dest", ",", "rev", ")", ":", "# type: (str, str) -> Tuple[Optional[str], bool]", "# Pass rev to pre-filter the list.", "output", "=", "cls", ".", "run_command", "(", "[", "'show-ref'", ",", "rev", "]", ",", "cwd", "=", "dest", ",", "show_stdout", "=", "False", ",", "stdout_only", "=", "True", ",", "on_returncode", "=", "'ignore'", ",", ")", "refs", "=", "{", "}", "# NOTE: We do not use splitlines here since that would split on other", "# unicode separators, which can be maliciously used to install a", "# different revision.", "for", "line", "in", "output", ".", "strip", "(", ")", ".", "split", "(", "\"\\n\"", ")", ":", "line", "=", "line", ".", "rstrip", "(", "\"\\r\"", ")", "if", "not", "line", ":", "continue", "try", ":", "ref_sha", ",", "ref_name", "=", "line", ".", "split", "(", "\" \"", ",", "maxsplit", "=", "2", ")", "except", "ValueError", ":", "# Include the offending line to simplify troubleshooting if", "# this error ever occurs.", "raise", "ValueError", "(", "f'unexpected show-ref line: {line!r}'", ")", "refs", "[", "ref_name", "]", "=", "ref_sha", "branch_ref", "=", "f'refs/remotes/origin/{rev}'", "tag_ref", "=", "f'refs/tags/{rev}'", "sha", "=", "refs", ".", "get", "(", "branch_ref", ")", "if", "sha", "is", "not", "None", ":", "return", "(", "sha", ",", "True", ")", "sha", "=", "refs", ".", "get", "(", "tag_ref", ")", "return", "(", "sha", ",", "False", ")" ]
[ 126, 4 ]
[ 170, 27 ]
python
en
['en', 'error', 'th']
False
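The ref-table parse, sketched on sample git show-ref output (SHAs hypothetical):

output = (
    "f1d2d2f924e986ac86fdf7b36c94bcdf32beec15 refs/remotes/origin/main\n"
    "e242ed3bffccdf271b7fbaf34ed72d089537b42f refs/tags/v1.0\n"
)
refs = {}
for line in output.strip().split("\n"):
    sha, ref_name = line.rstrip("\r").split(" ", maxsplit=1)
    refs[ref_name] = sha
print(refs.get("refs/remotes/origin/main"))  # branch SHA
print(refs.get("refs/tags/v1.0"))            # tag SHA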
Git._should_fetch
(cls, dest, rev)
Return true if rev is a ref or is a commit that we don't have locally. Branches and tags are not considered in this method because they are assumed to be always available locally (which is a normal outcome of ``git clone`` and ``git fetch --tags``).
Return true if rev is a ref or is a commit that we don't have locally.
def _should_fetch(cls, dest, rev):
    # type: (str, str) -> bool
    """
    Return true if rev is a ref or is a commit that we don't have locally.

    Branches and tags are not considered in this method because they are
    assumed to be always available locally (which is a normal outcome of
    ``git clone`` and ``git fetch --tags``).
    """
    if rev.startswith("refs/"):
        # Always fetch remote refs.
        return True

    if not looks_like_hash(rev):
        # Git fetch would fail with abbreviated commits.
        return False

    if cls.has_commit(dest, rev):
        # Don't fetch if we have the commit locally.
        return False

    return True
[ "def", "_should_fetch", "(", "cls", ",", "dest", ",", "rev", ")", ":", "# type: (str, str) -> bool", "if", "rev", ".", "startswith", "(", "\"refs/\"", ")", ":", "# Always fetch remote refs.", "return", "True", "if", "not", "looks_like_hash", "(", "rev", ")", ":", "# Git fetch would fail with abbreviated commits.", "return", "False", "if", "cls", ".", "has_commit", "(", "dest", ",", "rev", ")", ":", "# Don't fetch if we have the commit locally.", "return", "False", "return", "True" ]
[ 173, 4 ]
[ 194, 19 ]
python
en
['en', 'error', 'th']
False
Git.resolve_revision
(cls, dest, url, rev_options)
Resolve a revision to a new RevOptions object with the SHA1 of the branch, tag, or ref if found. Args: rev_options: a RevOptions object.
Resolve a revision to a new RevOptions object with the SHA1 of the branch, tag, or ref if found.
def resolve_revision(cls, dest, url, rev_options):
    # type: (str, HiddenText, RevOptions) -> RevOptions
    """
    Resolve a revision to a new RevOptions object with the SHA1 of the
    branch, tag, or ref if found.

    Args:
      rev_options: a RevOptions object.
    """
    rev = rev_options.arg_rev
    # The arg_rev property's implementation for Git ensures that the
    # rev return value is always non-None.
    assert rev is not None

    sha, is_branch = cls.get_revision_sha(dest, rev)

    if sha is not None:
        rev_options = rev_options.make_new(sha)
        rev_options.branch_name = rev if is_branch else None

        return rev_options

    # Do not show a warning for the common case of something that has
    # the form of a Git commit hash.
    if not looks_like_hash(rev):
        logger.warning(
            "Did not find branch or tag '%s', assuming revision or ref.",
            rev,
        )

    if not cls._should_fetch(dest, rev):
        return rev_options

    # fetch the requested revision
    cls.run_command(
        make_command('fetch', '-q', url, rev_options.to_args()),
        cwd=dest,
    )
    # Change the revision to the SHA of the ref we fetched
    sha = cls.get_revision(dest, rev='FETCH_HEAD')
    rev_options = rev_options.make_new(sha)

    return rev_options
[ "def", "resolve_revision", "(", "cls", ",", "dest", ",", "url", ",", "rev_options", ")", ":", "# type: (str, HiddenText, RevOptions) -> RevOptions", "rev", "=", "rev_options", ".", "arg_rev", "# The arg_rev property's implementation for Git ensures that the", "# rev return value is always non-None.", "assert", "rev", "is", "not", "None", "sha", ",", "is_branch", "=", "cls", ".", "get_revision_sha", "(", "dest", ",", "rev", ")", "if", "sha", "is", "not", "None", ":", "rev_options", "=", "rev_options", ".", "make_new", "(", "sha", ")", "rev_options", ".", "branch_name", "=", "rev", "if", "is_branch", "else", "None", "return", "rev_options", "# Do not show a warning for the common case of something that has", "# the form of a Git commit hash.", "if", "not", "looks_like_hash", "(", "rev", ")", ":", "logger", ".", "warning", "(", "\"Did not find branch or tag '%s', assuming revision or ref.\"", ",", "rev", ",", ")", "if", "not", "cls", ".", "_should_fetch", "(", "dest", ",", "rev", ")", ":", "return", "rev_options", "# fetch the requested revision", "cls", ".", "run_command", "(", "make_command", "(", "'fetch'", ",", "'-q'", ",", "url", ",", "rev_options", ".", "to_args", "(", ")", ")", ",", "cwd", "=", "dest", ",", ")", "# Change the revision to the SHA of the ref we fetched", "sha", "=", "cls", ".", "get_revision", "(", "dest", ",", "rev", "=", "'FETCH_HEAD'", ")", "rev_options", "=", "rev_options", ".", "make_new", "(", "sha", ")", "return", "rev_options" ]
[ 197, 4 ]
[ 239, 26 ]
python
en
['en', 'error', 'th']
False
Git.is_commit_id_equal
(cls, dest, name)
Return whether the current commit hash equals the given name. Args: dest: the repository directory. name: a string name.
Return whether the current commit hash equals the given name.
def is_commit_id_equal(cls, dest, name):
    # type: (str, Optional[str]) -> bool
    """
    Return whether the current commit hash equals the given name.

    Args:
      dest: the repository directory.
      name: a string name.
    """
    if not name:
        # Then avoid an unnecessary subprocess call.
        return False

    return cls.get_revision(dest) == name
[ "def", "is_commit_id_equal", "(", "cls", ",", "dest", ",", "name", ")", ":", "# type: (str, Optional[str]) -> bool", "if", "not", "name", ":", "# Then avoid an unnecessary subprocess call.", "return", "False", "return", "cls", ".", "get_revision", "(", "dest", ")", "==", "name" ]
[ 242, 4 ]
[ 255, 45 ]
python
en
['en', 'error', 'th']
False
Git.get_remote_url
(cls, location)
Return URL of the first remote encountered. Raises RemoteNotFoundError if the repository does not have a remote url configured.
Return URL of the first remote encountered.
def get_remote_url(cls, location): # type: (str) -> str """ Return URL of the first remote encountered. Raises RemoteNotFoundError if the repository does not have a remote url configured. """ # We need to pass 1 for extra_ok_returncodes since the command # exits with return code 1 if there are no matching lines. stdout = cls.run_command( ['config', '--get-regexp', r'remote\..*\.url'], extra_ok_returncodes=(1, ), show_stdout=False, stdout_only=True, cwd=location, ) remotes = stdout.splitlines() try: found_remote = remotes[0] except IndexError: raise RemoteNotFoundError for remote in remotes: if remote.startswith('remote.origin.url '): found_remote = remote break url = found_remote.split(' ')[1] return cls._git_remote_to_pip_url(url.strip())
[ "def", "get_remote_url", "(", "cls", ",", "location", ")", ":", "# type: (str) -> str", "# We need to pass 1 for extra_ok_returncodes since the command", "# exits with return code 1 if there are no matching lines.", "stdout", "=", "cls", ".", "run_command", "(", "[", "'config'", ",", "'--get-regexp'", ",", "r'remote\\..*\\.url'", "]", ",", "extra_ok_returncodes", "=", "(", "1", ",", ")", ",", "show_stdout", "=", "False", ",", "stdout_only", "=", "True", ",", "cwd", "=", "location", ",", ")", "remotes", "=", "stdout", ".", "splitlines", "(", ")", "try", ":", "found_remote", "=", "remotes", "[", "0", "]", "except", "IndexError", ":", "raise", "RemoteNotFoundError", "for", "remote", "in", "remotes", ":", "if", "remote", ".", "startswith", "(", "'remote.origin.url '", ")", ":", "found_remote", "=", "remote", "break", "url", "=", "found_remote", ".", "split", "(", "' '", ")", "[", "1", "]", "return", "cls", ".", "_git_remote_to_pip_url", "(", "url", ".", "strip", "(", ")", ")" ]
[ 319, 4 ]
[ 347, 54 ]
python
en
['en', 'error', 'th']
False
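To make the remote-selection logic in get_remote_url() concrete, here is the same loop run over illustrative output of `git config --get-regexp 'remote\..*\.url'`; the remote names and URLs below are invented for the example:

# Hypothetical stdout with two configured remotes:
stdout = (
    "remote.upstream.url https://github.com/pypa/pip.git\n"
    "remote.origin.url git@github.com:example/pip.git\n"
)

remotes = stdout.splitlines()
found_remote = remotes[0]              # fall back to the first remote found
for remote in remotes:
    if remote.startswith('remote.origin.url '):
        found_remote = remote          # but prefer origin when present
        break

print(found_remote.split(' ')[1])      # git@github.com:example/pip.git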
Git._git_remote_to_pip_url
(url)
Convert a remote url from what git uses to what pip accepts. There are 3 legal forms **url** may take: 1. A fully qualified url: ssh://[email protected]/foo/bar.git 2. A local project.git folder: /path/to/bare/repository.git 3. SCP shorthand for form 1: [email protected]:foo/bar.git Form 1 is output as-is. Form 2 must be converted to URI and form 3 must be converted to form 1. See the corresponding test test_git_remote_url_to_pip() for examples of sample inputs/outputs.
Convert a remote url from what git uses to what pip accepts.
def _git_remote_to_pip_url(url): # type: (str) -> str """ Convert a remote url from what git uses to what pip accepts. There are 3 legal forms **url** may take: 1. A fully qualified url: ssh://[email protected]/foo/bar.git 2. A local project.git folder: /path/to/bare/repository.git 3. SCP shorthand for form 1: [email protected]:foo/bar.git Form 1 is output as-is. Form 2 must be converted to URI and form 3 must be converted to form 1. See the corresponding test test_git_remote_url_to_pip() for examples of sample inputs/outputs. """ if re.match(r"\w+://", url): # This is already valid. Pass it though as-is. return url if os.path.exists(url): # A local bare remote (git clone --mirror). # Needs a file:// prefix. return pathlib.PurePath(url).as_uri() scp_match = SCP_REGEX.match(url) if scp_match: # Add an ssh:// prefix and replace the ':' with a '/'. return scp_match.expand(r"ssh://\1\2/\3") # Otherwise, bail out. raise RemoteNotValidError(url)
[ "def", "_git_remote_to_pip_url", "(", "url", ")", ":", "# type: (str) -> str", "if", "re", ".", "match", "(", "r\"\\w+://\"", ",", "url", ")", ":", "# This is already valid. Pass it though as-is.", "return", "url", "if", "os", ".", "path", ".", "exists", "(", "url", ")", ":", "# A local bare remote (git clone --mirror).", "# Needs a file:// prefix.", "return", "pathlib", ".", "PurePath", "(", "url", ")", ".", "as_uri", "(", ")", "scp_match", "=", "SCP_REGEX", ".", "match", "(", "url", ")", "if", "scp_match", ":", "# Add an ssh:// prefix and replace the ':' with a '/'.", "return", "scp_match", ".", "expand", "(", "r\"ssh://\\1\\2/\\3\"", ")", "# Otherwise, bail out.", "raise", "RemoteNotValidError", "(", "url", ")" ]
[ 350, 4 ]
[ 379, 38 ]
python
en
['en', 'error', 'th']
False
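A standalone sketch of the three-way conversion in _git_remote_to_pip_url(). SCP_REGEX is pip-internal and not shown in this record, so the pattern below is only an approximation of it; the URLs are illustrative:

import os
import pathlib
import re

# Approximation of pip's SCP_REGEX: optional 'user@', a host, a colon,
# then the server-side path.
SCP_REGEX = re.compile(r"^(\w+@)?([^/:]+):(\w[^:]*)$")

def git_remote_to_pip_url(url):
    if re.match(r"\w+://", url):
        return url                                 # form 1: pass through
    if os.path.exists(url):
        return pathlib.PurePath(url).as_uri()      # form 2: file:// URI
    scp_match = SCP_REGEX.match(url)
    if scp_match:
        return scp_match.expand(r"ssh://\1\2/\3")  # form 3 -> form 1
    raise ValueError(url)

print(git_remote_to_pip_url("ssh://git@example.com/foo/bar.git"))
# ssh://git@example.com/foo/bar.git (already valid, unchanged)
print(git_remote_to_pip_url("git@example.com:foo/bar.git"))
# ssh://git@example.com/foo/bar.git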
Git.has_commit
(cls, location, rev)
Check if rev is a commit that is available in the local repository.
Check if rev is a commit that is available in the local repository.
def has_commit(cls, location, rev): # type: (str, str) -> bool """ Check if rev is a commit that is available in the local repository. """ try: cls.run_command( ['rev-parse', '-q', '--verify', "sha^" + rev], cwd=location, log_failed_cmd=False, ) except InstallationError: return False else: return True
[ "def", "has_commit", "(", "cls", ",", "location", ",", "rev", ")", ":", "# type: (str, str) -> bool", "try", ":", "cls", ".", "run_command", "(", "[", "'rev-parse'", ",", "'-q'", ",", "'--verify'", ",", "\"sha^\"", "+", "rev", "]", ",", "cwd", "=", "location", ",", "log_failed_cmd", "=", "False", ",", ")", "except", "InstallationError", ":", "return", "False", "else", ":", "return", "True" ]
[ 382, 4 ]
[ 396, 23 ]
python
en
['en', 'error', 'th']
False
Git.get_subdirectory
(cls, location)
Return the path to Python project root, relative to the repo root. Return None if the project root is in the repo root.
Return the path to Python project root, relative to the repo root. Return None if the project root is in the repo root.
def get_subdirectory(cls, location): # type: (str) -> Optional[str] """ Return the path to Python project root, relative to the repo root. Return None if the project root is in the repo root. """ # find the repo root git_dir = cls.run_command( ['rev-parse', '--git-dir'], show_stdout=False, stdout_only=True, cwd=location, ).strip() if not os.path.isabs(git_dir): git_dir = os.path.join(location, git_dir) repo_root = os.path.abspath(os.path.join(git_dir, '..')) return find_path_to_project_root_from_repo_root(location, repo_root)
[ "def", "get_subdirectory", "(", "cls", ",", "location", ")", ":", "# type: (str) -> Optional[str]", "# find the repo root", "git_dir", "=", "cls", ".", "run_command", "(", "[", "'rev-parse'", ",", "'--git-dir'", "]", ",", "show_stdout", "=", "False", ",", "stdout_only", "=", "True", ",", "cwd", "=", "location", ",", ")", ".", "strip", "(", ")", "if", "not", "os", ".", "path", ".", "isabs", "(", "git_dir", ")", ":", "git_dir", "=", "os", ".", "path", ".", "join", "(", "location", ",", "git_dir", ")", "repo_root", "=", "os", ".", "path", ".", "abspath", "(", "os", ".", "path", ".", "join", "(", "git_dir", ",", "'..'", ")", ")", "return", "find_path_to_project_root_from_repo_root", "(", "location", ",", "repo_root", ")" ]
[ 412, 4 ]
[ 428, 76 ]
python
en
['en', 'error', 'th']
False
Git.get_url_rev_and_auth
(cls, url)
Prefixes stub URLs like 'user@hostname:user/repo.git' with 'ssh://'. That's required because although they use SSH they sometimes don't work with a ssh:// scheme (e.g. GitHub). But we need a scheme for parsing. Hence we remove it again afterwards and return it as a stub.
Prefixes stub URLs like 'user@hostname:user/repo.git' with 'ssh://'.
def get_url_rev_and_auth(cls, url): # type: (str) -> Tuple[str, Optional[str], AuthInfo] """ Prefixes stub URLs like 'user@hostname:user/repo.git' with 'ssh://'. That's required because although they use SSH they sometimes don't work with a ssh:// scheme (e.g. GitHub). But we need a scheme for parsing. Hence we remove it again afterwards and return it as a stub. """ # Works around an apparent Git bug # (see https://article.gmane.org/gmane.comp.version-control.git/146500) scheme, netloc, path, query, fragment = urlsplit(url) if scheme.endswith('file'): initial_slashes = path[:-len(path.lstrip('/'))] newpath = ( initial_slashes + urllib.request.url2pathname(path) .replace('\\', '/').lstrip('/') ) after_plus = scheme.find('+') + 1 url = scheme[:after_plus] + urlunsplit( (scheme[after_plus:], netloc, newpath, query, fragment), ) if '://' not in url: assert 'file:' not in url url = url.replace('git+', 'git+ssh://') url, rev, user_pass = super().get_url_rev_and_auth(url) url = url.replace('ssh://', '') else: url, rev, user_pass = super().get_url_rev_and_auth(url) return url, rev, user_pass
[ "def", "get_url_rev_and_auth", "(", "cls", ",", "url", ")", ":", "# type: (str) -> Tuple[str, Optional[str], AuthInfo]", "# Works around an apparent Git bug", "# (see https://article.gmane.org/gmane.comp.version-control.git/146500)", "scheme", ",", "netloc", ",", "path", ",", "query", ",", "fragment", "=", "urlsplit", "(", "url", ")", "if", "scheme", ".", "endswith", "(", "'file'", ")", ":", "initial_slashes", "=", "path", "[", ":", "-", "len", "(", "path", ".", "lstrip", "(", "'/'", ")", ")", "]", "newpath", "=", "(", "initial_slashes", "+", "urllib", ".", "request", ".", "url2pathname", "(", "path", ")", ".", "replace", "(", "'\\\\'", ",", "'/'", ")", ".", "lstrip", "(", "'/'", ")", ")", "after_plus", "=", "scheme", ".", "find", "(", "'+'", ")", "+", "1", "url", "=", "scheme", "[", ":", "after_plus", "]", "+", "urlunsplit", "(", "(", "scheme", "[", "after_plus", ":", "]", ",", "netloc", ",", "newpath", ",", "query", ",", "fragment", ")", ",", ")", "if", "'://'", "not", "in", "url", ":", "assert", "'file:'", "not", "in", "url", "url", "=", "url", ".", "replace", "(", "'git+'", ",", "'git+ssh://'", ")", "url", ",", "rev", ",", "user_pass", "=", "super", "(", ")", ".", "get_url_rev_and_auth", "(", "url", ")", "url", "=", "url", ".", "replace", "(", "'ssh://'", ",", "''", ")", "else", ":", "url", ",", "rev", ",", "user_pass", "=", "super", "(", ")", ".", "get_url_rev_and_auth", "(", "url", ")", "return", "url", ",", "rev", ",", "user_pass" ]
[ 431, 4 ]
[ 462, 34 ]
python
en
['en', 'error', 'th']
False
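The scheme juggling in the SSH branch of get_url_rev_and_auth() is easiest to follow on a concrete string; the repository URL below is hypothetical:

url = "git+git@github.com:pypa/pip.git"
assert '://' not in url

# Insert a scheme so the URL survives standard urllib parsing ...
url = url.replace('git+', 'git+ssh://')
print(url)  # git+ssh://git@github.com:pypa/pip.git

# ... the superclass then splits off the revision and auth info, and the
# synthetic scheme is stripped again before the URL reaches git itself:
url = url.replace('ssh://', '')
print(url)  # git+git@github.com:pypa/pip.git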
Git.should_add_vcs_url_prefix
(repo_url)
In either https or ssh form, requirements must be prefixed with git+.
In either https or ssh form, requirements must be prefixed with git+.
def should_add_vcs_url_prefix(repo_url): # type: (str) -> bool """In either https or ssh form, requirements must be prefixed with git+. """ return True
[ "def", "should_add_vcs_url_prefix", "(", "repo_url", ")", ":", "# type: (str) -> bool", "return", "True" ]
[ 498, 4 ]
[ 502, 19 ]
python
en
['en', 'en', 'en']
True
Marker.evaluate
(self, environment=None)
Evaluate a marker. Return the boolean from evaluating the given marker against the environment. environment is an optional argument to override all or part of the determined environment. The environment is determined from the current Python process.
Evaluate a marker.
def evaluate(self, environment=None): """Evaluate a marker. Return the boolean from evaluating the given marker against the environment. environment is an optional argument to override all or part of the determined environment. The environment is determined from the current Python process. """ current_environment = default_environment() if environment is not None: current_environment.update(environment) return _evaluate_markers(self._markers, current_environment)
[ "def", "evaluate", "(", "self", ",", "environment", "=", "None", ")", ":", "current_environment", "=", "default_environment", "(", ")", "if", "environment", "is", "not", "None", ":", "current_environment", ".", "update", "(", "environment", ")", "return", "_evaluate_markers", "(", "self", ".", "_markers", ",", "current_environment", ")" ]
[ 287, 4 ]
[ 300, 68 ]
python
en
['en', 'en', 'en']
True
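Usage sketch for Marker.evaluate(), assuming the packaging library is installed; the marker string is illustrative:

from packaging.markers import Marker

marker = Marker('python_version >= "3.6" and sys_platform == "linux"')

# Evaluated against the environment of the current interpreter:
print(marker.evaluate())

# With part of the determined environment overridden; False everywhere,
# because the overridden sys_platform can never equal "linux":
print(marker.evaluate({'sys_platform': 'win32'}))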
SessionStore.load
(self)
Load the data from the key itself instead of fetching from some external data store. Opposite of _get_session_key(), raise BadSignature if signature fails.
Load the data from the key itself instead of fetching from some external data store. Opposite of _get_session_key(), raise BadSignature if signature fails.
def load(self): """ Load the data from the key itself instead of fetching from some external data store. Opposite of _get_session_key(), raise BadSignature if signature fails. """ try: return signing.loads( self.session_key, serializer=self.serializer, # This doesn't handle non-default expiry dates, see #19201 max_age=self.get_session_cookie_age(), salt='django.contrib.sessions.backends.signed_cookies', ) except Exception: # BadSignature, ValueError, or unpickling exceptions. If any of # these happen, reset the session. self.create() return {}
[ "def", "load", "(", "self", ")", ":", "try", ":", "return", "signing", ".", "loads", "(", "self", ".", "session_key", ",", "serializer", "=", "self", ".", "serializer", ",", "# This doesn't handle non-default expiry dates, see #19201", "max_age", "=", "self", ".", "get_session_cookie_age", "(", ")", ",", "salt", "=", "'django.contrib.sessions.backends.signed_cookies'", ",", ")", "except", "Exception", ":", "# BadSignature, ValueError, or unpickling exceptions. If any of", "# these happen, reset the session.", "self", ".", "create", "(", ")", "return", "{", "}" ]
[ 6, 4 ]
[ 24, 17 ]
python
en
['en', 'error', 'th']
False
SessionStore.create
(self)
To create a new key, set the modified flag so that the cookie is set on the client for the current request.
To create a new key, set the modified flag so that the cookie is set on the client for the current request.
def create(self): """ To create a new key, set the modified flag so that the cookie is set on the client for the current request. """ self.modified = True
[ "def", "create", "(", "self", ")", ":", "self", ".", "modified", "=", "True" ]
[ 26, 4 ]
[ 31, 28 ]
python
en
['en', 'error', 'th']
False
SessionStore.save
(self, must_create=False)
To save, get the session key as a securely signed string and then set the modified flag so that the cookie is set on the client for the current request.
To save, get the session key as a securely signed string and then set the modified flag so that the cookie is set on the client for the current request.
def save(self, must_create=False): """ To save, get the session key as a securely signed string and then set the modified flag so that the cookie is set on the client for the current request. """ self._session_key = self._get_session_key() self.modified = True
[ "def", "save", "(", "self", ",", "must_create", "=", "False", ")", ":", "self", ".", "_session_key", "=", "self", ".", "_get_session_key", "(", ")", "self", ".", "modified", "=", "True" ]
[ 33, 4 ]
[ 40, 28 ]
python
en
['en', 'error', 'th']
False
SessionStore.exists
(self, session_key=None)
This method makes sense when you're talking to a shared resource, but it doesn't matter when you're storing the information in the client's cookie.
This method makes sense when you're talking to a shared resource, but it doesn't matter when you're storing the information in the client's cookie.
def exists(self, session_key=None): """ This method makes sense when you're talking to a shared resource, but it doesn't matter when you're storing the information in the client's cookie. """ return False
[ "def", "exists", "(", "self", ",", "session_key", "=", "None", ")", ":", "return", "False" ]
[ 42, 4 ]
[ 48, 20 ]
python
en
['en', 'error', 'th']
False
SessionStore.delete
(self, session_key=None)
To delete, clear the session key and the underlying data structure and set the modified flag so that the cookie is set on the client for the current request.
To delete, clear the session key and the underlying data structure and set the modified flag so that the cookie is set on the client for the current request.
def delete(self, session_key=None): """ To delete, clear the session key and the underlying data structure and set the modified flag so that the cookie is set on the client for the current request. """ self._session_key = '' self._session_cache = {} self.modified = True
[ "def", "delete", "(", "self", ",", "session_key", "=", "None", ")", ":", "self", ".", "_session_key", "=", "''", "self", ".", "_session_cache", "=", "{", "}", "self", ".", "modified", "=", "True" ]
[ 50, 4 ]
[ 58, 28 ]
python
en
['en', 'error', 'th']
False
SessionStore.cycle_key
(self)
Keep the same data but with a new key. Call save() and it will automatically save a cookie with a new key at the end of the request.
Keep the same data but with a new key. Call save() and it will automatically save a cookie with a new key at the end of the request.
def cycle_key(self): """ Keep the same data but with a new key. Call save() and it will automatically save a cookie with a new key at the end of the request. """ self.save()
[ "def", "cycle_key", "(", "self", ")", ":", "self", ".", "save", "(", ")" ]
[ 60, 4 ]
[ 65, 19 ]
python
en
['en', 'error', 'th']
False
SessionStore._get_session_key
(self)
Instead of generating a random string, generate a secure url-safe base64-encoded string of data as our session key.
Instead of generating a random string, generate a secure url-safe base64-encoded string of data as our session key.
def _get_session_key(self): """ Instead of generating a random string, generate a secure url-safe base64-encoded string of data as our session key. """ return signing.dumps( self._session, compress=True, salt='django.contrib.sessions.backends.signed_cookies', serializer=self.serializer, )
[ "def", "_get_session_key", "(", "self", ")", ":", "return", "signing", ".", "dumps", "(", "self", ".", "_session", ",", "compress", "=", "True", ",", "salt", "=", "'django.contrib.sessions.backends.signed_cookies'", ",", "serializer", "=", "self", ".", "serializer", ",", ")" ]
[ 67, 4 ]
[ 76, 9 ]
python
en
['en', 'error', 'th']
False
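The load(), save() and _get_session_key() methods above reduce to a dumps/loads round trip through django.core.signing. A minimal standalone sketch, with a throwaway SECRET_KEY configured purely for the example:

from django.conf import settings

settings.configure(SECRET_KEY='example-only-not-a-real-secret')

from django.core import signing

SALT = 'django.contrib.sessions.backends.signed_cookies'

# What _get_session_key() produces: the session data itself, signed.
session_key = signing.dumps({'cart': [1, 2, 3]}, compress=True, salt=SALT)
print(session_key)  # opaque token that is stored in the client's cookie

# What load() does: verify the signature (and age) and recover the data.
print(signing.loads(session_key, salt=SALT, max_age=3600))
# {'cart': [1, 2, 3]} -- a tampered or expired key raises BadSignature
# instead, which load() answers by resetting the session.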
resolve_relation
(scope_model, relation)
Transform relation into a model or fully-qualified model string of the form "app_label.ModelName", relative to scope_model. The relation argument can be: * RECURSIVE_RELATIONSHIP_CONSTANT, i.e. the string "self", in which case the model argument will be returned. * A bare model name without an app_label, in which case scope_model's app_label will be prepended. * An "app_label.ModelName" string. * A model class, which will be returned unchanged.
Transform relation into a model or fully-qualified model string of the form "app_label.ModelName", relative to scope_model.
def resolve_relation(scope_model, relation): """ Transform relation into a model or fully-qualified model string of the form "app_label.ModelName", relative to scope_model. The relation argument can be: * RECURSIVE_RELATIONSHIP_CONSTANT, i.e. the string "self", in which case the model argument will be returned. * A bare model name without an app_label, in which case scope_model's app_label will be prepended. * An "app_label.ModelName" string. * A model class, which will be returned unchanged. """ # Check for recursive relations if relation == RECURSIVE_RELATIONSHIP_CONSTANT: relation = scope_model # Look for an "app.Model" relation if isinstance(relation, str): if "." not in relation: relation = "%s.%s" % (scope_model._meta.app_label, relation) return relation
[ "def", "resolve_relation", "(", "scope_model", ",", "relation", ")", ":", "# Check for recursive relations", "if", "relation", "==", "RECURSIVE_RELATIONSHIP_CONSTANT", ":", "relation", "=", "scope_model", "# Look for an \"app.Model\" relation", "if", "isinstance", "(", "relation", ",", "str", ")", ":", "if", "\".\"", "not", "in", "relation", ":", "relation", "=", "\"%s.%s\"", "%", "(", "scope_model", ".", "_meta", ".", "app_label", ",", "relation", ")", "return", "relation" ]
[ 36, 0 ]
[ 58, 19 ]
python
en
['en', 'error', 'th']
False
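The resolution rules of resolve_relation(), exercised without the ORM; ScopeModel and its _meta stub are hypothetical stand-ins for a real Django model:

RECURSIVE_RELATIONSHIP_CONSTANT = 'self'

class _Meta:
    app_label = 'shop'

class ScopeModel:
    _meta = _Meta()

def resolve_relation(scope_model, relation):
    if relation == RECURSIVE_RELATIONSHIP_CONSTANT:
        relation = scope_model                  # "self" -> the model itself
    if isinstance(relation, str) and '.' not in relation:
        relation = '%s.%s' % (scope_model._meta.app_label, relation)
    return relation                             # other forms pass through

print(resolve_relation(ScopeModel, 'self'))           # the class itself
print(resolve_relation(ScopeModel, 'Product'))        # shop.Product
print(resolve_relation(ScopeModel, 'other.Product'))  # other.Product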
lazy_related_operation
(function, model, *related_models, **kwargs)
Schedule `function` to be called once `model` and all `related_models` have been imported and registered with the app registry. `function` will be called with the newly-loaded model classes as its positional arguments, plus any optional keyword arguments. The `model` argument must be a model class. Each subsequent positional argument is another model, or a reference to another model - see `resolve_relation()` for the various forms these may take. Any relative references will be resolved relative to `model`. This is a convenience wrapper for `Apps.lazy_model_operation` - the app registry model used is the one found in `model._meta.apps`.
Schedule `function` to be called once `model` and all `related_models` have been imported and registered with the app registry. `function` will be called with the newly-loaded model classes as its positional arguments, plus any optional keyword arguments.
def lazy_related_operation(function, model, *related_models, **kwargs): """ Schedule `function` to be called once `model` and all `related_models` have been imported and registered with the app registry. `function` will be called with the newly-loaded model classes as its positional arguments, plus any optional keyword arguments. The `model` argument must be a model class. Each subsequent positional argument is another model, or a reference to another model - see `resolve_relation()` for the various forms these may take. Any relative references will be resolved relative to `model`. This is a convenience wrapper for `Apps.lazy_model_operation` - the app registry model used is the one found in `model._meta.apps`. """ models = [model] + [resolve_relation(model, rel) for rel in related_models] model_keys = (make_model_tuple(m) for m in models) apps = model._meta.apps return apps.lazy_model_operation(partial(function, **kwargs), *model_keys)
[ "def", "lazy_related_operation", "(", "function", ",", "model", ",", "*", "related_models", ",", "*", "*", "kwargs", ")", ":", "models", "=", "[", "model", "]", "+", "[", "resolve_relation", "(", "model", ",", "rel", ")", "for", "rel", "in", "related_models", "]", "model_keys", "=", "(", "make_model_tuple", "(", "m", ")", "for", "m", "in", "models", ")", "apps", "=", "model", ".", "_meta", ".", "apps", "return", "apps", ".", "lazy_model_operation", "(", "partial", "(", "function", ",", "*", "*", "kwargs", ")", ",", "*", "model_keys", ")" ]
[ 61, 0 ]
[ 79, 78 ]
python
en
['en', 'error', 'th']
False
RelatedField._check_clashes
(self)
Check accessor and reverse query name clashes.
Check accessor and reverse query name clashes.
def _check_clashes(self): """Check accessor and reverse query name clashes.""" from django.db.models.base import ModelBase errors = [] opts = self.model._meta # `f.remote_field.model` may be a string instead of a model. Skip if model name is # not resolved. if not isinstance(self.remote_field.model, ModelBase): return [] # Consider that we are checking field `Model.foreign` and the models # are: # # class Target(models.Model): # model = models.IntegerField() # model_set = models.IntegerField() # # class Model(models.Model): # foreign = models.ForeignKey(Target) # m2m = models.ManyToManyField(Target) # rel_opts.object_name == "Target" rel_opts = self.remote_field.model._meta # If the field doesn't install a backward relation on the target model # (so `is_hidden` returns True), then there are no clashes to check # and we can skip these fields. rel_is_hidden = self.remote_field.is_hidden() rel_name = self.remote_field.get_accessor_name() # i. e. "model_set" rel_query_name = self.related_query_name() # i. e. "model" # i.e. "app_label.Model.field". field_name = '%s.%s' % (opts.label, self.name) # Check clashes between accessor or reverse query name of `field` # and any other field name -- i.e. accessor for Model.foreign is # model_set and it clashes with Target.model_set. potential_clashes = rel_opts.fields + rel_opts.many_to_many for clash_field in potential_clashes: # i.e. "app_label.Target.model_set". clash_name = '%s.%s' % (rel_opts.label, clash_field.name) if not rel_is_hidden and clash_field.name == rel_name: errors.append( checks.Error( "Reverse accessor for '%s' clashes with field name '%s'." % (field_name, clash_name), hint=("Rename field '%s', or add/change a related_name " "argument to the definition for field '%s'.") % (clash_name, field_name), obj=self, id='fields.E302', ) ) if clash_field.name == rel_query_name: errors.append( checks.Error( "Reverse query name for '%s' clashes with field name '%s'." % (field_name, clash_name), hint=("Rename field '%s', or add/change a related_name " "argument to the definition for field '%s'.") % (clash_name, field_name), obj=self, id='fields.E303', ) ) # Check clashes between accessors/reverse query names of `field` and # any other field accessor -- i. e. Model.foreign accessor clashes with # Model.m2m accessor. potential_clashes = (r for r in rel_opts.related_objects if r.field is not self) for clash_field in potential_clashes: # i.e. "app_label.Model.m2m". clash_name = '%s.%s' % ( clash_field.related_model._meta.label, clash_field.field.name, ) if not rel_is_hidden and clash_field.get_accessor_name() == rel_name: errors.append( checks.Error( "Reverse accessor for '%s' clashes with reverse accessor for '%s'." % (field_name, clash_name), hint=("Add or change a related_name argument " "to the definition for '%s' or '%s'.") % (field_name, clash_name), obj=self, id='fields.E304', ) ) if clash_field.get_accessor_name() == rel_query_name: errors.append( checks.Error( "Reverse query name for '%s' clashes with reverse query name for '%s'." % (field_name, clash_name), hint=("Add or change a related_name argument " "to the definition for '%s' or '%s'.") % (field_name, clash_name), obj=self, id='fields.E305', ) ) return errors
[ "def", "_check_clashes", "(", "self", ")", ":", "from", "django", ".", "db", ".", "models", ".", "base", "import", "ModelBase", "errors", "=", "[", "]", "opts", "=", "self", ".", "model", ".", "_meta", "# `f.remote_field.model` may be a string instead of a model. Skip if model name is", "# not resolved.", "if", "not", "isinstance", "(", "self", ".", "remote_field", ".", "model", ",", "ModelBase", ")", ":", "return", "[", "]", "# Consider that we are checking field `Model.foreign` and the models", "# are:", "#", "# class Target(models.Model):", "# model = models.IntegerField()", "# model_set = models.IntegerField()", "#", "# class Model(models.Model):", "# foreign = models.ForeignKey(Target)", "# m2m = models.ManyToManyField(Target)", "# rel_opts.object_name == \"Target\"", "rel_opts", "=", "self", ".", "remote_field", ".", "model", ".", "_meta", "# If the field doesn't install a backward relation on the target model", "# (so `is_hidden` returns True), then there are no clashes to check", "# and we can skip these fields.", "rel_is_hidden", "=", "self", ".", "remote_field", ".", "is_hidden", "(", ")", "rel_name", "=", "self", ".", "remote_field", ".", "get_accessor_name", "(", ")", "# i. e. \"model_set\"", "rel_query_name", "=", "self", ".", "related_query_name", "(", ")", "# i. e. \"model\"", "# i.e. \"app_label.Model.field\".", "field_name", "=", "'%s.%s'", "%", "(", "opts", ".", "label", ",", "self", ".", "name", ")", "# Check clashes between accessor or reverse query name of `field`", "# and any other field name -- i.e. accessor for Model.foreign is", "# model_set and it clashes with Target.model_set.", "potential_clashes", "=", "rel_opts", ".", "fields", "+", "rel_opts", ".", "many_to_many", "for", "clash_field", "in", "potential_clashes", ":", "# i.e. \"app_label.Target.model_set\".", "clash_name", "=", "'%s.%s'", "%", "(", "rel_opts", ".", "label", ",", "clash_field", ".", "name", ")", "if", "not", "rel_is_hidden", "and", "clash_field", ".", "name", "==", "rel_name", ":", "errors", ".", "append", "(", "checks", ".", "Error", "(", "\"Reverse accessor for '%s' clashes with field name '%s'.\"", "%", "(", "field_name", ",", "clash_name", ")", ",", "hint", "=", "(", "\"Rename field '%s', or add/change a related_name \"", "\"argument to the definition for field '%s'.\"", ")", "%", "(", "clash_name", ",", "field_name", ")", ",", "obj", "=", "self", ",", "id", "=", "'fields.E302'", ",", ")", ")", "if", "clash_field", ".", "name", "==", "rel_query_name", ":", "errors", ".", "append", "(", "checks", ".", "Error", "(", "\"Reverse query name for '%s' clashes with field name '%s'.\"", "%", "(", "field_name", ",", "clash_name", ")", ",", "hint", "=", "(", "\"Rename field '%s', or add/change a related_name \"", "\"argument to the definition for field '%s'.\"", ")", "%", "(", "clash_name", ",", "field_name", ")", ",", "obj", "=", "self", ",", "id", "=", "'fields.E303'", ",", ")", ")", "# Check clashes between accessors/reverse query names of `field` and", "# any other field accessor -- i. e. Model.foreign accessor clashes with", "# Model.m2m accessor.", "potential_clashes", "=", "(", "r", "for", "r", "in", "rel_opts", ".", "related_objects", "if", "r", ".", "field", "is", "not", "self", ")", "for", "clash_field", "in", "potential_clashes", ":", "# i.e. 
\"app_label.Model.m2m\".", "clash_name", "=", "'%s.%s'", "%", "(", "clash_field", ".", "related_model", ".", "_meta", ".", "label", ",", "clash_field", ".", "field", ".", "name", ",", ")", "if", "not", "rel_is_hidden", "and", "clash_field", ".", "get_accessor_name", "(", ")", "==", "rel_name", ":", "errors", ".", "append", "(", "checks", ".", "Error", "(", "\"Reverse accessor for '%s' clashes with reverse accessor for '%s'.\"", "%", "(", "field_name", ",", "clash_name", ")", ",", "hint", "=", "(", "\"Add or change a related_name argument \"", "\"to the definition for '%s' or '%s'.\"", ")", "%", "(", "field_name", ",", "clash_name", ")", ",", "obj", "=", "self", ",", "id", "=", "'fields.E304'", ",", ")", ")", "if", "clash_field", ".", "get_accessor_name", "(", ")", "==", "rel_query_name", ":", "errors", ".", "append", "(", "checks", ".", "Error", "(", "\"Reverse query name for '%s' clashes with reverse query name for '%s'.\"", "%", "(", "field_name", ",", "clash_name", ")", ",", "hint", "=", "(", "\"Add or change a related_name argument \"", "\"to the definition for '%s' or '%s'.\"", ")", "%", "(", "field_name", ",", "clash_name", ")", ",", "obj", "=", "self", ",", "id", "=", "'fields.E305'", ",", ")", ")", "return", "errors" ]
[ 185, 4 ]
[ 281, 21 ]
python
en
['en', 'en', 'en']
True
RelatedField.get_forward_related_filter
(self, obj)
Return the keyword arguments that when supplied to self.model.object.filter(), would select all instances related through this field to the remote obj. This is used to build the querysets returned by related descriptors. obj is an instance of self.related_field.model.
Return the keyword arguments that when supplied to self.model.object.filter(), would select all instances related through this field to the remote obj. This is used to build the querysets returned by related descriptors. obj is an instance of self.related_field.model.
def get_forward_related_filter(self, obj): """ Return the keyword arguments that when supplied to self.model.object.filter(), would select all instances related through this field to the remote obj. This is used to build the querysets returned by related descriptors. obj is an instance of self.related_field.model. """ return { '%s__%s' % (self.name, rh_field.name): getattr(obj, rh_field.attname) for _, rh_field in self.related_fields }
[ "def", "get_forward_related_filter", "(", "self", ",", "obj", ")", ":", "return", "{", "'%s__%s'", "%", "(", "self", ".", "name", ",", "rh_field", ".", "name", ")", ":", "getattr", "(", "obj", ",", "rh_field", ".", "attname", ")", "for", "_", ",", "rh_field", "in", "self", ".", "related_fields", "}" ]
[ 329, 4 ]
[ 340, 9 ]
python
en
['en', 'error', 'th']
False
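Shape of the kwargs produced by get_forward_related_filter() for a hypothetical foreign key named 'author' with a single target field 'id' (all names and the value 42 are invented):

# One (lh_field, rh_field) pair: rh_field.name is 'id' and
# getattr(obj, 'id') is 42 on the remote instance.
filter_kwargs = {'%s__%s' % ('author', 'id'): 42}
print(filter_kwargs)  # {'author__id': 42}
# Model.objects.filter(**filter_kwargs) then selects all instances
# related to obj through this field.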
RelatedField.get_reverse_related_filter
(self, obj)
Complement to get_forward_related_filter(). Return the keyword arguments that when passed to self.related_field.model.object.filter() select all instances of self.related_field.model related through this field to obj. obj is an instance of self.model.
Complement to get_forward_related_filter(). Return the keyword arguments that when passed to self.related_field.model.object.filter() select all instances of self.related_field.model related through this field to obj. obj is an instance of self.model.
def get_reverse_related_filter(self, obj): """ Complement to get_forward_related_filter(). Return the keyword arguments that when passed to self.related_field.model.object.filter() select all instances of self.related_field.model related through this field to obj. obj is an instance of self.model. """ base_filter = { rh_field.attname: getattr(obj, lh_field.attname) for lh_field, rh_field in self.related_fields } descriptor_filter = self.get_extra_descriptor_filter(obj) base_q = Q(**base_filter) if isinstance(descriptor_filter, dict): return base_q & Q(**descriptor_filter) elif descriptor_filter: return base_q & descriptor_filter return base_q
[ "def", "get_reverse_related_filter", "(", "self", ",", "obj", ")", ":", "base_filter", "=", "{", "rh_field", ".", "attname", ":", "getattr", "(", "obj", ",", "lh_field", ".", "attname", ")", "for", "lh_field", ",", "rh_field", "in", "self", ".", "related_fields", "}", "descriptor_filter", "=", "self", ".", "get_extra_descriptor_filter", "(", "obj", ")", "base_q", "=", "Q", "(", "*", "*", "base_filter", ")", "if", "isinstance", "(", "descriptor_filter", ",", "dict", ")", ":", "return", "base_q", "&", "Q", "(", "*", "*", "descriptor_filter", ")", "elif", "descriptor_filter", ":", "return", "base_q", "&", "descriptor_filter", "return", "base_q" ]
[ 342, 4 ]
[ 359, 21 ]
python
en
['en', 'error', 'th']
False
RelatedField.swappable_setting
(self)
Get the setting that this is powered from for swapping, or None if it's not swapped in / marked with swappable=False.
Get the setting that this is powered from for swapping, or None if it's not swapped in / marked with swappable=False.
def swappable_setting(self): """ Get the setting that this is powered from for swapping, or None if it's not swapped in / marked with swappable=False. """ if self.swappable: # Work out string form of "to" if isinstance(self.remote_field.model, str): to_string = self.remote_field.model else: to_string = self.remote_field.model._meta.label return apps.get_swappable_settings_name(to_string) return None
[ "def", "swappable_setting", "(", "self", ")", ":", "if", "self", ".", "swappable", ":", "# Work out string form of \"to\"", "if", "isinstance", "(", "self", ".", "remote_field", ".", "model", ",", "str", ")", ":", "to_string", "=", "self", ".", "remote_field", ".", "model", "else", ":", "to_string", "=", "self", ".", "remote_field", ".", "model", ".", "_meta", ".", "label", "return", "apps", ".", "get_swappable_settings_name", "(", "to_string", ")", "return", "None" ]
[ 362, 4 ]
[ 374, 19 ]
python
en
['en', 'error', 'th']
False
RelatedField.get_limit_choices_to
(self)
Return ``limit_choices_to`` for this model field. If it is a callable, it will be invoked and the result will be returned.
Return ``limit_choices_to`` for this model field.
def get_limit_choices_to(self): """ Return ``limit_choices_to`` for this model field. If it is a callable, it will be invoked and the result will be returned. """ if callable(self.remote_field.limit_choices_to): return self.remote_field.limit_choices_to() return self.remote_field.limit_choices_to
[ "def", "get_limit_choices_to", "(", "self", ")", ":", "if", "callable", "(", "self", ".", "remote_field", ".", "limit_choices_to", ")", ":", "return", "self", ".", "remote_field", ".", "limit_choices_to", "(", ")", "return", "self", ".", "remote_field", ".", "limit_choices_to" ]
[ 389, 4 ]
[ 398, 49 ]
python
en
['en', 'error', 'th']
False
RelatedField.formfield
(self, **kwargs)
Pass ``limit_choices_to`` to the field being constructed. Only passes it if there is a type that supports related fields. This is a similar strategy used to pass the ``queryset`` to the field being constructed.
Pass ``limit_choices_to`` to the field being constructed.
def formfield(self, **kwargs): """ Pass ``limit_choices_to`` to the field being constructed. Only passes it if there is a type that supports related fields. This is a similar strategy used to pass the ``queryset`` to the field being constructed. """ defaults = {} if hasattr(self.remote_field, 'get_related_field'): # If this is a callable, do not invoke it here. Just pass # it in the defaults for when the form class will later be # instantiated. limit_choices_to = self.remote_field.limit_choices_to defaults.update({ 'limit_choices_to': limit_choices_to, }) defaults.update(kwargs) return super().formfield(**defaults)
[ "def", "formfield", "(", "self", ",", "*", "*", "kwargs", ")", ":", "defaults", "=", "{", "}", "if", "hasattr", "(", "self", ".", "remote_field", ",", "'get_related_field'", ")", ":", "# If this is a callable, do not invoke it here. Just pass", "# it in the defaults for when the form class will later be", "# instantiated.", "limit_choices_to", "=", "self", ".", "remote_field", ".", "limit_choices_to", "defaults", ".", "update", "(", "{", "'limit_choices_to'", ":", "limit_choices_to", ",", "}", ")", "defaults", ".", "update", "(", "kwargs", ")", "return", "super", "(", ")", ".", "formfield", "(", "*", "*", "defaults", ")" ]
[ 400, 4 ]
[ 418, 44 ]
python
en
['en', 'error', 'th']
False
RelatedField.related_query_name
(self)
Define the name that can be used to identify this related object in a table-spanning query.
Define the name that can be used to identify this related object in a table-spanning query.
def related_query_name(self): """ Define the name that can be used to identify this related object in a table-spanning query. """ return self.remote_field.related_query_name or self.remote_field.related_name or self.opts.model_name
[ "def", "related_query_name", "(", "self", ")", ":", "return", "self", ".", "remote_field", ".", "related_query_name", "or", "self", ".", "remote_field", ".", "related_name", "or", "self", ".", "opts", ".", "model_name" ]
[ 420, 4 ]
[ 425, 109 ]
python
en
['en', 'error', 'th']
False
RelatedField.target_field
(self)
When filtering against this relation, return the field on the remote model against which the filtering should happen.
When filtering against this relation, return the field on the remote model against which the filtering should happen.
def target_field(self): """ When filtering against this relation, return the field on the remote model against which the filtering should happen. """ target_fields = self.get_path_info()[-1].target_fields if len(target_fields) > 1: raise exceptions.FieldError( "The relation has multiple target fields, but only single target field was asked for") return target_fields[0]
[ "def", "target_field", "(", "self", ")", ":", "target_fields", "=", "self", ".", "get_path_info", "(", ")", "[", "-", "1", "]", ".", "target_fields", "if", "len", "(", "target_fields", ")", ">", "1", ":", "raise", "exceptions", ".", "FieldError", "(", "\"The relation has multiple target fields, but only single target field was asked for\"", ")", "return", "target_fields", "[", "0", "]" ]
[ 428, 4 ]
[ 437, 31 ]
python
en
['en', 'error', 'th']
False
ForeignObject.get_extra_descriptor_filter
(self, instance)
Return an extra filter condition for related object fetching when user does 'instance.fieldname', that is the extra filter is used in the descriptor of the field. The filter should be either a dict usable in .filter(**kwargs) call or a Q-object. The condition will be ANDed together with the relation's joining columns. A parallel method is get_extra_restriction() which is used in JOIN and subquery conditions.
Return an extra filter condition for related object fetching when user does 'instance.fieldname', that is the extra filter is used in the descriptor of the field.
def get_extra_descriptor_filter(self, instance): """ Return an extra filter condition for related object fetching when user does 'instance.fieldname', that is the extra filter is used in the descriptor of the field. The filter should be either a dict usable in .filter(**kwargs) call or a Q-object. The condition will be ANDed together with the relation's joining columns. A parallel method is get_extra_restriction() which is used in JOIN and subquery conditions. """ return {}
[ "def", "get_extra_descriptor_filter", "(", "self", ",", "instance", ")", ":", "return", "{", "}" ]
[ 680, 4 ]
[ 693, 17 ]
python
en
['en', 'error', 'th']
False
ForeignObject.get_extra_restriction
(self, where_class, alias, related_alias)
Return a pair condition used for joining and subquery pushdown. The condition is something that responds to as_sql(compiler, connection) method. Note that currently referring both the 'alias' and 'related_alias' will not work in some conditions, like subquery pushdown. A parallel method is get_extra_descriptor_filter() which is used in instance.fieldname related object fetching.
Return a pair condition used for joining and subquery pushdown. The condition is something that responds to as_sql(compiler, connection) method.
def get_extra_restriction(self, where_class, alias, related_alias): """ Return a pair condition used for joining and subquery pushdown. The condition is something that responds to as_sql(compiler, connection) method. Note that currently referring both the 'alias' and 'related_alias' will not work in some conditions, like subquery pushdown. A parallel method is get_extra_descriptor_filter() which is used in instance.fieldname related object fetching. """ return None
[ "def", "get_extra_restriction", "(", "self", ",", "where_class", ",", "alias", ",", "related_alias", ")", ":", "return", "None" ]
[ 695, 4 ]
[ 707, 19 ]
python
en
['en', 'error', 'th']
False
ForeignObject.get_path_info
(self, filtered_relation=None)
Get path from this field to the related model.
Get path from this field to the related model.
def get_path_info(self, filtered_relation=None): """Get path from this field to the related model.""" opts = self.remote_field.model._meta from_opts = self.model._meta return [PathInfo( from_opts=from_opts, to_opts=opts, target_fields=self.foreign_related_fields, join_field=self, m2m=False, direct=True, filtered_relation=filtered_relation, )]
[ "def", "get_path_info", "(", "self", ",", "filtered_relation", "=", "None", ")", ":", "opts", "=", "self", ".", "remote_field", ".", "model", ".", "_meta", "from_opts", "=", "self", ".", "model", ".", "_meta", "return", "[", "PathInfo", "(", "from_opts", "=", "from_opts", ",", "to_opts", "=", "opts", ",", "target_fields", "=", "self", ".", "foreign_related_fields", ",", "join_field", "=", "self", ",", "m2m", "=", "False", ",", "direct", "=", "True", ",", "filtered_relation", "=", "filtered_relation", ",", ")", "]" ]
[ 709, 4 ]
[ 721, 10 ]
python
en
['en', 'en', 'en']
True
ForeignObject.get_reverse_path_info
(self, filtered_relation=None)
Get path from the related model to this field's model.
Get path from the related model to this field's model.
def get_reverse_path_info(self, filtered_relation=None): """Get path from the related model to this field's model.""" opts = self.model._meta from_opts = self.remote_field.model._meta return [PathInfo( from_opts=from_opts, to_opts=opts, target_fields=(opts.pk,), join_field=self.remote_field, m2m=not self.unique, direct=False, filtered_relation=filtered_relation, )]
[ "def", "get_reverse_path_info", "(", "self", ",", "filtered_relation", "=", "None", ")", ":", "opts", "=", "self", ".", "model", ".", "_meta", "from_opts", "=", "self", ".", "remote_field", ".", "model", ".", "_meta", "return", "[", "PathInfo", "(", "from_opts", "=", "from_opts", ",", "to_opts", "=", "opts", ",", "target_fields", "=", "(", "opts", ".", "pk", ",", ")", ",", "join_field", "=", "self", ".", "remote_field", ",", "m2m", "=", "not", "self", ".", "unique", ",", "direct", "=", "False", ",", "filtered_relation", "=", "filtered_relation", ",", ")", "]" ]
[ 723, 4 ]
[ 735, 10 ]
python
en
['en', 'en', 'en']
True
ForeignKey.get_reverse_path_info
(self, filtered_relation=None)
Get path from the related model to this field's model.
Get path from the related model to this field's model.
def get_reverse_path_info(self, filtered_relation=None): """Get path from the related model to this field's model.""" opts = self.model._meta from_opts = self.remote_field.model._meta return [PathInfo( from_opts=from_opts, to_opts=opts, target_fields=(opts.pk,), join_field=self.remote_field, m2m=not self.unique, direct=False, filtered_relation=filtered_relation, )]
[ "def", "get_reverse_path_info", "(", "self", ",", "filtered_relation", "=", "None", ")", ":", "opts", "=", "self", ".", "model", ".", "_meta", "from_opts", "=", "self", ".", "remote_field", ".", "model", ".", "_meta", "return", "[", "PathInfo", "(", "from_opts", "=", "from_opts", ",", "to_opts", "=", "opts", ",", "target_fields", "=", "(", "opts", ".", "pk", ",", ")", ",", "join_field", "=", "self", ".", "remote_field", ",", "m2m", "=", "not", "self", ".", "unique", ",", "direct", "=", "False", ",", "filtered_relation", "=", "filtered_relation", ",", ")", "]" ]
[ 898, 4 ]
[ 910, 10 ]
python
en
['en', 'en', 'en']
True
ForeignKey.get_default
(self)
Return the to_field if the default value is an object.
Return the to_field if the default value is an object.
def get_default(self): """Return the to_field if the default value is an object.""" field_default = super().get_default() if isinstance(field_default, self.remote_field.model): return getattr(field_default, self.target_field.attname) return field_default
[ "def", "get_default", "(", "self", ")", ":", "field_default", "=", "super", "(", ")", ".", "get_default", "(", ")", "if", "isinstance", "(", "field_default", ",", "self", ".", "remote_field", ".", "model", ")", ":", "return", "getattr", "(", "field_default", ",", "self", ".", "target_field", ".", "attname", ")", "return", "field_default" ]
[ 957, 4 ]
[ 962, 28 ]
python
en
['en', 'en', 'en']
True
ManyToManyField._get_path_info
(self, direct=False, filtered_relation=None)
Called by both direct and indirect m2m traversal.
Called by both direct and indirect m2m traversal.
def _get_path_info(self, direct=False, filtered_relation=None): """Called by both direct and indirect m2m traversal.""" int_model = self.remote_field.through linkfield1 = int_model._meta.get_field(self.m2m_field_name()) linkfield2 = int_model._meta.get_field(self.m2m_reverse_field_name()) if direct: join1infos = linkfield1.get_reverse_path_info() join2infos = linkfield2.get_path_info(filtered_relation) else: join1infos = linkfield2.get_reverse_path_info() join2infos = linkfield1.get_path_info(filtered_relation) # Get join infos between the last model of join 1 and the first model # of join 2. Assume the only reason these may differ is due to model # inheritance. join1_final = join1infos[-1].to_opts join2_initial = join2infos[0].from_opts if join1_final is join2_initial: intermediate_infos = [] elif issubclass(join1_final.model, join2_initial.model): intermediate_infos = join1_final.get_path_to_parent(join2_initial.model) else: intermediate_infos = join2_initial.get_path_from_parent(join1_final.model) return [*join1infos, *intermediate_infos, *join2infos]
[ "def", "_get_path_info", "(", "self", ",", "direct", "=", "False", ",", "filtered_relation", "=", "None", ")", ":", "int_model", "=", "self", ".", "remote_field", ".", "through", "linkfield1", "=", "int_model", ".", "_meta", ".", "get_field", "(", "self", ".", "m2m_field_name", "(", ")", ")", "linkfield2", "=", "int_model", ".", "_meta", ".", "get_field", "(", "self", ".", "m2m_reverse_field_name", "(", ")", ")", "if", "direct", ":", "join1infos", "=", "linkfield1", ".", "get_reverse_path_info", "(", ")", "join2infos", "=", "linkfield2", ".", "get_path_info", "(", "filtered_relation", ")", "else", ":", "join1infos", "=", "linkfield2", ".", "get_reverse_path_info", "(", ")", "join2infos", "=", "linkfield1", ".", "get_path_info", "(", "filtered_relation", ")", "# Get join infos between the last model of join 1 and the first model", "# of join 2. Assume the only reason these may differ is due to model", "# inheritance.", "join1_final", "=", "join1infos", "[", "-", "1", "]", ".", "to_opts", "join2_initial", "=", "join2infos", "[", "0", "]", ".", "from_opts", "if", "join1_final", "is", "join2_initial", ":", "intermediate_infos", "=", "[", "]", "elif", "issubclass", "(", "join1_final", ".", "model", ",", "join2_initial", ".", "model", ")", ":", "intermediate_infos", "=", "join1_final", ".", "get_path_to_parent", "(", "join2_initial", ".", "model", ")", "else", ":", "intermediate_infos", "=", "join2_initial", ".", "get_path_from_parent", "(", "join1_final", ".", "model", ")", "return", "[", "*", "join1infos", ",", "*", "intermediate_infos", ",", "*", "join2infos", "]" ]
[ 1506, 4 ]
[ 1530, 62 ]
python
en
['en', 'en', 'en']
True
ManyToManyField._get_m2m_db_table
(self, opts)
Function that can be curried to provide the m2m table name for this relation.
Function that can be curried to provide the m2m table name for this relation.
def _get_m2m_db_table(self, opts): """ Function that can be curried to provide the m2m table name for this relation. """ if self.remote_field.through is not None: return self.remote_field.through._meta.db_table elif self.db_table: return self.db_table else: m2m_table_name = '%s_%s' % (utils.strip_quotes(opts.db_table), self.name) return utils.truncate_name(m2m_table_name, connection.ops.max_name_length())
[ "def", "_get_m2m_db_table", "(", "self", ",", "opts", ")", ":", "if", "self", ".", "remote_field", ".", "through", "is", "not", "None", ":", "return", "self", ".", "remote_field", ".", "through", ".", "_meta", ".", "db_table", "elif", "self", ".", "db_table", ":", "return", "self", ".", "db_table", "else", ":", "m2m_table_name", "=", "'%s_%s'", "%", "(", "utils", ".", "strip_quotes", "(", "opts", ".", "db_table", ")", ",", "self", ".", "name", ")", "return", "utils", ".", "truncate_name", "(", "m2m_table_name", ",", "connection", ".", "ops", ".", "max_name_length", "(", ")", ")" ]
[ 1538, 4 ]
[ 1549, 88 ]
python
en
['en', 'error', 'th']
False
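Worked example of the default-name branch of _get_m2m_db_table(), with invented names: an M2M field 'tags' on a model stored in table 'blog_article':

db_table, field_name = 'blog_article', 'tags'

m2m_table_name = '%s_%s' % (db_table, field_name)
print(m2m_table_name)  # blog_article_tags
# utils.truncate_name() then shortens the result if it exceeds the
# backend's maximum identifier length.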
ManyToManyField._get_m2m_attr
(self, related, attr)
Function that can be curried to provide the source accessor or DB column name for the m2m table.
Function that can be curried to provide the source accessor or DB column name for the m2m table.
def _get_m2m_attr(self, related, attr): """ Function that can be curried to provide the source accessor or DB column name for the m2m table. """ cache_attr = '_m2m_%s_cache' % attr if hasattr(self, cache_attr): return getattr(self, cache_attr) if self.remote_field.through_fields is not None: link_field_name = self.remote_field.through_fields[0] else: link_field_name = None for f in self.remote_field.through._meta.fields: if (f.is_relation and f.remote_field.model == related.related_model and (link_field_name is None or link_field_name == f.name)): setattr(self, cache_attr, getattr(f, attr)) return getattr(self, cache_attr)
[ "def", "_get_m2m_attr", "(", "self", ",", "related", ",", "attr", ")", ":", "cache_attr", "=", "'_m2m_%s_cache'", "%", "attr", "if", "hasattr", "(", "self", ",", "cache_attr", ")", ":", "return", "getattr", "(", "self", ",", "cache_attr", ")", "if", "self", ".", "remote_field", ".", "through_fields", "is", "not", "None", ":", "link_field_name", "=", "self", ".", "remote_field", ".", "through_fields", "[", "0", "]", "else", ":", "link_field_name", "=", "None", "for", "f", "in", "self", ".", "remote_field", ".", "through", ".", "_meta", ".", "fields", ":", "if", "(", "f", ".", "is_relation", "and", "f", ".", "remote_field", ".", "model", "==", "related", ".", "related_model", "and", "(", "link_field_name", "is", "None", "or", "link_field_name", "==", "f", ".", "name", ")", ")", ":", "setattr", "(", "self", ",", "cache_attr", ",", "getattr", "(", "f", ",", "attr", ")", ")", "return", "getattr", "(", "self", ",", "cache_attr", ")" ]
[ 1551, 4 ]
[ 1567, 48 ]
python
en
['en', 'error', 'th']
False
ManyToManyField._get_m2m_reverse_attr
(self, related, attr)
Function that can be curried to provide the related accessor or DB column name for the m2m table.
Function that can be curried to provide the related accessor or DB column name for the m2m table.
def _get_m2m_reverse_attr(self, related, attr): """ Function that can be curried to provide the related accessor or DB column name for the m2m table. """ cache_attr = '_m2m_reverse_%s_cache' % attr if hasattr(self, cache_attr): return getattr(self, cache_attr) found = False if self.remote_field.through_fields is not None: link_field_name = self.remote_field.through_fields[1] else: link_field_name = None for f in self.remote_field.through._meta.fields: if f.is_relation and f.remote_field.model == related.model: if link_field_name is None and related.related_model == related.model: # If this is an m2m-intermediate to self, # the first foreign key you find will be # the source column. Keep searching for # the second foreign key. if found: setattr(self, cache_attr, getattr(f, attr)) break else: found = True elif link_field_name is None or link_field_name == f.name: setattr(self, cache_attr, getattr(f, attr)) break return getattr(self, cache_attr)
[ "def", "_get_m2m_reverse_attr", "(", "self", ",", "related", ",", "attr", ")", ":", "cache_attr", "=", "'_m2m_reverse_%s_cache'", "%", "attr", "if", "hasattr", "(", "self", ",", "cache_attr", ")", ":", "return", "getattr", "(", "self", ",", "cache_attr", ")", "found", "=", "False", "if", "self", ".", "remote_field", ".", "through_fields", "is", "not", "None", ":", "link_field_name", "=", "self", ".", "remote_field", ".", "through_fields", "[", "1", "]", "else", ":", "link_field_name", "=", "None", "for", "f", "in", "self", ".", "remote_field", ".", "through", ".", "_meta", ".", "fields", ":", "if", "f", ".", "is_relation", "and", "f", ".", "remote_field", ".", "model", "==", "related", ".", "model", ":", "if", "link_field_name", "is", "None", "and", "related", ".", "related_model", "==", "related", ".", "model", ":", "# If this is an m2m-intermediate to self,", "# the first foreign key you find will be", "# the source column. Keep searching for", "# the second foreign key.", "if", "found", ":", "setattr", "(", "self", ",", "cache_attr", ",", "getattr", "(", "f", ",", "attr", ")", ")", "break", "else", ":", "found", "=", "True", "elif", "link_field_name", "is", "None", "or", "link_field_name", "==", "f", ".", "name", ":", "setattr", "(", "self", ",", "cache_attr", ",", "getattr", "(", "f", ",", "attr", ")", ")", "break", "return", "getattr", "(", "self", ",", "cache_attr", ")" ]
[ 1569, 4 ]
[ 1597, 40 ]
python
en
['en', 'error', 'th']
False
_add_doc
(func, doc)
Add documentation to a function.
Add documentation to a function.
def _add_doc(func, doc): """Add documentation to a function.""" func.__doc__ = doc
[ "def", "_add_doc", "(", "func", ",", "doc", ")", ":", "func", ".", "__doc__", "=", "doc" ]
[ 74, 0 ]
[ 76, 22 ]
python
en
['en', 'en', 'en']
True
_import_module
(name)
Import module, returning the module after the last dot.
Import module, returning the module after the last dot.
def _import_module(name): """Import module, returning the module after the last dot.""" __import__(name) return sys.modules[name]
[ "def", "_import_module", "(", "name", ")", ":", "__import__", "(", "name", ")", "return", "sys", ".", "modules", "[", "name", "]" ]
[ 79, 0 ]
[ 82, 28 ]
python
en
['en', 'en', 'en']
True
add_move
(move)
Add an item to six.moves.
Add an item to six.moves.
def add_move(move): """Add an item to six.moves.""" setattr(_MovedItems, move.name, move)
[ "def", "add_move", "(", "move", ")", ":", "setattr", "(", "_MovedItems", ",", "move", ".", "name", ",", "move", ")" ]
[ 491, 0 ]
[ 493, 41 ]
python
en
['en', 'en', 'en']
True
remove_move
(name)
Remove item from six.moves.
Remove item from six.moves.
def remove_move(name): """Remove item from six.moves.""" try: delattr(_MovedItems, name) except AttributeError: try: del moves.__dict__[name] except KeyError: raise AttributeError("no such move, %r" % (name,))
[ "def", "remove_move", "(", "name", ")", ":", "try", ":", "delattr", "(", "_MovedItems", ",", "name", ")", "except", "AttributeError", ":", "try", ":", "del", "moves", ".", "__dict__", "[", "name", "]", "except", "KeyError", ":", "raise", "AttributeError", "(", "\"no such move, %r\"", "%", "(", "name", ",", ")", ")" ]
[ 496, 0 ]
[ 504, 62 ]
python
en
['en', 'en', 'en']
True
with_metaclass
(meta, *bases)
Create a base class with a metaclass.
Create a base class with a metaclass.
def with_metaclass(meta, *bases): """Create a base class with a metaclass.""" # This requires a bit of explanation: the basic idea is to make a dummy # metaclass for one level of class instantiation that replaces itself with # the actual metaclass. class metaclass(type): def __new__(cls, name, this_bases, d): return meta(name, bases, d) @classmethod def __prepare__(cls, name, this_bases): return meta.__prepare__(name, bases) return type.__new__(metaclass, 'temporary_class', (), {})
[ "def", "with_metaclass", "(", "meta", ",", "*", "bases", ")", ":", "# This requires a bit of explanation: the basic idea is to make a dummy", "# metaclass for one level of class instantiation that replaces itself with", "# the actual metaclass.", "class", "metaclass", "(", "type", ")", ":", "def", "__new__", "(", "cls", ",", "name", ",", "this_bases", ",", "d", ")", ":", "return", "meta", "(", "name", ",", "bases", ",", "d", ")", "@", "classmethod", "def", "__prepare__", "(", "cls", ",", "name", ",", "this_bases", ")", ":", "return", "meta", ".", "__prepare__", "(", "name", ",", "bases", ")", "return", "type", ".", "__new__", "(", "metaclass", ",", "'temporary_class'", ",", "(", ")", ",", "{", "}", ")" ]
[ 818, 0 ]
[ 831, 61 ]
python
en
['en', 'en', 'en']
True
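The dummy metaclass above fires exactly once, handing the real `meta` the original `bases`, so no temporary class is left in the MRO. A self-contained sketch, assuming the function above is in scope (it is also exported as six.with_metaclass):

class Meta(type):
    def __new__(mcls, name, bases, ns):
        ns.setdefault('created_by', mcls.__name__)
        return super(Meta, mcls).__new__(mcls, name, bases, ns)

class Base(object):
    pass

class Widget(with_metaclass(Meta, Base)):
    pass

assert type(Widget) is Meta                       # built by the real metaclass
assert Widget.__mro__ == (Widget, Base, object)   # 'temporary_class' is gone
assert Widget.created_by == 'Meta'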
add_metaclass
(metaclass)
Class decorator for creating a class with a metaclass.
Class decorator for creating a class with a metaclass.
def add_metaclass(metaclass): """Class decorator for creating a class with a metaclass.""" def wrapper(cls): orig_vars = cls.__dict__.copy() slots = orig_vars.get('__slots__') if slots is not None: if isinstance(slots, str): slots = [slots] for slots_var in slots: orig_vars.pop(slots_var) orig_vars.pop('__dict__', None) orig_vars.pop('__weakref__', None) if hasattr(cls, '__qualname__'): orig_vars['__qualname__'] = cls.__qualname__ return metaclass(cls.__name__, cls.__bases__, orig_vars) return wrapper
[ "def", "add_metaclass", "(", "metaclass", ")", ":", "def", "wrapper", "(", "cls", ")", ":", "orig_vars", "=", "cls", ".", "__dict__", ".", "copy", "(", ")", "slots", "=", "orig_vars", ".", "get", "(", "'__slots__'", ")", "if", "slots", "is", "not", "None", ":", "if", "isinstance", "(", "slots", ",", "str", ")", ":", "slots", "=", "[", "slots", "]", "for", "slots_var", "in", "slots", ":", "orig_vars", ".", "pop", "(", "slots_var", ")", "orig_vars", ".", "pop", "(", "'__dict__'", ",", "None", ")", "orig_vars", ".", "pop", "(", "'__weakref__'", ",", "None", ")", "if", "hasattr", "(", "cls", ",", "'__qualname__'", ")", ":", "orig_vars", "[", "'__qualname__'", "]", "=", "cls", ".", "__qualname__", "return", "metaclass", "(", "cls", ".", "__name__", ",", "cls", ".", "__bases__", ",", "orig_vars", ")", "return", "wrapper" ]
[ 834, 0 ]
[ 849, 18 ]
python
en
['en', 'en', 'en']
True
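add_metaclass reaches the same result as with_metaclass but as a decorator on an already-defined class, copying its namespace and re-creating it through the metaclass; popping the per-slot descriptors first is what lets `__slots__` classes survive the round trip. A sketch, assuming the decorator above (also exported as six.add_metaclass):

class Tracking(type):
    def __new__(mcls, name, bases, ns):
        ns.setdefault('meta_name', mcls.__name__)
        return super(Tracking, mcls).__new__(mcls, name, bases, ns)

@add_metaclass(Tracking)
class Point(object):
    __slots__ = ('x', 'y')

assert type(Point) is Tracking and Point.meta_name == 'Tracking'
p = Point()
p.x = 1            # the re-created class rebuilds the slot descriptors ...
try:
    p.z = 2        # ... and still rejects attributes outside __slots__
except AttributeError:
    pass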
ensure_binary
(s, encoding='utf-8', errors='strict')
Coerce **s** to six.binary_type. For Python 2: - `unicode` -> encoded to `str` - `str` -> `str` For Python 3: - `str` -> encoded to `bytes` - `bytes` -> `bytes`
Coerce **s** to six.binary_type.
def ensure_binary(s, encoding='utf-8', errors='strict'): """Coerce **s** to six.binary_type. For Python 2: - `unicode` -> encoded to `str` - `str` -> `str` For Python 3: - `str` -> encoded to `bytes` - `bytes` -> `bytes` """ if isinstance(s, text_type): return s.encode(encoding, errors) elif isinstance(s, binary_type): return s else: raise TypeError("not expecting type '%s'" % type(s))
[ "def", "ensure_binary", "(", "s", ",", "encoding", "=", "'utf-8'", ",", "errors", "=", "'strict'", ")", ":", "if", "isinstance", "(", "s", ",", "text_type", ")", ":", "return", "s", ".", "encode", "(", "encoding", ",", "errors", ")", "elif", "isinstance", "(", "s", ",", "binary_type", ")", ":", "return", "s", "else", ":", "raise", "TypeError", "(", "\"not expecting type '%s'\"", "%", "type", "(", "s", ")", ")" ]
[ 852, 0 ]
[ 868, 60 ]
python
en
['en', 'sn', 'en']
True
ensure_str
(s, encoding='utf-8', errors='strict')
Coerce *s* to `str`. For Python 2: - `unicode` -> encoded to `str` - `str` -> `str` For Python 3: - `str` -> `str` - `bytes` -> decoded to `str`
Coerce *s* to `str`.
def ensure_str(s, encoding='utf-8', errors='strict'): """Coerce *s* to `str`. For Python 2: - `unicode` -> encoded to `str` - `str` -> `str` For Python 3: - `str` -> `str` - `bytes` -> decoded to `str` """ if not isinstance(s, (text_type, binary_type)): raise TypeError("not expecting type '%s'" % type(s)) if PY2 and isinstance(s, text_type): s = s.encode(encoding, errors) elif PY3 and isinstance(s, binary_type): s = s.decode(encoding, errors) return s
[ "def", "ensure_str", "(", "s", ",", "encoding", "=", "'utf-8'", ",", "errors", "=", "'strict'", ")", ":", "if", "not", "isinstance", "(", "s", ",", "(", "text_type", ",", "binary_type", ")", ")", ":", "raise", "TypeError", "(", "\"not expecting type '%s'\"", "%", "type", "(", "s", ")", ")", "if", "PY2", "and", "isinstance", "(", "s", ",", "text_type", ")", ":", "s", "=", "s", ".", "encode", "(", "encoding", ",", "errors", ")", "elif", "PY3", "and", "isinstance", "(", "s", ",", "binary_type", ")", ":", "s", "=", "s", ".", "decode", "(", "encoding", ",", "errors", ")", "return", "s" ]
[ 871, 0 ]
[ 888, 12 ]
python
en
['en', 'sl', 'en']
True
ensure_text
(s, encoding='utf-8', errors='strict')
Coerce *s* to six.text_type. For Python 2: - `unicode` -> `unicode` - `str` -> `unicode` For Python 3: - `str` -> `str` - `bytes` -> decoded to `str`
Coerce *s* to six.text_type.
def ensure_text(s, encoding='utf-8', errors='strict'): """Coerce *s* to six.text_type. For Python 2: - `unicode` -> `unicode` - `str` -> `unicode` For Python 3: - `str` -> `str` - `bytes` -> decoded to `str` """ if isinstance(s, binary_type): return s.decode(encoding, errors) elif isinstance(s, text_type): return s else: raise TypeError("not expecting type '%s'" % type(s))
[ "def", "ensure_text", "(", "s", ",", "encoding", "=", "'utf-8'", ",", "errors", "=", "'strict'", ")", ":", "if", "isinstance", "(", "s", ",", "binary_type", ")", ":", "return", "s", ".", "decode", "(", "encoding", ",", "errors", ")", "elif", "isinstance", "(", "s", ",", "text_type", ")", ":", "return", "s", "else", ":", "raise", "TypeError", "(", "\"not expecting type '%s'\"", "%", "type", "(", "s", ")", ")" ]
[ 891, 0 ]
[ 907, 60 ]
python
en
['en', 'sr', 'en']
True
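The three coercers form a small matrix: ensure_binary always ends at bytes, ensure_text always ends at text, and ensure_str targets whatever the native str type is (bytes on Python 2, text on Python 3). A quick Python 3 round trip, assuming the helpers above (or `from six import ensure_binary, ensure_str, ensure_text`):

s = 'héllo'
b = ensure_binary(s)            # b'h\xc3\xa9llo' (UTF-8 by default)
assert ensure_text(b) == s      # bytes decoded back to text
assert ensure_str(b) == s       # on Python 3 the native str is text
assert ensure_str(s) is s       # already native: returned unchanged
try:
    ensure_binary(3.14)         # anything else is rejected
except TypeError as exc:
    print(exc)                  # not expecting type '<class 'float'>'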
python_2_unicode_compatible
(klass)
A decorator that defines __unicode__ and __str__ methods under Python 2. Under Python 3 it does nothing. To support Python 2 and 3 with a single code base, define a __str__ method returning text and apply this decorator to the class.
A decorator that defines __unicode__ and __str__ methods under Python 2. Under Python 3 it does nothing.
def python_2_unicode_compatible(klass): """ A decorator that defines __unicode__ and __str__ methods under Python 2. Under Python 3 it does nothing. To support Python 2 and 3 with a single code base, define a __str__ method returning text and apply this decorator to the class. """ if PY2: if '__str__' not in klass.__dict__: raise ValueError("@python_2_unicode_compatible cannot be applied " "to %s because it doesn't define __str__()." % klass.__name__) klass.__unicode__ = klass.__str__ klass.__str__ = lambda self: self.__unicode__().encode('utf-8') return klass
[ "def", "python_2_unicode_compatible", "(", "klass", ")", ":", "if", "PY2", ":", "if", "'__str__'", "not", "in", "klass", ".", "__dict__", ":", "raise", "ValueError", "(", "\"@python_2_unicode_compatible cannot be applied \"", "\"to %s because it doesn't define __str__().\"", "%", "klass", ".", "__name__", ")", "klass", ".", "__unicode__", "=", "klass", ".", "__str__", "klass", ".", "__str__", "=", "lambda", "self", ":", "self", ".", "__unicode__", "(", ")", ".", "encode", "(", "'utf-8'", ")", "return", "klass" ]
[ 911, 0 ]
[ 926, 16 ]
python
en
['en', 'error', 'th']
False
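Usage is a single decorator on a class whose `__str__` returns text: under Python 3 the class comes back untouched, while under Python 2 the method is moved to `__unicode__` and a UTF-8-encoding `__str__` is synthesized in its place. A sketch, assuming the decorator above (also exported as six.python_2_unicode_compatible):

@python_2_unicode_compatible
class Label(object):
    def __init__(self, text):
        self.text = text

    def __str__(self):
        return self.text      # always return *text* here, never bytes

lbl = Label(u'naïve')
print(str(lbl))   # Py3: 'naïve'; Py2: the UTF-8 bytes produced via __unicode__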
_SixMetaPathImporter.is_package
(self, fullname)
Return true, if the named module is a package. We need this method to get correct spec objects with Python 3.4 (see PEP451)
Return true, if the named module is a package.
def is_package(self, fullname): """ Return true, if the named module is a package. We need this method to get correct spec objects with Python 3.4 (see PEP451) """ return hasattr(self.__get_module(fullname), "__path__")
[ "def", "is_package", "(", "self", ",", "fullname", ")", ":", "return", "hasattr", "(", "self", ".", "__get_module", "(", "fullname", ")", ",", "\"__path__\"", ")" ]
[ 208, 4 ]
[ 215, 63 ]
python
en
['en', 'error', 'th']
False
_SixMetaPathImporter.get_code
(self, fullname)
Return None Required, if is_package is implemented
Return None
def get_code(self, fullname): """Return None Required, if is_package is implemented""" self.__get_module(fullname) # eventually raises ImportError return None
[ "def", "get_code", "(", "self", ",", "fullname", ")", ":", "self", ".", "__get_module", "(", "fullname", ")", "# eventually raises ImportError", "return", "None" ]
[ 217, 4 ]
[ 222, 19 ]
python
en
['en', 'co', 'en']
False
NoneMetadataError.__init__
(self, dist, metadata_name)
:param dist: A Distribution object. :param metadata_name: The name of the metadata being accessed (can be "METADATA" or "PKG-INFO").
:param dist: A Distribution object. :param metadata_name: The name of the metadata being accessed (can be "METADATA" or "PKG-INFO").
def __init__(self, dist, metadata_name): # type: (Distribution, str) -> None """ :param dist: A Distribution object. :param metadata_name: The name of the metadata being accessed (can be "METADATA" or "PKG-INFO"). """ self.dist = dist self.metadata_name = metadata_name
[ "def", "__init__", "(", "self", ",", "dist", ",", "metadata_name", ")", ":", "# type: (Distribution, str) -> None", "self", ".", "dist", "=", "dist", "self", ".", "metadata_name", "=", "metadata_name" ]
[ 40, 4 ]
[ 48, 42 ]
python
en
['en', 'error', 'th']
False
NetworkConnectionError.__init__
(self, error_msg, response=None, request=None)
Initialize NetworkConnectionError with `request` and `response` objects.
Initialize NetworkConnectionError with `request` and `response` objects.
def __init__(self, error_msg, response=None, request=None): # type: (str, Response, Request) -> None """ Initialize NetworkConnectionError with `request` and `response` objects. """ self.response = response self.request = request self.error_msg = error_msg if (self.response is not None and not self.request and hasattr(response, 'request')): self.request = self.response.request super().__init__(error_msg, response, request)
[ "def", "__init__", "(", "self", ",", "error_msg", ",", "response", "=", "None", ",", "request", "=", "None", ")", ":", "# type: (str, Response, Request) -> None", "self", ".", "response", "=", "response", "self", ".", "request", "=", "request", "self", ".", "error_msg", "=", "error_msg", "if", "(", "self", ".", "response", "is", "not", "None", "and", "not", "self", ".", "request", "and", "hasattr", "(", "response", ",", "'request'", ")", ")", ":", "self", ".", "request", "=", "self", ".", "response", ".", "request", "super", "(", ")", ".", "__init__", "(", "error_msg", ",", "response", ",", "request", ")" ]
[ 104, 4 ]
[ 116, 54 ]
python
en
['en', 'error', 'th']
False
HashError.body
(self)
Return a summary of me for display under the heading. This default implementation simply prints a description of the triggering requirement. :param req: The InstallRequirement that provoked this error, with its link already populated by the resolver's _populate_link().
Return a summary of me for display under the heading.
def body(self): # type: () -> str """Return a summary of me for display under the heading. This default implementation simply prints a description of the triggering requirement. :param req: The InstallRequirement that provoked this error, with its link already populated by the resolver's _populate_link(). """ return f' {self._requirement_name()}'
[ "def", "body", "(", "self", ")", ":", "# type: () -> str", "return", "f' {self._requirement_name()}'" ]
[ 220, 4 ]
[ 231, 48 ]
python
en
['en', 'en', 'en']
True
HashError._requirement_name
(self)
Return a description of the requirement that triggered me. This default implementation returns long description of the req, with line numbers
Return a description of the requirement that triggered me.
def _requirement_name(self): # type: () -> str """Return a description of the requirement that triggered me. This default implementation returns long description of the req, with line numbers """ return str(self.req) if self.req else 'unknown package'
[ "def", "_requirement_name", "(", "self", ")", ":", "# type: () -> str", "return", "str", "(", "self", ".", "req", ")", "if", "self", ".", "req", "else", "'unknown package'" ]
[ 237, 4 ]
[ 245, 63 ]
python
en
['en', 'en', 'en']
True
HashMissing.__init__
(self, gotten_hash)
:param gotten_hash: The hash of the (possibly malicious) archive we just downloaded
:param gotten_hash: The hash of the (possibly malicious) archive we just downloaded
def __init__(self, gotten_hash): # type: (str) -> None """ :param gotten_hash: The hash of the (possibly malicious) archive we just downloaded """ self.gotten_hash = gotten_hash
[ "def", "__init__", "(", "self", ",", "gotten_hash", ")", ":", "# type: (str) -> None", "self", ".", "gotten_hash", "=", "gotten_hash" ]
[ 278, 4 ]
[ 284, 38 ]
python
en
['en', 'error', 'th']
False
HashMismatch.__init__
(self, allowed, gots)
:param allowed: A dict of algorithm names pointing to lists of allowed hex digests :param gots: A dict of algorithm names pointing to hashes we actually got from the files under suspicion
:param allowed: A dict of algorithm names pointing to lists of allowed hex digests :param gots: A dict of algorithm names pointing to hashes we actually got from the files under suspicion
def __init__(self, allowed, gots): # type: (Dict[str, List[str]], Dict[str, _Hash]) -> None """ :param allowed: A dict of algorithm names pointing to lists of allowed hex digests :param gots: A dict of algorithm names pointing to hashes we actually got from the files under suspicion """ self.allowed = allowed self.gots = gots
[ "def", "__init__", "(", "self", ",", "allowed", ",", "gots", ")", ":", "# type: (Dict[str, List[str]], Dict[str, _Hash]) -> None", "self", ".", "allowed", "=", "allowed", "self", ".", "gots", "=", "gots" ]
[ 329, 4 ]
[ 338, 24 ]
python
en
['en', 'error', 'th']
False
HashMismatch._hash_comparison
(self)
Return a comparison of actual and expected hash values. Example:: Expected sha256 abcdeabcdeabcdeabcdeabcdeabcdeabcdeabcdeabcde or 123451234512345123451234512345123451234512345 Got bcdefbcdefbcdefbcdefbcdefbcdefbcdefbcdefbcdef
Return a comparison of actual and expected hash values.
def _hash_comparison(self): # type: () -> str """ Return a comparison of actual and expected hash values. Example:: Expected sha256 abcdeabcdeabcdeabcdeabcdeabcdeabcdeabcdeabcde or 123451234512345123451234512345123451234512345 Got bcdefbcdefbcdefbcdefbcdefbcdefbcdefbcdefbcdef """ def hash_then_or(hash_name): # type: (str) -> chain[str] # For now, all the decent hashes have 6-char names, so we can get # away with hard-coding space literals. return chain([hash_name], repeat(' or')) lines = [] # type: List[str] for hash_name, expecteds in self.allowed.items(): prefix = hash_then_or(hash_name) lines.extend((' Expected {} {}'.format(next(prefix), e)) for e in expecteds) lines.append(' Got {}\n'.format( self.gots[hash_name].hexdigest())) return '\n'.join(lines)
[ "def", "_hash_comparison", "(", "self", ")", ":", "# type: () -> str", "def", "hash_then_or", "(", "hash_name", ")", ":", "# type: (str) -> chain[str]", "# For now, all the decent hashes have 6-char names, so we can get", "# away with hard-coding space literals.", "return", "chain", "(", "[", "hash_name", "]", ",", "repeat", "(", "' or'", ")", ")", "lines", "=", "[", "]", "# type: List[str]", "for", "hash_name", ",", "expecteds", "in", "self", ".", "allowed", ".", "items", "(", ")", ":", "prefix", "=", "hash_then_or", "(", "hash_name", ")", "lines", ".", "extend", "(", "(", "' Expected {} {}'", ".", "format", "(", "next", "(", "prefix", ")", ",", "e", ")", ")", "for", "e", "in", "expecteds", ")", "lines", ".", "append", "(", "' Got {}\\n'", ".", "format", "(", "self", ".", "gots", "[", "hash_name", "]", ".", "hexdigest", "(", ")", ")", ")", "return", "'\\n'", ".", "join", "(", "lines", ")" ]
[ 345, 4 ]
[ 370, 31 ]
python
en
['en', 'error', 'th']
False
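A sketch of what _hash_comparison renders, constructing the exception by hand with hashlib objects standing in for pip's internal _Hash type; the pinned digests are made-up placeholders, and the class lives in pip's private exceptions module, so outside pip this is illustration only:

import hashlib
from itertools import chain, repeat   # used by the method above

allowed = {'sha256': ['a' * 64, 'b' * 64]}    # hypothetical pinned digests
gots = {'sha256': hashlib.sha256(b'what was actually downloaded')}

err = HashMismatch(allowed, gots)
print(err._hash_comparison())
# Renders one "Expected" line per pinned digest (the first names the
# algorithm, subsequent ones substitute "or"), then the final line with
# "Got <hexdigest>" for the hash actually computed.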
subsample_for_vis
(eval_task_ids, tasks_per_template)
Keep only 1 task per template.
Keep only 1 task per template.
def subsample_for_vis(eval_task_ids, tasks_per_template): """Keep only 1 task per template.""" templates_sel = {} res = [] eval_task_ids = sorted(eval_task_ids) # For repro for task_id in eval_task_ids: this_temp = task_id.split(':')[0] if this_temp not in templates_sel: templates_sel[this_temp] = 1 res.append(task_id) elif templates_sel[this_temp] < tasks_per_template: templates_sel[this_temp] += 1 res.append(task_id) return res
[ "def", "subsample_for_vis", "(", "eval_task_ids", ",", "tasks_per_template", ")", ":", "templates_sel", "=", "{", "}", "res", "=", "[", "]", "eval_task_ids", "=", "sorted", "(", "eval_task_ids", ")", "# For repro", "for", "task_id", "in", "eval_task_ids", ":", "this_temp", "=", "task_id", ".", "split", "(", "':'", ")", "[", "0", "]", "if", "this_temp", "not", "in", "templates_sel", ":", "templates_sel", "[", "this_temp", "]", "=", "1", "res", ".", "append", "(", "task_id", ")", "elif", "templates_sel", "[", "this_temp", "]", "<", "tasks_per_template", ":", "templates_sel", "[", "this_temp", "]", "+=", "1", "res", ".", "append", "(", "task_id", ")", "return", "res" ]
[ 62, 0 ]
[ 75, 14 ]
python
en
['en', 'en', 'en']
True
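A worked example of the subsampling with PHYRE-style 'template:task' ids; with tasks_per_template=1 only the first task (in sorted order) of each template survives:

task_ids = ['00001:002', '00001:001', '00002:001', '00002:007', '00003:004']
print(subsample_for_vis(task_ids, tasks_per_template=1))
# -> ['00001:001', '00002:001', '00003:004']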
get_subset_tasks
(all_tasks, ratio)
Select a subset of tasks from all_tasks, keeping some % from each temp. Args: all_tasks: List of tasks like ['00001:001', ...] ratio: A number between 0-1, specifying how many of the tasks to keep. Returns: tasks: An updated list, with the ratio number of tasks.
Select a subset of tasks from all_tasks, keeping some % from each temp. Args: all_tasks: List of tasks like ['00001:001', ...] ratio: A number between 0-1, specifying how many of the tasks to keep. Returns: tasks: An updated list, with the ratio number of tasks.
def get_subset_tasks(all_tasks, ratio): """Select a subset of tasks from all_tasks, keeping some % from each temp. Args: all_tasks: List of tasks like ['00001:001', ...] ratio: A number between 0-1, specifying how many of the tasks to keep. Returns: tasks: An updated list, with the ratio number of tasks. """ if len(all_tasks) == 0: return all_tasks assert 0.0 <= ratio <= 1.0 all_tasks = sorted(all_tasks) samples_to_keep = int(len(all_tasks) * ratio) return all_tasks[::(len(all_tasks) // samples_to_keep)][:samples_to_keep]
[ "def", "get_subset_tasks", "(", "all_tasks", ",", "ratio", ")", ":", "if", "len", "(", "all_tasks", ")", "==", "0", ":", "return", "all_tasks", "assert", "0.0", "<=", "ratio", "<=", "1.0", "all_tasks", "=", "sorted", "(", "all_tasks", ")", "samples_to_keep", "=", "int", "(", "len", "(", "all_tasks", ")", "*", "ratio", ")", "return", "all_tasks", "[", ":", ":", "(", "len", "(", "all_tasks", ")", "//", "samples_to_keep", ")", "]", "[", ":", "samples_to_keep", "]" ]
[ 78, 0 ]
[ 91, 77 ]
python
en
['en', 'en', 'en']
True
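A worked example of the ratio-based subset: with ten tasks and ratio=0.3, samples_to_keep is 3 and the stride is 10 // 3 == 3, so evenly spaced ids are kept. One caveat, not handled above: a ratio small enough that int(len(all_tasks) * ratio) == 0 turns the stride into a division by zero.

tasks = ['{:05d}:{:03d}'.format(1, i) for i in range(10)]
print(get_subset_tasks(tasks, 0.3))
# -> ['00001:000', '00001:003', '00001:006']   (indices 0, 3 and 6)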
main
(cfg)
Run the training and testing.
Run the training and testing.
def main(cfg): """Run the training and testing.""" # Make a copy of overrides/etc files; so that if this code is run # again with a different override param (eg to generate vis etc), even if # it overwrites the config files and destroy that information, the original # info is stored and avlbl when making graphs etc if not os.path.exists('.hydra.orig'): subprocess.call('cp -r .hydra .hydra.orig', shell=True) print(cfg.num_gpus) templates_tasks = None if ':' in cfg.eval_setup_name: # Means that we only want template IDs defined after the ":" # The tasks itself would have "00001:<task_id>", hence splitting only 1 cfg.eval_setup_name, templates_tasks = cfg.eval_setup_name.split( ':', 1) train_task_ids, eval_task_ids = get_train_test(cfg.eval_setup_name, cfg.fold_id, cfg.use_test_split) if templates_tasks is not None: # Subselect the train/eval task ids to only keep the ones in task_ids templates_tasks = templates_tasks.split(';') final_templates = [] for temp_task in templates_tasks: if ':' in temp_task: temp, task = temp_task.split(':') else: temp = temp_task task = '' if '-' in temp_task: final_templates += [ '{:05d}:{}'.format(el, task) for el in range(int(temp.split('-')[0]), int(temp.split('-')[1]) + 1) ] else: final_templates += ['{:05d}:{}'.format(int(temp), task)] templates_tasks = sorted(list(set(final_templates))) logging.info('Running on %s templates/tasks', templates_tasks) def fits_templates_tasks(task_id): for temp_task in templates_tasks: if task_id.startswith(temp_task): return True return False train_task_ids = [ el for el in train_task_ids if fits_templates_tasks(el) ] eval_task_ids = [ el for el in eval_task_ids if fits_templates_tasks(el) ] assert len(train_task_ids) > 0 or len(eval_task_ids) > 0, ( 'At least one of train or test should have a task in it') train_task_ids = sorted(train_task_ids) eval_task_ids = sorted(eval_task_ids) logging.info('Final train task ids: %s', train_task_ids) logging.info('Final eval task ids: %s', eval_task_ids) assert 0.0 <= cfg.data_ratio_train <= 1.0, 'Should be within limits' assert 0.0 <= cfg.data_ratio_eval <= 1.0, 'Should be within limits' train_task_ids = get_subset_tasks(train_task_ids, cfg.data_ratio_train) eval_task_ids = get_subset_tasks(eval_task_ids, cfg.data_ratio_eval) assert cfg.tier is None, ( 'Do not set this beforehand; will figure from eval_setup') cfg.tier = phyre.eval_setup_to_action_tier(cfg.eval_setup_name) agent = find_all_agents()[cfg.agent.type] output_dir = os.getcwd() max_test_attempts_per_task = (cfg.max_test_attempts_per_task or phyre.MAX_TEST_ATTEMPTS) # Validate the config # If the following are not true, it gives weird errors, eg missing argument # in forward assert cfg.num_gpus == 0 or cfg.train.batch_size % cfg.num_gpus == 0 if cfg.eval.batch_size is not None: assert cfg.num_gpus == 0 or cfg.eval.batch_size % cfg.num_gpus == 0 # Scale the number of iters if cfg.train.scale_num_iter != 1.0: for param_name in [ 'num_iter', 'report_every', 'save_checkpoints_every', 'full_eval_every' ]: logging.info(f'cfg.train.scale_num_iter {cfg.train.scale_num_iter}') logging.info(f'param_name {param_name}') old_val = getattr(cfg.train, param_name) logging.info(f'old_val {old_val}') new_val = type(old_val)(old_val * cfg.train.scale_num_iter) setattr(cfg.train, param_name, new_val) logging.warning('Setting cfg.train.%s to %s using scale %f', param_name, new_val, cfg.train.scale_num_iter) # It's fine to use eval_task_ids iff it's dev. dev_tasks_ids = None if cfg.use_test_split else eval_task_ids summary_writer = SummaryWriter(log_dir=os.path.join(output_dir, 'logs')) full_eval_fn = partial(agent.eval, task_ids=eval_task_ids, max_attempts_per_task=max_test_attempts_per_task, cfg=cfg) logging.info('Starting training') state = agent.train(train_task_ids, dev_tasks_ids, full_eval_fn, output_dir=output_dir, summary_writer=summary_writer, cfg=cfg) ## Evaluation out_path = os.path.join( output_dir, 'results-vis.json' if cfg.eval.store_vis else 'results.json') # Don't stop re-evaluations if doing vis if (os.path.exists(out_path) and not cfg.force_eval and not cfg.eval.store_vis): logging.warning('Eval out path exists (%s). Del or no eval.', out_path) return 0 # Moved all of this to train, so the final prediction would be stored # in results_intermediate as well. However keeping the code here too since # it's used when only running testing. logging.info('Starting final eval') evaluation = full_eval_fn(state) num_tasks = len(eval_task_ids) results = {} results['num_eval_tasks'] = num_tasks results['metrics'] = evaluation.compute_all_metrics() results['metrics_rollout'] = evaluation.compute_all_metrics_over_rollout() results['metrics_per_task'] = evaluation.compute_all_metrics_per_task() results['args'] = sys.argv results['parsed_args'] = dict( # cfg=cfg, # Not json serializable, anyway will be stored in dir main_kwargs=dict(eval_setup_name=cfg.eval_setup_name, fold_id=cfg.fold_id, use_test_split=cfg.use_test_split, agent_type=cfg.agent.type, max_test_attempts_per_task=max_test_attempts_per_task, output_dir=output_dir)) print(results['parsed_args']) results['target_metric'] = ( results['metrics']['independent_solved_by_aucs'] [max_test_attempts_per_task]) results['target_metric_over_time'] = [ el['independent_solved_by_aucs'][max_test_attempts_per_task] for el in results['metrics_rollout'] ] logging.info('FINAL: %s; Over rollout: %s', results['target_metric'], results['target_metric_over_time']) summary_writer.add_scalar('AUCCESS-full/eval', results['target_metric']) summary_writer.close() if not os.path.exists(output_dir): os.makedirs(output_dir) with open(out_path, 'w') as stream: json.dump(results, stream)
[ "def", "main", "(", "cfg", ")", ":", "# Make a copy of overrides/etc files; so that if this code is run", "# again with a different override param (eg to generate vis etc), even if", "# it overwrites the config files and destroy that information, the original", "# info is stored and avlbl when making graphs etc", "if", "not", "os", ".", "path", ".", "exists", "(", "'.hydra.orig'", ")", ":", "subprocess", ".", "call", "(", "'cp -r .hydra .hydra.orig'", ",", "shell", "=", "True", ")", "print", "(", "cfg", ".", "num_gpus", ")", "templates_tasks", "=", "None", "if", "':'", "in", "cfg", ".", "eval_setup_name", ":", "# Means that we only want template IDs defined after the \":\"", "# The tasks itself would have \"00001:<task_id>\", hence splitting only 1", "cfg", ".", "eval_setup_name", ",", "templates_tasks", "=", "cfg", ".", "eval_setup_name", ".", "split", "(", "':'", ",", "1", ")", "train_task_ids", ",", "eval_task_ids", "=", "get_train_test", "(", "cfg", ".", "eval_setup_name", ",", "cfg", ".", "fold_id", ",", "cfg", ".", "use_test_split", ")", "if", "templates_tasks", "is", "not", "None", ":", "# Subselect the train/eval task ids to only keep the ones in task_ids", "templates_tasks", "=", "templates_tasks", ".", "split", "(", "';'", ")", "final_templates", "=", "[", "]", "for", "temp_task", "in", "templates_tasks", ":", "if", "':'", "in", "temp_task", ":", "temp", ",", "task", "=", "temp_task", ".", "split", "(", "':'", ")", "else", ":", "temp", "=", "temp_task", "task", "=", "''", "if", "'-'", "in", "temp_task", ":", "final_templates", "+=", "[", "'{:05d}:{}'", ".", "format", "(", "el", ",", "task", ")", "for", "el", "in", "range", "(", "int", "(", "temp", ".", "split", "(", "'-'", ")", "[", "0", "]", ")", ",", "int", "(", "temp", ".", "split", "(", "'-'", ")", "[", "1", "]", ")", "+", "1", ")", "]", "else", ":", "final_templates", "+=", "[", "'{:05d}:{}'", ".", "format", "(", "int", "(", "temp", ")", ",", "task", ")", "]", "templates_tasks", "=", "sorted", "(", "list", "(", "set", "(", "final_templates", ")", ")", ")", "logging", ".", "info", "(", "'Running on %s templates/tasks'", ",", "templates_tasks", ")", "def", "fits_templates_tasks", "(", "task_id", ")", ":", "for", "temp_task", "in", "templates_tasks", ":", "if", "task_id", ".", "startswith", "(", "temp_task", ")", ":", "return", "True", "return", "False", "train_task_ids", "=", "[", "el", "for", "el", "in", "train_task_ids", "if", "fits_templates_tasks", "(", "el", ")", "]", "eval_task_ids", "=", "[", "el", "for", "el", "in", "eval_task_ids", "if", "fits_templates_tasks", "(", "el", ")", "]", "assert", "len", "(", "train_task_ids", ")", ">", "0", "or", "len", "(", "eval_task_ids", ")", ">", "0", ",", "(", "'At least one of train or test should have a task in it'", ")", "train_task_ids", "=", "sorted", "(", "train_task_ids", ")", "eval_task_ids", "=", "sorted", "(", "eval_task_ids", ")", "logging", ".", "info", "(", "'Final train task ids: %s'", ",", "train_task_ids", ")", "logging", ".", "info", "(", "'Final eval task ids: %s'", ",", "eval_task_ids", ")", "assert", "0.0", "<=", "cfg", ".", "data_ratio_train", "<=", "1.0", ",", "'Should be within limits'", "assert", "0.0", "<=", "cfg", ".", "data_ratio_eval", "<=", "1.0", ",", "'Should be within limits'", "train_task_ids", "=", "get_subset_tasks", "(", "train_task_ids", ",", "cfg", ".", "data_ratio_train", ")", "eval_task_ids", "=", "get_subset_tasks", "(", "eval_task_ids", ",", "cfg", ".", "data_ratio_eval", ")", "assert", "cfg", ".", "tier", "is", "None", ",", "(", "'Do not set this beforehand; will figure from eval_setup'", ")", "cfg", ".", "tier", "=", "phyre", ".", "eval_setup_to_action_tier", "(", "cfg", ".", "eval_setup_name", ")", "agent", "=", "find_all_agents", "(", ")", "[", "cfg", ".", "agent", ".", "type", "]", "output_dir", "=", "os", ".", "getcwd", "(", ")", "max_test_attempts_per_task", "=", "(", "cfg", ".", "max_test_attempts_per_task", "or", "phyre", ".", "MAX_TEST_ATTEMPTS", ")", "# Validate the config", "# If the following are not true, it gives weird errors, eg missing argument", "# in forward", "assert", "cfg", ".", "num_gpus", "==", "0", "or", "cfg", ".", "train", ".", "batch_size", "%", "cfg", ".", "num_gpus", "==", "0", "if", "cfg", ".", "eval", ".", "batch_size", "is", "not", "None", ":", "assert", "cfg", ".", "num_gpus", "==", "0", "or", "cfg", ".", "eval", ".", "batch_size", "%", "cfg", ".", "num_gpus", "==", "0", "# Scale the number of iters", "if", "cfg", ".", "train", ".", "scale_num_iter", "!=", "1.0", ":", "for", "param_name", "in", "[", "'num_iter'", ",", "'report_every'", ",", "'save_checkpoints_every'", ",", "'full_eval_every'", "]", ":", "logging", ".", "info", "(", "f'cfg.train.scale_num_iter {cfg.train.scale_num_iter}'", ")", "logging", ".", "info", "(", "f'param_name {param_name}'", ")", "old_val", "=", "getattr", "(", "cfg", ".", "train", ",", "param_name", ")", "logging", ".", "info", "(", "f'old_val {old_val}'", ")", "new_val", "=", "type", "(", "old_val", ")", "(", "old_val", "*", "cfg", ".", "train", ".", "scale_num_iter", ")", "setattr", "(", "cfg", ".", "train", ",", "param_name", ",", "new_val", ")", "logging", ".", "warning", "(", "'Setting cfg.train.%s to %s using scale %f'", ",", "param_name", ",", "new_val", ",", "cfg", ".", "train", ".", "scale_num_iter", ")", "# It's fine to use eval_task_ids iff it's dev.", "dev_tasks_ids", "=", "None", "if", "cfg", ".", "use_test_split", "else", "eval_task_ids", "summary_writer", "=", "SummaryWriter", "(", "log_dir", "=", "os", ".", "path", ".", "join", "(", "output_dir", ",", "'logs'", ")", ")", "full_eval_fn", "=", "partial", "(", "agent", ".", "eval", ",", "task_ids", "=", "eval_task_ids", ",", "max_attempts_per_task", "=", "max_test_attempts_per_task", ",", "cfg", "=", "cfg", ")", "logging", ".", "info", "(", "'Starting training'", ")", "state", "=", "agent", ".", "train", "(", "train_task_ids", ",", "dev_tasks_ids", ",", "full_eval_fn", ",", "output_dir", "=", "output_dir", ",", "summary_writer", "=", "summary_writer", ",", "cfg", "=", "cfg", ")", "## Evaluation", "out_path", "=", "os", ".", "path", ".", "join", "(", "output_dir", ",", "'results-vis.json'", "if", "cfg", ".", "eval", ".", "store_vis", "else", "'results.json'", ")", "# Don't stop re-evaluations if doing vis", "if", "(", "os", ".", "path", ".", "exists", "(", "out_path", ")", "and", "not", "cfg", ".", "force_eval", "and", "not", "cfg", ".", "eval", ".", "store_vis", ")", ":", "logging", ".", "warning", "(", "'Eval out path exists (%s). Del or no eval.'", ",", "out_path", ")", "return", "0", "# Moved all of this to train, so the final prediction would be stored", "# in results_intermediate as well. However keeping the code here too since", "# it's used when only running testing.", "logging", ".", "info", "(", "'Starting final eval'", ")", "evaluation", "=", "full_eval_fn", "(", "state", ")", "num_tasks", "=", "len", "(", "eval_task_ids", ")", "results", "=", "{", "}", "results", "[", "'num_eval_tasks'", "]", "=", "num_tasks", "results", "[", "'metrics'", "]", "=", "evaluation", ".", "compute_all_metrics", "(", ")", "results", "[", "'metrics_rollout'", "]", "=", "evaluation", ".", "compute_all_metrics_over_rollout", "(", ")", "results", "[", "'metrics_per_task'", "]", "=", "evaluation", ".", "compute_all_metrics_per_task", "(", ")", "results", "[", "'args'", "]", "=", "sys", ".", "argv", "results", "[", "'parsed_args'", "]", "=", "dict", "(", "# cfg=cfg, # Not json serializable, anyway will be stored in dir", "main_kwargs", "=", "dict", "(", "eval_setup_name", "=", "cfg", ".", "eval_setup_name", ",", "fold_id", "=", "cfg", ".", "fold_id", ",", "use_test_split", "=", "cfg", ".", "use_test_split", ",", "agent_type", "=", "cfg", ".", "agent", ".", "type", ",", "max_test_attempts_per_task", "=", "max_test_attempts_per_task", ",", "output_dir", "=", "output_dir", ")", ")", "print", "(", "results", "[", "'parsed_args'", "]", ")", "results", "[", "'target_metric'", "]", "=", "(", "results", "[", "'metrics'", "]", "[", "'independent_solved_by_aucs'", "]", "[", "max_test_attempts_per_task", "]", ")", "results", "[", "'target_metric_over_time'", "]", "=", "[", "el", "[", "'independent_solved_by_aucs'", "]", "[", "max_test_attempts_per_task", "]", "for", "el", "in", "results", "[", "'metrics_rollout'", "]", "]", "logging", ".", "info", "(", "'FINAL: %s; Over rollout: %s'", ",", "results", "[", "'target_metric'", "]", ",", "results", "[", "'target_metric_over_time'", "]", ")", "summary_writer", ".", "add_scalar", "(", "'AUCCESS-full/eval'", ",", "results", "[", "'target_metric'", "]", ")", "summary_writer", ".", "close", "(", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "output_dir", ")", ":", "os", ".", "makedirs", "(", "output_dir", ")", "with", "open", "(", "out_path", ",", "'w'", ")", "as", "stream", ":", "json", ".", "dump", "(", "results", ",", "stream", ")" ]
[ 95, 0 ]
[ 252, 34 ]
python
en
['en', 'en', 'en']
True
run
(argv=None)
Build and run the pipeline.
Build and run the pipeline.
def run(argv=None): """Build and run the pipeline.""" parser = argparse.ArgumentParser() parser.add_argument( '--project', help=('Google Cloud Project ID'), required=True) parser.add_argument( '--input_topic', help=('Google Cloud PubSub topic name '), required=True) known_args, pipeline_args = parser.parse_known_args(argv) pipeline_options = PipelineOptions( pipeline_args.append('--project={}'.format(known_args.project))) pipeline_options.view_as(SetupOptions).save_main_session = True pipeline_options.view_as(StandardOptions).streaming = True p = beam.Pipeline(options=pipeline_options) TOPIC = 'projects/{}/topics/{}'.format(known_args.project, known_args.input_topic) # this table needs to exist table_spec = '{}:taxifare.traffic_realtime'.format(known_args.project) def to_bq_format(count): """BigQuery writer requires rows to be stored as python dictionary""" return {'trips_last_5min': count, 'time': datetime.now().strftime("%Y-%m-%d %H:%M:%S")} pipeline = (p | 'read_from_pubsub' >> beam.io.ReadFromPubSub(topic=TOPIC).with_output_types(bytes) | 'window' >> # TODO: Your code goes here. | 'count' >> beam.CombineGlobally(CountFn()).without_defaults() | 'format_for_bq' >> beam.Map(to_bq_format) | 'write_to_bq' >> beam.io.WriteToBigQuery( table_spec, # WRITE_TRUNCATE not supported for streaming write_disposition=beam.io.BigQueryDisposition.WRITE_APPEND, create_disposition=beam.io.BigQueryDisposition.CREATE_NEVER) ) result = p.run()
[ "def", "run", "(", "argv", "=", "None", ")", ":", "parser", "=", "argparse", ".", "ArgumentParser", "(", ")", "parser", ".", "add_argument", "(", "'--project'", ",", "help", "=", "(", "'Google Cloud Project ID'", ")", ",", "required", "=", "True", ")", "parser", ".", "add_argument", "(", "'--input_topic'", ",", "help", "=", "(", "'Google Cloud PubSub topic name '", ")", ",", "required", "=", "True", ")", "known_args", ",", "pipeline_args", "=", "parser", ".", "parse_known_args", "(", "argv", ")", "pipeline_options", "=", "PipelineOptions", "(", "pipeline_args", ".", "append", "(", "'--project={}'", ".", "format", "(", "known_args", ".", "project", ")", ")", ")", "pipeline_options", ".", "view_as", "(", "SetupOptions", ")", ".", "save_main_session", "=", "True", "pipeline_options", ".", "view_as", "(", "StandardOptions", ")", ".", "streaming", "=", "True", "p", "=", "beam", ".", "Pipeline", "(", "options", "=", "pipeline_options", ")", "TOPIC", "=", "'projects/{}/topics/{}'", ".", "format", "(", "known_args", ".", "project", ",", "known_args", ".", "input_topic", ")", "# this table needs to exist", "table_spec", "=", "'{}:taxifare.traffic_realtime'", ".", "format", "(", "known_args", ".", "project", ")", "def", "to_bq_format", "(", "count", ")", ":", "\"\"\"BigQuery writer requires rows to be stored as python dictionary\"\"\"", "return", "{", "'trips_last_5min'", ":", "count", ",", "'time'", ":", "datetime", ".", "now", "(", ")", ".", "strftime", "(", "\"%Y-%m-%d %H:%M:%S\"", ")", "}", "pipeline", "=", "(", "p", "|", "'read_from_pubsub'", ">>", "beam", ".", "io", ".", "ReadFromPubSub", "(", "topic", "=", "TOPIC", ")", ".", "with_output_types", "(", "bytes", ")", "|", "'window'", ">>", "# TODO: Your code goes here.", "|", "'count'", ">>", "beam", ".", "CombineGlobally", "(", "CountFn", "(", ")", ")", ".", "without_defaults", "(", ")", "|", "'format_for_bq'", ">>", "beam", ".", "Map", "(", "to_bq_format", ")", "|", "'write_to_bq'", ">>", "beam", ".", "io", ".", "WriteToBigQuery", "(", "table_spec", ",", "# WRITE_TRUNCATE not supported for streaming", "write_disposition", "=", "beam", ".", "io", ".", "BigQueryDisposition", ".", "WRITE_APPEND", ",", "create_disposition", "=", "beam", ".", "io", ".", "BigQueryDisposition", ".", "CREATE_NEVER", ")", ")", "result", "=", "p", ".", "run", "(", ")" ]
[ 34, 0 ]
[ 78, 20 ]
python
en
['en', 'en', 'en']
True
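Two notes on the pipeline above. First, list.append returns None, so PipelineOptions is really constructed with None and silently falls back to parsing sys.argv; that happens to work because --project is on the command line, but the intent reads better in two steps. Second, the 'window' step is deliberately left as a TODO in the source (it is a training exercise); for a trips-in-the-last-5-minutes metric a sliding window is one plausible completion. Both are sketched below, with the window size and period being assumptions:

import apache_beam as beam
from apache_beam.options.pipeline_options import PipelineOptions
from apache_beam.transforms import window

def make_options(known_args, pipeline_args):
    # Mutate the list first, then pass the list itself.
    pipeline_args.append('--project={}'.format(known_args.project))
    return PipelineOptions(pipeline_args)

# A plausible body for the TODO'd step: a 5-minute window that slides
# every 15 seconds (both durations chosen purely for illustration).
window_step = 'window' >> beam.WindowInto(
    window.SlidingWindows(size=5 * 60, period=15))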
translate_pattern
(glob)
Translate a file path glob like '*.txt' in to a regular expression. This differs from fnmatch.translate which allows wildcards to match directory separators. It also knows about '**/' which matches any number of directories.
Translate a file path glob like '*.txt' in to a regular expression. This differs from fnmatch.translate which allows wildcards to match directory separators. It also knows about '**/' which matches any number of directories.
def translate_pattern(glob): """ Translate a file path glob like '*.txt' in to a regular expression. This differs from fnmatch.translate which allows wildcards to match directory separators. It also knows about '**/' which matches any number of directories. """ pat = '' # This will split on '/' within [character classes]. This is deliberate. chunks = glob.split(os.path.sep) sep = re.escape(os.sep) valid_char = '[^%s]' % (sep,) for c, chunk in enumerate(chunks): last_chunk = c == len(chunks) - 1 # Chunks that are a literal ** are globstars. They match anything. if chunk == '**': if last_chunk: # Match anything if this is the last component pat += '.*' else: # Match '(name/)*' pat += '(?:%s+%s)*' % (valid_char, sep) continue # Break here as the whole path component has been handled # Find any special characters in the remainder i = 0 chunk_len = len(chunk) while i < chunk_len: char = chunk[i] if char == '*': # Match any number of name characters pat += valid_char + '*' elif char == '?': # Match a name character pat += valid_char elif char == '[': # Character class inner_i = i + 1 # Skip initial !/] chars if inner_i < chunk_len and chunk[inner_i] == '!': inner_i = inner_i + 1 if inner_i < chunk_len and chunk[inner_i] == ']': inner_i = inner_i + 1 # Loop till the closing ] is found while inner_i < chunk_len and chunk[inner_i] != ']': inner_i = inner_i + 1 if inner_i >= chunk_len: # Got to the end of the string without finding a closing ] # Do not treat this as a matching group, but as a literal [ pat += re.escape(char) else: # Grab the insides of the [brackets] inner = chunk[i + 1:inner_i] char_class = '' # Class negation if inner[0] == '!': char_class = '^' inner = inner[1:] char_class += re.escape(inner) pat += '[%s]' % (char_class,) # Skip to the end ] i = inner_i else: pat += re.escape(char) i += 1 # Join each chunk with the dir separator if not last_chunk: pat += sep pat += r'\Z' return re.compile(pat, flags=re.MULTILINE|re.DOTALL)
[ "def", "translate_pattern", "(", "glob", ")", ":", "pat", "=", "''", "# This will split on '/' within [character classes]. This is deliberate.", "chunks", "=", "glob", ".", "split", "(", "os", ".", "path", ".", "sep", ")", "sep", "=", "re", ".", "escape", "(", "os", ".", "sep", ")", "valid_char", "=", "'[^%s]'", "%", "(", "sep", ",", ")", "for", "c", ",", "chunk", "in", "enumerate", "(", "chunks", ")", ":", "last_chunk", "=", "c", "==", "len", "(", "chunks", ")", "-", "1", "# Chunks that are a literal ** are globstars. They match anything.", "if", "chunk", "==", "'**'", ":", "if", "last_chunk", ":", "# Match anything if this is the last component", "pat", "+=", "'.*'", "else", ":", "# Match '(name/)*'", "pat", "+=", "'(?:%s+%s)*'", "%", "(", "valid_char", ",", "sep", ")", "continue", "# Break here as the whole path component has been handled", "# Find any special characters in the remainder", "i", "=", "0", "chunk_len", "=", "len", "(", "chunk", ")", "while", "i", "<", "chunk_len", ":", "char", "=", "chunk", "[", "i", "]", "if", "char", "==", "'*'", ":", "# Match any number of name characters", "pat", "+=", "valid_char", "+", "'*'", "elif", "char", "==", "'?'", ":", "# Match a name character", "pat", "+=", "valid_char", "elif", "char", "==", "'['", ":", "# Character class", "inner_i", "=", "i", "+", "1", "# Skip initial !/] chars", "if", "inner_i", "<", "chunk_len", "and", "chunk", "[", "inner_i", "]", "==", "'!'", ":", "inner_i", "=", "inner_i", "+", "1", "if", "inner_i", "<", "chunk_len", "and", "chunk", "[", "inner_i", "]", "==", "']'", ":", "inner_i", "=", "inner_i", "+", "1", "# Loop till the closing ] is found", "while", "inner_i", "<", "chunk_len", "and", "chunk", "[", "inner_i", "]", "!=", "']'", ":", "inner_i", "=", "inner_i", "+", "1", "if", "inner_i", ">=", "chunk_len", ":", "# Got to the end of the string without finding a closing ]", "# Do not treat this as a matching group, but as a literal [", "pat", "+=", "re", ".", "escape", "(", "char", ")", "else", ":", "# Grab the insides of the [brackets]", "inner", "=", "chunk", "[", "i", "+", "1", ":", "inner_i", "]", "char_class", "=", "''", "# Class negation", "if", "inner", "[", "0", "]", "==", "'!'", ":", "char_class", "=", "'^'", "inner", "=", "inner", "[", "1", ":", "]", "char_class", "+=", "re", ".", "escape", "(", "inner", ")", "pat", "+=", "'[%s]'", "%", "(", "char_class", ",", ")", "# Skip to the end ]", "i", "=", "inner_i", "else", ":", "pat", "+=", "re", ".", "escape", "(", "char", ")", "i", "+=", "1", "# Join each chunk with the dir separator", "if", "not", "last_chunk", ":", "pat", "+=", "sep", "pat", "+=", "r'\\Z'", "return", "re", ".", "compile", "(", "pat", ",", "flags", "=", "re", ".", "MULTILINE", "|", "re", ".", "DOTALL", ")" ]
[ 35, 0 ]
[ 115, 56 ]
python
en
['en', 'error', 'th']
False
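A quick check of the compiled pattern's semantics on a POSIX separator: '*' and '?' never cross '/', while a literal '**' component spans any number of directories, and the trailing \Z anchors the whole path:

pat = translate_pattern('docs/**/*.txt')          # assumes os.sep == '/'
assert pat.match('docs/index.txt')                # '**' can match zero dirs
assert pat.match('docs/guide/deep/intro.txt')     # ... or several
assert not pat.match('docs/guide/intro.txt.bak')  # anchored at the end
assert not pat.match('src/notes.txt')             # literal 'docs' required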
write_file
(filename, contents)
Create a file with the specified name and write 'contents' (a sequence of strings without line terminators) to it.
Create a file with the specified name and write 'contents' (a sequence of strings without line terminators) to it.
def write_file(filename, contents): """Create a file with the specified name and write 'contents' (a sequence of strings without line terminators) to it. """ contents = "\n".join(contents) # assuming the contents has been vetted for utf-8 encoding contents = contents.encode("utf-8") with open(filename, "wb") as f: # always write POSIX-style manifest f.write(contents)
[ "def", "write_file", "(", "filename", ",", "contents", ")", ":", "contents", "=", "\"\\n\"", ".", "join", "(", "contents", ")", "# assuming the contents has been vetted for utf-8 encoding", "contents", "=", "contents", ".", "encode", "(", "\"utf-8\"", ")", "with", "open", "(", "filename", ",", "\"wb\"", ")", "as", "f", ":", "# always write POSIX-style manifest", "f", ".", "write", "(", "contents", ")" ]
[ 580, 0 ]
[ 590, 25 ]
python
en
['en', 'en', 'en']
True
get_pkg_info_revision
()
Get a -r### off of PKG-INFO Version in case this is an sdist of a subversion revision.
Get a -r### off of PKG-INFO Version in case this is an sdist of a subversion revision.
def get_pkg_info_revision(): """ Get a -r### off of PKG-INFO Version in case this is an sdist of a subversion revision. """ warnings.warn("get_pkg_info_revision is deprecated.", DeprecationWarning) if os.path.exists('PKG-INFO'): with io.open('PKG-INFO') as f: for line in f: match = re.match(r"Version:.*-r(\d+)\s*$", line) if match: return int(match.group(1)) return 0
[ "def", "get_pkg_info_revision", "(", ")", ":", "warnings", ".", "warn", "(", "\"get_pkg_info_revision is deprecated.\"", ",", "DeprecationWarning", ")", "if", "os", ".", "path", ".", "exists", "(", "'PKG-INFO'", ")", ":", "with", "io", ".", "open", "(", "'PKG-INFO'", ")", "as", "f", ":", "for", "line", "in", "f", ":", "match", "=", "re", ".", "match", "(", "r\"Version:.*-r(\\d+)\\s*$\"", ",", "line", ")", "if", "match", ":", "return", "int", "(", "match", ".", "group", "(", "1", ")", ")", "return", "0" ]
[ 683, 0 ]
[ 695, 12 ]
python
en
['en', 'error', 'th']
False
egg_info.save_version_info
(self, filename)
Materialize the value of date into the build tag. Install build keys in a deterministic order to avoid arbitrary reordering on subsequent builds.
Materialize the value of date into the build tag. Install build keys in a deterministic order to avoid arbitrary reordering on subsequent builds.
def save_version_info(self, filename): """ Materialize the value of date into the build tag. Install build keys in a deterministic order to avoid arbitrary reordering on subsequent builds. """ egg_info = collections.OrderedDict() # follow the order these keys would have been added # when PYTHONHASHSEED=0 egg_info['tag_build'] = self.tags() egg_info['tag_date'] = 0 edit_config(filename, dict(egg_info=egg_info))
[ "def", "save_version_info", "(", "self", ",", "filename", ")", ":", "egg_info", "=", "collections", ".", "OrderedDict", "(", ")", "# follow the order these keys would have been added", "# when PYTHONHASHSEED=0", "egg_info", "[", "'tag_build'", "]", "=", "self", ".", "tags", "(", ")", "egg_info", "[", "'tag_date'", "]", "=", "0", "edit_config", "(", "filename", ",", "dict", "(", "egg_info", "=", "egg_info", ")", ")" ]
[ 156, 4 ]
[ 167, 54 ]
python
en
['en', 'error', 'th']
False
egg_info.write_or_delete_file
(self, what, filename, data, force=False)
Write `data` to `filename` or delete if empty If `data` is non-empty, this routine is the same as ``write_file()``. If `data` is empty but not ``None``, this is the same as calling ``delete_file(filename)`. If `data` is ``None``, then this is a no-op unless `filename` exists, in which case a warning is issued about the orphaned file (if `force` is false), or deleted (if `force` is true).
Write `data` to `filename` or delete if empty
def write_or_delete_file(self, what, filename, data, force=False): """Write `data` to `filename` or delete if empty If `data` is non-empty, this routine is the same as ``write_file()``. If `data` is empty but not ``None``, this is the same as calling ``delete_file(filename)`. If `data` is ``None``, then this is a no-op unless `filename` exists, in which case a warning is issued about the orphaned file (if `force` is false), or deleted (if `force` is true). """ if data: self.write_file(what, filename, data) elif os.path.exists(filename): if data is None and not force: log.warn( "%s not set in setup(), but %s exists", what, filename ) return else: self.delete_file(filename)
[ "def", "write_or_delete_file", "(", "self", ",", "what", ",", "filename", ",", "data", ",", "force", "=", "False", ")", ":", "if", "data", ":", "self", ".", "write_file", "(", "what", ",", "filename", ",", "data", ")", "elif", "os", ".", "path", ".", "exists", "(", "filename", ")", ":", "if", "data", "is", "None", "and", "not", "force", ":", "log", ".", "warn", "(", "\"%s not set in setup(), but %s exists\"", ",", "what", ",", "filename", ")", "return", "else", ":", "self", ".", "delete_file", "(", "filename", ")" ]
[ 216, 4 ]
[ 234, 42 ]
python
en
['en', 'el-Latn', 'en']
True
egg_info.write_file
(self, what, filename, data)
Write `data` to `filename` (if not a dry run) after announcing it `what` is used in a log message to identify what is being written to the file.
Write `data` to `filename` (if not a dry run) after announcing it
def write_file(self, what, filename, data): """Write `data` to `filename` (if not a dry run) after announcing it `what` is used in a log message to identify what is being written to the file. """ log.info("writing %s to %s", what, filename) if six.PY3: data = data.encode("utf-8") if not self.dry_run: f = open(filename, 'wb') f.write(data) f.close()
[ "def", "write_file", "(", "self", ",", "what", ",", "filename", ",", "data", ")", ":", "log", ".", "info", "(", "\"writing %s to %s\"", ",", "what", ",", "filename", ")", "if", "six", ".", "PY3", ":", "data", "=", "data", ".", "encode", "(", "\"utf-8\"", ")", "if", "not", "self", ".", "dry_run", ":", "f", "=", "open", "(", "filename", ",", "'wb'", ")", "f", ".", "write", "(", "data", ")", "f", ".", "close", "(", ")" ]
[ 236, 4 ]
[ 248, 21 ]
python
en
['en', 'en', 'en']
True
egg_info.delete_file
(self, filename)
Delete `filename` (if not a dry run) after announcing it
Delete `filename` (if not a dry run) after announcing it
def delete_file(self, filename): """Delete `filename` (if not a dry run) after announcing it""" log.info("deleting %s", filename) if not self.dry_run: os.unlink(filename)
[ "def", "delete_file", "(", "self", ",", "filename", ")", ":", "log", ".", "info", "(", "\"deleting %s\"", ",", "filename", ")", "if", "not", "self", ".", "dry_run", ":", "os", ".", "unlink", "(", "filename", ")" ]
[ 250, 4 ]
[ 254, 31 ]
python
en
['en', 'en', 'en']
True
egg_info.find_sources
(self)
Generate SOURCES.txt manifest file
Generate SOURCES.txt manifest file
def find_sources(self): """Generate SOURCES.txt manifest file""" manifest_filename = os.path.join(self.egg_info, "SOURCES.txt") mm = manifest_maker(self.distribution) mm.manifest = manifest_filename mm.run() self.filelist = mm.filelist
[ "def", "find_sources", "(", "self", ")", ":", "manifest_filename", "=", "os", ".", "path", ".", "join", "(", "self", ".", "egg_info", ",", "\"SOURCES.txt\"", ")", "mm", "=", "manifest_maker", "(", "self", ".", "distribution", ")", "mm", ".", "manifest", "=", "manifest_filename", "mm", ".", "run", "(", ")", "self", ".", "filelist", "=", "mm", ".", "filelist" ]
[ 287, 4 ]
[ 293, 35 ]
python
en
['en', 'en', 'it']
True
FileList._remove_files
(self, predicate)
Remove all files from the file list that match the predicate. Return True if any matching files were removed
Remove all files from the file list that match the predicate. Return True if any matching files were removed
def _remove_files(self, predicate): """ Remove all files from the file list that match the predicate. Return True if any matching files were removed """ found = False for i in range(len(self.files) - 1, -1, -1): if predicate(self.files[i]): self.debug_print(" removing " + self.files[i]) del self.files[i] found = True return found
[ "def", "_remove_files", "(", "self", ",", "predicate", ")", ":", "found", "=", "False", "for", "i", "in", "range", "(", "len", "(", "self", ".", "files", ")", "-", "1", ",", "-", "1", ",", "-", "1", ")", ":", "if", "predicate", "(", "self", ".", "files", "[", "i", "]", ")", ":", "self", ".", "debug_print", "(", "\" removing \"", "+", "self", ".", "files", "[", "i", "]", ")", "del", "self", ".", "files", "[", "i", "]", "found", "=", "True", "return", "found" ]
[ 387, 4 ]
[ 398, 20 ]
python
en
['en', 'error', 'th']
False
FileList.include
(self, pattern)
Include files that match 'pattern'.
Include files that match 'pattern'.
def include(self, pattern): """Include files that match 'pattern'.""" found = [f for f in glob(pattern) if not os.path.isdir(f)] self.extend(found) return bool(found)
[ "def", "include", "(", "self", ",", "pattern", ")", ":", "found", "=", "[", "f", "for", "f", "in", "glob", "(", "pattern", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "f", ")", "]", "self", ".", "extend", "(", "found", ")", "return", "bool", "(", "found", ")" ]
[ 400, 4 ]
[ 404, 26 ]
python
en
['en', 'en', 'en']
True
FileList.exclude
(self, pattern)
Exclude files that match 'pattern'.
Exclude files that match 'pattern'.
def exclude(self, pattern): """Exclude files that match 'pattern'.""" match = translate_pattern(pattern) return self._remove_files(match.match)
[ "def", "exclude", "(", "self", ",", "pattern", ")", ":", "match", "=", "translate_pattern", "(", "pattern", ")", "return", "self", ".", "_remove_files", "(", "match", ".", "match", ")" ]
[ 406, 4 ]
[ 409, 46 ]
python
en
['en', 'en', 'en']
True
FileList.recursive_include
(self, dir, pattern)
Include all files anywhere in 'dir/' that match the pattern.
Include all files anywhere in 'dir/' that match the pattern.
def recursive_include(self, dir, pattern): """ Include all files anywhere in 'dir/' that match the pattern. """ full_pattern = os.path.join(dir, '**', pattern) found = [f for f in glob(full_pattern, recursive=True) if not os.path.isdir(f)] self.extend(found) return bool(found)
[ "def", "recursive_include", "(", "self", ",", "dir", ",", "pattern", ")", ":", "full_pattern", "=", "os", ".", "path", ".", "join", "(", "dir", ",", "'**'", ",", "pattern", ")", "found", "=", "[", "f", "for", "f", "in", "glob", "(", "full_pattern", ",", "recursive", "=", "True", ")", "if", "not", "os", ".", "path", ".", "isdir", "(", "f", ")", "]", "self", ".", "extend", "(", "found", ")", "return", "bool", "(", "found", ")" ]
[ 411, 4 ]
[ 419, 26 ]
python
en
['en', 'error', 'th']
False
FileList.recursive_exclude
(self, dir, pattern)
Exclude any file anywhere in 'dir/' that match the pattern.
Exclude any file anywhere in 'dir/' that match the pattern.
def recursive_exclude(self, dir, pattern): """ Exclude any file anywhere in 'dir/' that match the pattern. """ match = translate_pattern(os.path.join(dir, '**', pattern)) return self._remove_files(match.match)
[ "def", "recursive_exclude", "(", "self", ",", "dir", ",", "pattern", ")", ":", "match", "=", "translate_pattern", "(", "os", ".", "path", ".", "join", "(", "dir", ",", "'**'", ",", "pattern", ")", ")", "return", "self", ".", "_remove_files", "(", "match", ".", "match", ")" ]
[ 421, 4 ]
[ 426, 46 ]
python
en
['en', 'error', 'th']
False
FileList.graft
(self, dir)
Include all files from 'dir/'.
Include all files from 'dir/'.
def graft(self, dir): """Include all files from 'dir/'.""" found = [ item for match_dir in glob(dir) for item in distutils.filelist.findall(match_dir) ] self.extend(found) return bool(found)
[ "def", "graft", "(", "self", ",", "dir", ")", ":", "found", "=", "[", "item", "for", "match_dir", "in", "glob", "(", "dir", ")", "for", "item", "in", "distutils", ".", "filelist", ".", "findall", "(", "match_dir", ")", "]", "self", ".", "extend", "(", "found", ")", "return", "bool", "(", "found", ")" ]
[ 428, 4 ]
[ 436, 26 ]
python
en
['en', 'en', 'en']
True
FileList.prune
(self, dir)
Filter out files from 'dir/'.
Filter out files from 'dir/'.
def prune(self, dir): """Filter out files from 'dir/'.""" match = translate_pattern(os.path.join(dir, '**')) return self._remove_files(match.match)
[ "def", "prune", "(", "self", ",", "dir", ")", ":", "match", "=", "translate_pattern", "(", "os", ".", "path", ".", "join", "(", "dir", ",", "'**'", ")", ")", "return", "self", ".", "_remove_files", "(", "match", ".", "match", ")" ]
[ 438, 4 ]
[ 441, 46 ]
python
en
['en', 'en', 'en']
True
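The FileList methods above are the direct backends for MANIFEST.in commands, with _remove_files as the shared predicate-driven remover behind the exclude family. A usage sketch of the mapping; file and directory names are illustrative:

from setuptools.command.egg_info import FileList

fl = FileList()
fl.include('*.txt')                    # MANIFEST.in: include *.txt
fl.exclude('secret.txt')               # exclude secret.txt
fl.recursive_include('docs', '*.rst')  # recursive-include docs *.rst
fl.recursive_exclude('docs', '*~')     # recursive-exclude docs *~
fl.graft('examples')                   # graft examples
fl.prune('examples/broken')            # prune examples/broken
print(fl.files)                        # whatever actually matched on disk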