nodes
stringlengths
501
22.4k
edges
stringlengths
138
5.07k
code
stringlengths
108
19.3k
0, module; 1, function_definition; 2, function_name:sort; 3, parameters; 4, block; 5, identifier:self; 6, default_parameter; 7, default_parameter; 8, default_parameter; 9, expression_statement; 10, if_statement; 11, expression_statement; 12, identifier:cmp; 13, None; 14, identifier:key; 15, None; 16, identifier:reverse; 17, False; 18, comment:"""Overrides sort func to use the KeyValue for the key."""; 19, boolean_operator; 20, block; 21, call; 22, not_operator; 23, attribute; 24, expression_statement; 25, attribute; 26, argument_list; 27, identifier:key; 28, identifier:self; 29, identifier:_keys; 30, assignment; 31, call; 32, identifier:sort; 33, keyword_argument; 34, keyword_argument; 35, keyword_argument; 36, identifier:key; 37, attribute; 38, identifier:super; 39, argument_list; 40, identifier:cmp; 41, identifier:cmp; 42, identifier:key; 43, identifier:key; 44, identifier:reverse; 45, identifier:reverse; 46, identifier:self; 47, identifier:KeyValue; 48, identifier:CliTable; 49, identifier:self
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 4, 9; 4, 10; 4, 11; 6, 12; 6, 13; 7, 14; 7, 15; 8, 16; 8, 17; 9, 18; 10, 19; 10, 20; 11, 21; 19, 22; 19, 23; 20, 24; 21, 25; 21, 26; 22, 27; 23, 28; 23, 29; 24, 30; 25, 31; 25, 32; 26, 33; 26, 34; 26, 35; 30, 36; 30, 37; 31, 38; 31, 39; 33, 40; 33, 41; 34, 42; 34, 43; 35, 44; 35, 45; 37, 46; 37, 47; 39, 48; 39, 49
def sort(self, cmp=None, key=None, reverse=False): """Overrides sort func to use the KeyValue for the key.""" if not key and self._keys: key = self.KeyValue super(CliTable, self).sort(cmp=cmp, key=key, reverse=reverse)
0, module; 1, function_definition; 2, function_name:sort; 3, parameters; 4, block; 5, identifier:self; 6, default_parameter; 7, default_parameter; 8, default_parameter; 9, expression_statement; 10, function_definition; 11, expression_statement; 12, comment:# Exclude header by copying table.; 13, expression_statement; 14, if_statement; 15, expression_statement; 16, comment:# Regenerate the table with original header; 17, expression_statement; 18, expression_statement; 19, comment:# Re-write the 'row' attribute of each row; 20, for_statement; 21, identifier:cmp; 22, None; 23, identifier:key; 24, None; 25, identifier:reverse; 26, False; 27, comment:"""Sorts rows in the texttable. Args: cmp: func, non default sort algorithm to use. key: func, applied to each element before sorting. reverse: bool, reverse order of sort. """; 28, function_name:_DefaultKey; 29, parameters; 30, block; 31, assignment; 32, assignment; 33, comparison_operator:cmp is not None; 34, block; 35, call; 36, assignment; 37, call; 38, pattern_list; 39, call; 40, block; 41, identifier:value; 42, expression_statement; 43, expression_statement; 44, for_statement; 45, return_statement; 46, identifier:key; 47, boolean_operator; 48, identifier:new_table; 49, subscript; 50, identifier:cmp; 51, None; 52, expression_statement; 53, attribute; 54, argument_list; 55, attribute; 56, list; 57, attribute; 58, argument_list; 59, identifier:index; 60, identifier:row; 61, identifier:enumerate; 62, argument_list; 63, expression_statement; 64, comment:"""Default key func is to create a list of all fields."""; 65, assignment; 66, identifier:key; 67, attribute; 68, comment:# Try sorting as numerical value if possible.; 69, block; 70, identifier:result; 71, identifier:key; 72, identifier:_DefaultKey; 73, attribute; 74, slice; 75, assignment; 76, identifier:new_table; 77, identifier:sort; 78, keyword_argument; 79, keyword_argument; 80, identifier:self; 81, identifier:_table; 82, attribute; 83, attribute; 84, identifier:extend; 85, identifier:new_table; 86, attribute; 87, assignment; 88, identifier:result; 89, list; 90, identifier:self; 91, identifier:header; 92, try_statement; 93, identifier:self; 94, identifier:_table; 95, integer:1; 96, identifier:key; 97, call; 98, identifier:key; 99, identifier:key; 100, identifier:reverse; 101, identifier:reverse; 102, identifier:self; 103, identifier:header; 104, identifier:self; 105, identifier:_table; 106, identifier:self; 107, identifier:_table; 108, attribute; 109, identifier:index; 110, block; 111, except_clause; 112, identifier:cmp_to_key; 113, argument_list; 114, identifier:row; 115, identifier:row; 116, expression_statement; 117, identifier:ValueError; 118, block; 119, identifier:cmp; 120, call; 121, expression_statement; 122, attribute; 123, argument_list; 124, call; 125, identifier:result; 126, identifier:append; 127, call; 128, attribute; 129, argument_list; 130, identifier:float; 131, argument_list; 132, identifier:result; 133, identifier:append; 134, subscript; 135, subscript; 136, identifier:value; 137, identifier:key; 138, identifier:value; 139, identifier:key
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 4, 14; 4, 15; 4, 16; 4, 17; 4, 18; 4, 19; 4, 20; 6, 21; 6, 22; 7, 23; 7, 24; 8, 25; 8, 26; 9, 27; 10, 28; 10, 29; 10, 30; 11, 31; 13, 32; 14, 33; 14, 34; 15, 35; 17, 36; 18, 37; 20, 38; 20, 39; 20, 40; 29, 41; 30, 42; 30, 43; 30, 44; 30, 45; 31, 46; 31, 47; 32, 48; 32, 49; 33, 50; 33, 51; 34, 52; 35, 53; 35, 54; 36, 55; 36, 56; 37, 57; 37, 58; 38, 59; 38, 60; 39, 61; 39, 62; 40, 63; 42, 64; 43, 65; 44, 66; 44, 67; 44, 68; 44, 69; 45, 70; 47, 71; 47, 72; 49, 73; 49, 74; 52, 75; 53, 76; 53, 77; 54, 78; 54, 79; 55, 80; 55, 81; 56, 82; 57, 83; 57, 84; 58, 85; 62, 86; 63, 87; 65, 88; 65, 89; 67, 90; 67, 91; 69, 92; 73, 93; 73, 94; 74, 95; 75, 96; 75, 97; 78, 98; 78, 99; 79, 100; 79, 101; 82, 102; 82, 103; 83, 104; 83, 105; 86, 106; 86, 107; 87, 108; 87, 109; 92, 110; 92, 111; 97, 112; 97, 113; 108, 114; 108, 115; 110, 116; 111, 117; 111, 118; 113, 119; 116, 120; 118, 121; 120, 122; 120, 123; 121, 124; 122, 125; 122, 126; 123, 127; 124, 128; 124, 129; 127, 130; 127, 131; 128, 132; 128, 133; 129, 134; 131, 135; 134, 136; 134, 137; 135, 138; 135, 139
def sort(self, cmp=None, key=None, reverse=False): """Sorts rows in the texttable. Args: cmp: func, non default sort algorithm to use. key: func, applied to each element before sorting. reverse: bool, reverse order of sort. """ def _DefaultKey(value): """Default key func is to create a list of all fields.""" result = [] for key in self.header: # Try sorting as numerical value if possible. try: result.append(float(value[key])) except ValueError: result.append(value[key]) return result key = key or _DefaultKey # Exclude header by copying table. new_table = self._table[1:] if cmp is not None: key = cmp_to_key(cmp) new_table.sort(key=key, reverse=reverse) # Regenerate the table with original header self._table = [self.header] self._table.extend(new_table) # Re-write the 'row' attribute of each row for index, row in enumerate(self._table): row.row = index
0, module; 1, function_definition; 2, function_name:find_order; 3, parameters; 4, block; 5, identifier:graph; 6, expression_statement; 7, while_statement; 8, string; 9, identifier:graph; 10, comment:# Find all items without a parent; 11, block; 12, string_content:Do a topological sort on the dependency graph dict.; 13, expression_statement; 14, if_statement; 15, comment:# If there is more than one, sort them for predictable order; 16, expression_statement; 17, for_statement; 18, assignment; 19, not_operator; 20, block; 21, call; 22, identifier:result; 23, identifier:leftmost; 24, comment:# Yield and remove them from the graph; 25, block; 26, identifier:leftmost; 27, list_comprehension; 28, identifier:leftmost; 29, raise_statement; 30, attribute; 31, argument_list; 32, expression_statement; 33, expression_statement; 34, for_statement; 35, identifier:l; 36, for_in_clause; 37, if_clause; 38, call; 39, identifier:leftmost; 40, identifier:sort; 41, yield; 42, call; 43, identifier:bset; 44, call; 45, block; 46, pattern_list; 47, call; 48, not_operator; 49, identifier:ValueError; 50, argument_list; 51, identifier:result; 52, attribute; 53, argument_list; 54, attribute; 55, argument_list; 56, expression_statement; 57, identifier:l; 58, identifier:s; 59, attribute; 60, argument_list; 61, identifier:s; 62, binary_operator:'Dependency cycle detected! %s' % graph; 63, identifier:graph; 64, identifier:pop; 65, identifier:result; 66, identifier:graph; 67, identifier:values; 68, call; 69, identifier:graph; 70, identifier:items; 71, string; 72, identifier:graph; 73, attribute; 74, argument_list; 75, string_content:Dependency cycle detected! %s; 76, identifier:bset; 77, identifier:discard; 78, identifier:result
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 4, 6; 4, 7; 6, 8; 7, 9; 7, 10; 7, 11; 8, 12; 11, 13; 11, 14; 11, 15; 11, 16; 11, 17; 13, 18; 14, 19; 14, 20; 16, 21; 17, 22; 17, 23; 17, 24; 17, 25; 18, 26; 18, 27; 19, 28; 20, 29; 21, 30; 21, 31; 25, 32; 25, 33; 25, 34; 27, 35; 27, 36; 27, 37; 29, 38; 30, 39; 30, 40; 32, 41; 33, 42; 34, 43; 34, 44; 34, 45; 36, 46; 36, 47; 37, 48; 38, 49; 38, 50; 41, 51; 42, 52; 42, 53; 44, 54; 44, 55; 45, 56; 46, 57; 46, 58; 47, 59; 47, 60; 48, 61; 50, 62; 52, 63; 52, 64; 53, 65; 54, 66; 54, 67; 56, 68; 59, 69; 59, 70; 62, 71; 62, 72; 68, 73; 68, 74; 71, 75; 73, 76; 73, 77; 74, 78
def find_order(graph): ''' Do a topological sort on the dependency graph dict. ''' while graph: # Find all items without a parent leftmost = [l for l, s in graph.items() if not s] if not leftmost: raise ValueError('Dependency cycle detected! %s' % graph) # If there is more than one, sort them for predictable order leftmost.sort() for result in leftmost: # Yield and remove them from the graph yield result graph.pop(result) for bset in graph.values(): bset.discard(result)
0, module; 1, function_definition; 2, function_name:do_sort; 3, parameters; 4, block; 5, identifier:value; 6, default_parameter; 7, expression_statement; 8, if_statement; 9, return_statement; 10, identifier:case_sensitive; 11, False; 12, comment:"""Sort an iterable. If the iterable is made of strings the second parameter can be used to control the case sensitiveness of the comparison which is disabled by default. .. sourcecode:: jinja {% for item in iterable|sort %} ... {% endfor %} """; 13, not_operator; 14, block; 15, else_clause; 16, call; 17, identifier:case_sensitive; 18, function_definition; 19, block; 20, identifier:sorted; 21, argument_list; 22, function_name:sort_func; 23, parameters; 24, block; 25, expression_statement; 26, identifier:seq; 27, keyword_argument; 28, identifier:item; 29, if_statement; 30, return_statement; 31, assignment; 32, identifier:key; 33, identifier:sort_func; 34, call; 35, block; 36, identifier:item; 37, identifier:sort_func; 38, None; 39, identifier:isinstance; 40, argument_list; 41, expression_statement; 42, identifier:item; 43, identifier:basestring; 44, assignment; 45, identifier:item; 46, call; 47, attribute; 48, argument_list; 49, identifier:item; 50, identifier:lower
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 4, 9; 6, 10; 6, 11; 7, 12; 8, 13; 8, 14; 8, 15; 9, 16; 13, 17; 14, 18; 15, 19; 16, 20; 16, 21; 18, 22; 18, 23; 18, 24; 19, 25; 21, 26; 21, 27; 23, 28; 24, 29; 24, 30; 25, 31; 27, 32; 27, 33; 29, 34; 29, 35; 30, 36; 31, 37; 31, 38; 34, 39; 34, 40; 35, 41; 40, 42; 40, 43; 41, 44; 44, 45; 44, 46; 46, 47; 46, 48; 47, 49; 47, 50
def do_sort(value, case_sensitive=False): """Sort an iterable. If the iterable is made of strings the second parameter can be used to control the case sensitiveness of the comparison which is disabled by default. .. sourcecode:: jinja {% for item in iterable|sort %} ... {% endfor %} """ if not case_sensitive: def sort_func(item): if isinstance(item, basestring): item = item.lower() return item else: sort_func = None return sorted(seq, key=sort_func)
0, module; 1, function_definition; 2, function_name:dedupe; 3, parameters; 4, block; 5, identifier:contains_dupes; 6, default_parameter; 7, default_parameter; 8, expression_statement; 9, expression_statement; 10, comment:# iterate over items in *contains_dupes*; 11, for_statement; 12, comment:# uniquify *extractor* list; 13, expression_statement; 14, for_statement; 15, expression_statement; 16, comment:# check that extractor differs from contain_dupes (e.g. duplicates were found); 17, comment:# if not, then return the original list; 18, if_statement; 19, identifier:threshold; 20, integer:70; 21, identifier:scorer; 22, attribute; 23, comment:"""This convenience function takes a list of strings containing duplicates and uses fuzzy matching to identify and remove duplicates. Specifically, it uses the process.extract to identify duplicates that score greater than a user defined threshold. Then, it looks for the longest item in the duplicate list since we assume this item contains the most entity information and returns that. It breaks string length ties on an alphabetical sort. Note: as the threshold DECREASES the number of duplicates that are found INCREASES. This means that the returned deduplicated list will likely be shorter. Raise the threshold for fuzzy_dedupe to be less sensitive. Args: contains_dupes: A list of strings that we would like to dedupe. threshold: the numerical value (0,100) point at which we expect to find duplicates. Defaults to 70 out of 100 scorer: Optional function for scoring matches between the query and an individual processed choice. This should be a function of the form f(query, choice) -> int. By default, fuzz.token_set_ratio() is used and expects both query and choice to be strings. Returns: A deduplicated list. For example: In: contains_dupes = ['Frodo Baggin', 'Frodo Baggins', 'F. Baggins', 'Samwise G.', 'Gandalf', 'Bilbo Baggins'] In: fuzzy_dedupe(contains_dupes) Out: ['Frodo Baggins', 'Samwise G.', 'Bilbo Baggins', 'Gandalf'] """; 24, assignment; 25, identifier:item; 26, identifier:contains_dupes; 27, comment:# return all duplicate matches found; 28, block; 29, assignment; 30, identifier:e; 31, identifier:extractor; 32, block; 33, assignment; 34, comparison_operator:len(extractor) == len(contains_dupes); 35, block; 36, else_clause; 37, identifier:fuzz; 38, identifier:token_set_ratio; 39, identifier:extractor; 40, list; 41, expression_statement; 42, comment:# filter matches based on the threshold; 43, expression_statement; 44, comment:# if there is only 1 item in *filtered*, no duplicates were found so append to *extracted*; 45, if_statement; 46, identifier:keys; 47, dictionary; 48, expression_statement; 49, identifier:extractor; 50, call; 51, call; 52, call; 53, return_statement; 54, block; 55, assignment; 56, assignment; 57, comparison_operator:len(filtered) == 1; 58, block; 59, else_clause; 60, assignment; 61, attribute; 62, argument_list; 63, identifier:len; 64, argument_list; 65, identifier:len; 66, argument_list; 67, identifier:contains_dupes; 68, return_statement; 69, identifier:matches; 70, call; 71, identifier:filtered; 72, list_comprehension; 73, call; 74, integer:1; 75, expression_statement; 76, comment:# alpha sort; 77, block; 78, subscript; 79, integer:1; 80, identifier:keys; 81, identifier:keys; 82, identifier:extractor; 83, identifier:contains_dupes; 84, identifier:extractor; 85, identifier:extract; 86, argument_list; 87, identifier:x; 88, for_in_clause; 89, if_clause; 90, identifier:len; 91, argument_list; 92, call; 93, expression_statement; 94, comment:# length sort; 95, expression_statement; 96, comment:# take first item as our 'canonical example'; 97, expression_statement; 98, identifier:keys; 99, identifier:e; 100, identifier:item; 101, identifier:contains_dupes; 102, keyword_argument; 103, keyword_argument; 104, identifier:x; 105, identifier:matches; 106, comparison_operator:x[1] > threshold; 107, identifier:filtered; 108, attribute; 109, argument_list; 110, assignment; 111, assignment; 112, call; 113, identifier:limit; 114, None; 115, identifier:scorer; 116, identifier:scorer; 117, subscript; 118, identifier:threshold; 119, identifier:extractor; 120, identifier:append; 121, subscript; 122, identifier:filtered; 123, call; 124, identifier:filter_sort; 125, call; 126, attribute; 127, argument_list; 128, identifier:x; 129, integer:1; 130, subscript; 131, integer:0; 132, identifier:sorted; 133, argument_list; 134, identifier:sorted; 135, argument_list; 136, identifier:extractor; 137, identifier:append; 138, subscript; 139, identifier:filtered; 140, integer:0; 141, identifier:filtered; 142, keyword_argument; 143, identifier:filtered; 144, keyword_argument; 145, keyword_argument; 146, subscript; 147, integer:0; 148, identifier:key; 149, lambda; 150, identifier:key; 151, lambda; 152, identifier:reverse; 153, True; 154, identifier:filter_sort; 155, integer:0; 156, lambda_parameters; 157, subscript; 158, lambda_parameters; 159, call; 160, identifier:x; 161, identifier:x; 162, integer:0; 163, identifier:x; 164, identifier:len; 165, argument_list; 166, subscript; 167, identifier:x; 168, integer:0
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 4, 14; 4, 15; 4, 16; 4, 17; 4, 18; 6, 19; 6, 20; 7, 21; 7, 22; 8, 23; 9, 24; 11, 25; 11, 26; 11, 27; 11, 28; 13, 29; 14, 30; 14, 31; 14, 32; 15, 33; 18, 34; 18, 35; 18, 36; 22, 37; 22, 38; 24, 39; 24, 40; 28, 41; 28, 42; 28, 43; 28, 44; 28, 45; 29, 46; 29, 47; 32, 48; 33, 49; 33, 50; 34, 51; 34, 52; 35, 53; 36, 54; 41, 55; 43, 56; 45, 57; 45, 58; 45, 59; 48, 60; 50, 61; 50, 62; 51, 63; 51, 64; 52, 65; 52, 66; 53, 67; 54, 68; 55, 69; 55, 70; 56, 71; 56, 72; 57, 73; 57, 74; 58, 75; 59, 76; 59, 77; 60, 78; 60, 79; 61, 80; 61, 81; 64, 82; 66, 83; 68, 84; 70, 85; 70, 86; 72, 87; 72, 88; 72, 89; 73, 90; 73, 91; 75, 92; 77, 93; 77, 94; 77, 95; 77, 96; 77, 97; 78, 98; 78, 99; 86, 100; 86, 101; 86, 102; 86, 103; 88, 104; 88, 105; 89, 106; 91, 107; 92, 108; 92, 109; 93, 110; 95, 111; 97, 112; 102, 113; 102, 114; 103, 115; 103, 116; 106, 117; 106, 118; 108, 119; 108, 120; 109, 121; 110, 122; 110, 123; 111, 124; 111, 125; 112, 126; 112, 127; 117, 128; 117, 129; 121, 130; 121, 131; 123, 132; 123, 133; 125, 134; 125, 135; 126, 136; 126, 137; 127, 138; 130, 139; 130, 140; 133, 141; 133, 142; 135, 143; 135, 144; 135, 145; 138, 146; 138, 147; 142, 148; 142, 149; 144, 150; 144, 151; 145, 152; 145, 153; 146, 154; 146, 155; 149, 156; 149, 157; 151, 158; 151, 159; 156, 160; 157, 161; 157, 162; 158, 163; 159, 164; 159, 165; 165, 166; 166, 167; 166, 168
def dedupe(contains_dupes, threshold=70, scorer=fuzz.token_set_ratio): """This convenience function takes a list of strings containing duplicates and uses fuzzy matching to identify and remove duplicates. Specifically, it uses the process.extract to identify duplicates that score greater than a user defined threshold. Then, it looks for the longest item in the duplicate list since we assume this item contains the most entity information and returns that. It breaks string length ties on an alphabetical sort. Note: as the threshold DECREASES the number of duplicates that are found INCREASES. This means that the returned deduplicated list will likely be shorter. Raise the threshold for fuzzy_dedupe to be less sensitive. Args: contains_dupes: A list of strings that we would like to dedupe. threshold: the numerical value (0,100) point at which we expect to find duplicates. Defaults to 70 out of 100 scorer: Optional function for scoring matches between the query and an individual processed choice. This should be a function of the form f(query, choice) -> int. By default, fuzz.token_set_ratio() is used and expects both query and choice to be strings. Returns: A deduplicated list. For example: In: contains_dupes = ['Frodo Baggin', 'Frodo Baggins', 'F. Baggins', 'Samwise G.', 'Gandalf', 'Bilbo Baggins'] In: fuzzy_dedupe(contains_dupes) Out: ['Frodo Baggins', 'Samwise G.', 'Bilbo Baggins', 'Gandalf'] """ extractor = [] # iterate over items in *contains_dupes* for item in contains_dupes: # return all duplicate matches found matches = extract(item, contains_dupes, limit=None, scorer=scorer) # filter matches based on the threshold filtered = [x for x in matches if x[1] > threshold] # if there is only 1 item in *filtered*, no duplicates were found so append to *extracted* if len(filtered) == 1: extractor.append(filtered[0][0]) else: # alpha sort filtered = sorted(filtered, key=lambda x: x[0]) # length sort filter_sort = sorted(filtered, key=lambda x: len(x[0]), reverse=True) # take first item as our 'canonical example' extractor.append(filter_sort[0][0]) # uniquify *extractor* list keys = {} for e in extractor: keys[e] = 1 extractor = keys.keys() # check that extractor differs from contain_dupes (e.g. duplicates were found) # if not, then return the original list if len(extractor) == len(contains_dupes): return contains_dupes else: return extractor
0, module; 1, function_definition; 2, function_name:_process_and_sort; 3, parameters; 4, block; 5, identifier:s; 6, identifier:force_ascii; 7, default_parameter; 8, expression_statement; 9, comment:# pull tokens; 10, expression_statement; 11, expression_statement; 12, comment:# sort tokens and join; 13, expression_statement; 14, return_statement; 15, identifier:full_process; 16, True; 17, comment:"""Return a cleaned string with token sorted."""; 18, assignment; 19, assignment; 20, assignment; 21, call; 22, identifier:ts; 23, conditional_expression:utils.full_process(s, force_ascii=force_ascii) if full_process else s; 24, identifier:tokens; 25, call; 26, identifier:sorted_string; 27, call; 28, attribute; 29, argument_list; 30, call; 31, identifier:full_process; 32, identifier:s; 33, attribute; 34, argument_list; 35, attribute; 36, argument_list; 37, identifier:sorted_string; 38, identifier:strip; 39, attribute; 40, argument_list; 41, identifier:ts; 42, identifier:split; 43, string:u" "; 44, identifier:join; 45, call; 46, identifier:utils; 47, identifier:full_process; 48, identifier:s; 49, keyword_argument; 50, identifier:sorted; 51, argument_list; 52, identifier:force_ascii; 53, identifier:force_ascii; 54, identifier:tokens
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 4, 14; 7, 15; 7, 16; 8, 17; 10, 18; 11, 19; 13, 20; 14, 21; 18, 22; 18, 23; 19, 24; 19, 25; 20, 26; 20, 27; 21, 28; 21, 29; 23, 30; 23, 31; 23, 32; 25, 33; 25, 34; 27, 35; 27, 36; 28, 37; 28, 38; 30, 39; 30, 40; 33, 41; 33, 42; 35, 43; 35, 44; 36, 45; 39, 46; 39, 47; 40, 48; 40, 49; 45, 50; 45, 51; 49, 52; 49, 53; 51, 54
def _process_and_sort(s, force_ascii, full_process=True): """Return a cleaned string with token sorted.""" # pull tokens ts = utils.full_process(s, force_ascii=force_ascii) if full_process else s tokens = ts.split() # sort tokens and join sorted_string = u" ".join(sorted(tokens)) return sorted_string.strip()
0, module; 1, function_definition; 2, function_name:token_sort_ratio; 3, parameters; 4, block; 5, identifier:s1; 6, identifier:s2; 7, default_parameter; 8, default_parameter; 9, expression_statement; 10, return_statement; 11, identifier:force_ascii; 12, True; 13, identifier:full_process; 14, True; 15, comment:"""Return a measure of the sequences' similarity between 0 and 100 but sorting the token before comparing. """; 16, call; 17, identifier:_token_sort; 18, argument_list; 19, identifier:s1; 20, identifier:s2; 21, keyword_argument; 22, keyword_argument; 23, keyword_argument; 24, identifier:partial; 25, False; 26, identifier:force_ascii; 27, identifier:force_ascii; 28, identifier:full_process; 29, identifier:full_process
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 4, 9; 4, 10; 7, 11; 7, 12; 8, 13; 8, 14; 9, 15; 10, 16; 16, 17; 16, 18; 18, 19; 18, 20; 18, 21; 18, 22; 18, 23; 21, 24; 21, 25; 22, 26; 22, 27; 23, 28; 23, 29
def token_sort_ratio(s1, s2, force_ascii=True, full_process=True): """Return a measure of the sequences' similarity between 0 and 100 but sorting the token before comparing. """ return _token_sort(s1, s2, partial=False, force_ascii=force_ascii, full_process=full_process)
0, module; 1, function_definition; 2, function_name:partial_token_sort_ratio; 3, parameters; 4, block; 5, identifier:s1; 6, identifier:s2; 7, default_parameter; 8, default_parameter; 9, expression_statement; 10, return_statement; 11, identifier:force_ascii; 12, True; 13, identifier:full_process; 14, True; 15, comment:"""Return the ratio of the most similar substring as a number between 0 and 100 but sorting the token before comparing. """; 16, call; 17, identifier:_token_sort; 18, argument_list; 19, identifier:s1; 20, identifier:s2; 21, keyword_argument; 22, keyword_argument; 23, keyword_argument; 24, identifier:partial; 25, True; 26, identifier:force_ascii; 27, identifier:force_ascii; 28, identifier:full_process; 29, identifier:full_process
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 4, 9; 4, 10; 7, 11; 7, 12; 8, 13; 8, 14; 9, 15; 10, 16; 16, 17; 16, 18; 18, 19; 18, 20; 18, 21; 18, 22; 18, 23; 21, 24; 21, 25; 22, 26; 22, 27; 23, 28; 23, 29
def partial_token_sort_ratio(s1, s2, force_ascii=True, full_process=True): """Return the ratio of the most similar substring as a number between 0 and 100 but sorting the token before comparing. """ return _token_sort(s1, s2, partial=True, force_ascii=force_ascii, full_process=full_process)
0, module; 1, function_definition; 2, function_name:WRatio; 3, parameters; 4, block; 5, identifier:s1; 6, identifier:s2; 7, default_parameter; 8, default_parameter; 9, expression_statement; 10, if_statement; 11, if_statement; 12, if_statement; 13, comment:# should we look at partials?; 14, expression_statement; 15, expression_statement; 16, expression_statement; 17, expression_statement; 18, expression_statement; 19, comment:# if strings are similar length, don't use partials; 20, if_statement; 21, comment:# if one string is much much shorter than the other; 22, if_statement; 23, if_statement; 24, identifier:force_ascii; 25, True; 26, identifier:full_process; 27, True; 28, comment:""" Return a measure of the sequences' similarity between 0 and 100, using different algorithms. **Steps in the order they occur** #. Run full_process from utils on both strings #. Short circuit if this makes either string empty #. Take the ratio of the two processed strings (fuzz.ratio) #. Run checks to compare the length of the strings * If one of the strings is more than 1.5 times as long as the other use partial_ratio comparisons - scale partial results by 0.9 (this makes sure only full results can return 100) * If one of the strings is over 8 times as long as the other instead scale by 0.6 #. Run the other ratio functions * if using partial ratio functions call partial_ratio, partial_token_sort_ratio and partial_token_set_ratio scale all of these by the ratio based on length * otherwise call token_sort_ratio and token_set_ratio * all token based comparisons are scaled by 0.95 (on top of any partial scalars) #. Take the highest value from these results round it and return it as an integer. :param s1: :param s2: :param force_ascii: Allow only ascii characters :type force_ascii: bool :full_process: Process inputs, used here to avoid double processing in extract functions (Default: True) :return: """; 29, identifier:full_process; 30, block; 31, else_clause; 32, not_operator; 33, block; 34, not_operator; 35, block; 36, assignment; 37, assignment; 38, assignment; 39, assignment; 40, assignment; 41, comparison_operator:len_ratio < 1.5; 42, block; 43, comparison_operator:len_ratio > 8; 44, block; 45, identifier:try_partial; 46, block; 47, else_clause; 48, expression_statement; 49, expression_statement; 50, block; 51, call; 52, return_statement; 53, call; 54, return_statement; 55, identifier:try_partial; 56, True; 57, identifier:unbase_scale; 58, float:.95; 59, identifier:partial_scale; 60, float:.90; 61, identifier:base; 62, call; 63, identifier:len_ratio; 64, binary_operator:float(max(len(p1), len(p2))) / min(len(p1), len(p2)); 65, identifier:len_ratio; 66, float:1.5; 67, expression_statement; 68, identifier:len_ratio; 69, integer:8; 70, expression_statement; 71, expression_statement; 72, expression_statement; 73, expression_statement; 74, return_statement; 75, block; 76, assignment; 77, assignment; 78, expression_statement; 79, expression_statement; 80, attribute; 81, argument_list; 82, integer:0; 83, attribute; 84, argument_list; 85, integer:0; 86, identifier:ratio; 87, argument_list; 88, call; 89, call; 90, assignment; 91, assignment; 92, assignment; 93, assignment; 94, assignment; 95, call; 96, expression_statement; 97, expression_statement; 98, return_statement; 99, identifier:p1; 100, call; 101, identifier:p2; 102, call; 103, assignment; 104, assignment; 105, identifier:utils; 106, identifier:validate_string; 107, identifier:p1; 108, identifier:utils; 109, identifier:validate_string; 110, identifier:p2; 111, identifier:p1; 112, identifier:p2; 113, identifier:float; 114, argument_list; 115, identifier:min; 116, argument_list; 117, identifier:try_partial; 118, False; 119, identifier:partial_scale; 120, float:.6; 121, identifier:partial; 122, binary_operator:partial_ratio(p1, p2) * partial_scale; 123, identifier:ptsor; 124, binary_operator:partial_token_sort_ratio(p1, p2, full_process=False) \ * unbase_scale * partial_scale; 125, identifier:ptser; 126, binary_operator:partial_token_set_ratio(p1, p2, full_process=False) \ * unbase_scale * partial_scale; 127, attribute; 128, argument_list; 129, assignment; 130, assignment; 131, call; 132, attribute; 133, argument_list; 134, attribute; 135, argument_list; 136, identifier:p1; 137, identifier:s1; 138, identifier:p2; 139, identifier:s2; 140, call; 141, call; 142, call; 143, call; 144, identifier:partial_scale; 145, binary_operator:partial_token_sort_ratio(p1, p2, full_process=False) \ * unbase_scale; 146, identifier:partial_scale; 147, binary_operator:partial_token_set_ratio(p1, p2, full_process=False) \ * unbase_scale; 148, identifier:partial_scale; 149, identifier:utils; 150, identifier:intr; 151, call; 152, identifier:tsor; 153, binary_operator:token_sort_ratio(p1, p2, full_process=False) * unbase_scale; 154, identifier:tser; 155, binary_operator:token_set_ratio(p1, p2, full_process=False) * unbase_scale; 156, attribute; 157, argument_list; 158, identifier:utils; 159, identifier:full_process; 160, identifier:s1; 161, keyword_argument; 162, identifier:utils; 163, identifier:full_process; 164, identifier:s2; 165, keyword_argument; 166, identifier:max; 167, argument_list; 168, identifier:len; 169, argument_list; 170, identifier:len; 171, argument_list; 172, identifier:partial_ratio; 173, argument_list; 174, call; 175, line_continuation:\; 176, identifier:unbase_scale; 177, call; 178, line_continuation:\; 179, identifier:unbase_scale; 180, identifier:max; 181, argument_list; 182, call; 183, identifier:unbase_scale; 184, call; 185, identifier:unbase_scale; 186, identifier:utils; 187, identifier:intr; 188, call; 189, identifier:force_ascii; 190, identifier:force_ascii; 191, identifier:force_ascii; 192, identifier:force_ascii; 193, call; 194, call; 195, identifier:p1; 196, identifier:p2; 197, identifier:p1; 198, identifier:p2; 199, identifier:partial_token_sort_ratio; 200, argument_list; 201, identifier:partial_token_set_ratio; 202, argument_list; 203, identifier:base; 204, identifier:partial; 205, identifier:ptsor; 206, identifier:ptser; 207, identifier:token_sort_ratio; 208, argument_list; 209, identifier:token_set_ratio; 210, argument_list; 211, identifier:max; 212, argument_list; 213, identifier:len; 214, argument_list; 215, identifier:len; 216, argument_list; 217, identifier:p1; 218, identifier:p2; 219, keyword_argument; 220, identifier:p1; 221, identifier:p2; 222, keyword_argument; 223, identifier:p1; 224, identifier:p2; 225, keyword_argument; 226, identifier:p1; 227, identifier:p2; 228, keyword_argument; 229, identifier:base; 230, identifier:tsor; 231, identifier:tser; 232, identifier:p1; 233, identifier:p2; 234, identifier:full_process; 235, False; 236, identifier:full_process; 237, False; 238, identifier:full_process; 239, False; 240, identifier:full_process; 241, False
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 4, 14; 4, 15; 4, 16; 4, 17; 4, 18; 4, 19; 4, 20; 4, 21; 4, 22; 4, 23; 7, 24; 7, 25; 8, 26; 8, 27; 9, 28; 10, 29; 10, 30; 10, 31; 11, 32; 11, 33; 12, 34; 12, 35; 14, 36; 15, 37; 16, 38; 17, 39; 18, 40; 20, 41; 20, 42; 22, 43; 22, 44; 23, 45; 23, 46; 23, 47; 30, 48; 30, 49; 31, 50; 32, 51; 33, 52; 34, 53; 35, 54; 36, 55; 36, 56; 37, 57; 37, 58; 38, 59; 38, 60; 39, 61; 39, 62; 40, 63; 40, 64; 41, 65; 41, 66; 42, 67; 43, 68; 43, 69; 44, 70; 46, 71; 46, 72; 46, 73; 46, 74; 47, 75; 48, 76; 49, 77; 50, 78; 50, 79; 51, 80; 51, 81; 52, 82; 53, 83; 53, 84; 54, 85; 62, 86; 62, 87; 64, 88; 64, 89; 67, 90; 70, 91; 71, 92; 72, 93; 73, 94; 74, 95; 75, 96; 75, 97; 75, 98; 76, 99; 76, 100; 77, 101; 77, 102; 78, 103; 79, 104; 80, 105; 80, 106; 81, 107; 83, 108; 83, 109; 84, 110; 87, 111; 87, 112; 88, 113; 88, 114; 89, 115; 89, 116; 90, 117; 90, 118; 91, 119; 91, 120; 92, 121; 92, 122; 93, 123; 93, 124; 94, 125; 94, 126; 95, 127; 95, 128; 96, 129; 97, 130; 98, 131; 100, 132; 100, 133; 102, 134; 102, 135; 103, 136; 103, 137; 104, 138; 104, 139; 114, 140; 116, 141; 116, 142; 122, 143; 122, 144; 124, 145; 124, 146; 126, 147; 126, 148; 127, 149; 127, 150; 128, 151; 129, 152; 129, 153; 130, 154; 130, 155; 131, 156; 131, 157; 132, 158; 132, 159; 133, 160; 133, 161; 134, 162; 134, 163; 135, 164; 135, 165; 140, 166; 140, 167; 141, 168; 141, 169; 142, 170; 142, 171; 143, 172; 143, 173; 145, 174; 145, 175; 145, 176; 147, 177; 147, 178; 147, 179; 151, 180; 151, 181; 153, 182; 153, 183; 155, 184; 155, 185; 156, 186; 156, 187; 157, 188; 161, 189; 161, 190; 165, 191; 165, 192; 167, 193; 167, 194; 169, 195; 171, 196; 173, 197; 173, 198; 174, 199; 174, 200; 177, 201; 177, 202; 181, 203; 181, 204; 181, 205; 181, 206; 182, 207; 182, 208; 184, 209; 184, 210; 188, 211; 188, 212; 193, 213; 193, 214; 194, 215; 194, 216; 200, 217; 200, 218; 200, 219; 202, 220; 202, 221; 202, 222; 208, 223; 208, 224; 208, 225; 210, 226; 210, 227; 210, 228; 212, 229; 212, 230; 212, 231; 214, 232; 216, 233; 219, 234; 219, 235; 222, 236; 222, 237; 225, 238; 225, 239; 228, 240; 228, 241
def WRatio(s1, s2, force_ascii=True, full_process=True): """ Return a measure of the sequences' similarity between 0 and 100, using different algorithms. **Steps in the order they occur** #. Run full_process from utils on both strings #. Short circuit if this makes either string empty #. Take the ratio of the two processed strings (fuzz.ratio) #. Run checks to compare the length of the strings * If one of the strings is more than 1.5 times as long as the other use partial_ratio comparisons - scale partial results by 0.9 (this makes sure only full results can return 100) * If one of the strings is over 8 times as long as the other instead scale by 0.6 #. Run the other ratio functions * if using partial ratio functions call partial_ratio, partial_token_sort_ratio and partial_token_set_ratio scale all of these by the ratio based on length * otherwise call token_sort_ratio and token_set_ratio * all token based comparisons are scaled by 0.95 (on top of any partial scalars) #. Take the highest value from these results round it and return it as an integer. :param s1: :param s2: :param force_ascii: Allow only ascii characters :type force_ascii: bool :full_process: Process inputs, used here to avoid double processing in extract functions (Default: True) :return: """ if full_process: p1 = utils.full_process(s1, force_ascii=force_ascii) p2 = utils.full_process(s2, force_ascii=force_ascii) else: p1 = s1 p2 = s2 if not utils.validate_string(p1): return 0 if not utils.validate_string(p2): return 0 # should we look at partials? try_partial = True unbase_scale = .95 partial_scale = .90 base = ratio(p1, p2) len_ratio = float(max(len(p1), len(p2))) / min(len(p1), len(p2)) # if strings are similar length, don't use partials if len_ratio < 1.5: try_partial = False # if one string is much much shorter than the other if len_ratio > 8: partial_scale = .6 if try_partial: partial = partial_ratio(p1, p2) * partial_scale ptsor = partial_token_sort_ratio(p1, p2, full_process=False) \ * unbase_scale * partial_scale ptser = partial_token_set_ratio(p1, p2, full_process=False) \ * unbase_scale * partial_scale return utils.intr(max(base, partial, ptsor, ptser)) else: tsor = token_sort_ratio(p1, p2, full_process=False) * unbase_scale tser = token_set_ratio(p1, p2, full_process=False) * unbase_scale return utils.intr(max(base, tsor, tser))
0, module; 1, function_definition; 2, function_name:sort_depth; 3, parameters; 4, block; 5, identifier:vals; 6, default_parameter; 7, expression_statement; 8, expression_statement; 9, expression_statement; 10, return_statement; 11, identifier:reverse; 12, False; 13, comment:"""Sort bids or asks by price """; 14, assignment; 15, assignment; 16, identifier:lst; 17, identifier:lst; 18, list_comprehension; 19, identifier:lst; 20, call; 21, list; 22, for_in_clause; 23, identifier:sorted; 24, argument_list; 25, call; 26, identifier:quantity; 27, pattern_list; 28, call; 29, identifier:lst; 30, keyword_argument; 31, keyword_argument; 32, identifier:float; 33, argument_list; 34, identifier:price; 35, identifier:quantity; 36, attribute; 37, argument_list; 38, identifier:key; 39, call; 40, identifier:reverse; 41, identifier:reverse; 42, identifier:price; 43, identifier:vals; 44, identifier:items; 45, identifier:itemgetter; 46, argument_list; 47, integer:0
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 4, 9; 4, 10; 6, 11; 6, 12; 7, 13; 8, 14; 9, 15; 10, 16; 14, 17; 14, 18; 15, 19; 15, 20; 18, 21; 18, 22; 20, 23; 20, 24; 21, 25; 21, 26; 22, 27; 22, 28; 24, 29; 24, 30; 24, 31; 25, 32; 25, 33; 27, 34; 27, 35; 28, 36; 28, 37; 30, 38; 30, 39; 31, 40; 31, 41; 33, 42; 36, 43; 36, 44; 39, 45; 39, 46; 46, 47
def sort_depth(vals, reverse=False): """Sort bids or asks by price """ lst = [[float(price), quantity] for price, quantity in vals.items()] lst = sorted(lst, key=itemgetter(0), reverse=reverse) return lst
0, module; 1, function_definition; 2, function_name:_get_fields; 3, parameters; 4, block; 5, identifier:attrs; 6, identifier:field_class; 7, default_parameter; 8, default_parameter; 9, expression_statement; 10, expression_statement; 11, if_statement; 12, if_statement; 13, return_statement; 14, identifier:pop; 15, False; 16, identifier:ordered; 17, False; 18, comment:"""Get fields from a class. If ordered=True, fields will sorted by creation index. :param attrs: Mapping of class attributes :param type field_class: Base field class :param bool pop: Remove matching fields """; 19, assignment; 20, identifier:pop; 21, block; 22, identifier:ordered; 23, block; 24, identifier:fields; 25, identifier:fields; 26, list_comprehension; 27, for_statement; 28, expression_statement; 29, tuple; 30, for_in_clause; 31, if_clause; 32, pattern_list; 33, identifier:fields; 34, block; 35, call; 36, identifier:field_name; 37, identifier:field_value; 38, pattern_list; 39, call; 40, call; 41, identifier:field_name; 42, identifier:_; 43, delete_statement; 44, attribute; 45, argument_list; 46, identifier:field_name; 47, identifier:field_value; 48, identifier:iteritems; 49, argument_list; 50, identifier:is_instance_or_subclass; 51, argument_list; 52, subscript; 53, identifier:fields; 54, identifier:sort; 55, keyword_argument; 56, identifier:attrs; 57, identifier:field_value; 58, identifier:field_class; 59, identifier:attrs; 60, identifier:field_name; 61, identifier:key; 62, lambda; 63, lambda_parameters; 64, attribute; 65, identifier:pair; 66, subscript; 67, identifier:_creation_index; 68, identifier:pair; 69, integer:1
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 7, 14; 7, 15; 8, 16; 8, 17; 9, 18; 10, 19; 11, 20; 11, 21; 12, 22; 12, 23; 13, 24; 19, 25; 19, 26; 21, 27; 23, 28; 26, 29; 26, 30; 26, 31; 27, 32; 27, 33; 27, 34; 28, 35; 29, 36; 29, 37; 30, 38; 30, 39; 31, 40; 32, 41; 32, 42; 34, 43; 35, 44; 35, 45; 38, 46; 38, 47; 39, 48; 39, 49; 40, 50; 40, 51; 43, 52; 44, 53; 44, 54; 45, 55; 49, 56; 51, 57; 51, 58; 52, 59; 52, 60; 55, 61; 55, 62; 62, 63; 62, 64; 63, 65; 64, 66; 64, 67; 66, 68; 66, 69
def _get_fields(attrs, field_class, pop=False, ordered=False): """Get fields from a class. If ordered=True, fields will sorted by creation index. :param attrs: Mapping of class attributes :param type field_class: Base field class :param bool pop: Remove matching fields """ fields = [ (field_name, field_value) for field_name, field_value in iteritems(attrs) if is_instance_or_subclass(field_value, field_class) ] if pop: for field_name, _ in fields: del attrs[field_name] if ordered: fields.sort(key=lambda pair: pair[1]._creation_index) return fields
0, module; 1, function_definition; 2, function_name:extract_features; 3, parameters; 4, block; 5, identifier:timeseries_container; 6, default_parameter; 7, default_parameter; 8, default_parameter; 9, default_parameter; 10, default_parameter; 11, default_parameter; 12, default_parameter; 13, default_parameter; 14, default_parameter; 15, default_parameter; 16, default_parameter; 17, default_parameter; 18, default_parameter; 19, default_parameter; 20, default_parameter; 21, expression_statement; 22, comment:# Always use the standardized way of storing the data.; 23, comment:# See the function normalize_input_to_internal_representation for more information.; 24, expression_statement; 25, comment:# Use the standard setting if the user did not supply ones himself.; 26, if_statement; 27, comment:# If requested, do profiling (advanced feature); 28, if_statement; 29, with_statement; 30, comment:# Turn off profiling if it was turned on; 31, if_statement; 32, return_statement; 33, identifier:default_fc_parameters; 34, None; 35, identifier:kind_to_fc_parameters; 36, None; 37, identifier:column_id; 38, None; 39, identifier:column_sort; 40, None; 41, identifier:column_kind; 42, None; 43, identifier:column_value; 44, None; 45, identifier:chunksize; 46, attribute; 47, identifier:n_jobs; 48, attribute; 49, identifier:show_warnings; 50, attribute; 51, identifier:disable_progressbar; 52, attribute; 53, identifier:impute_function; 54, attribute; 55, identifier:profile; 56, attribute; 57, identifier:profiling_filename; 58, attribute; 59, identifier:profiling_sorting; 60, attribute; 61, identifier:distributor; 62, None; 63, comment:""" Extract features from * a :class:`pandas.DataFrame` containing the different time series or * a dictionary of :class:`pandas.DataFrame` each containing one type of time series In both cases a :class:`pandas.DataFrame` with the calculated features will be returned. For a list of all the calculated time series features, please see the :class:`~tsfresh.feature_extraction.settings.ComprehensiveFCParameters` class, which is used to control which features with which parameters are calculated. For a detailed explanation of the different parameters and data formats please see :ref:`data-formats-label`. Examples ======== >>> from tsfresh.examples import load_robot_execution_failures >>> from tsfresh import extract_features >>> df, _ = load_robot_execution_failures() >>> X = extract_features(df, column_id='id', column_sort='time') :param timeseries_container: The pandas.DataFrame with the time series to compute the features for, or a dictionary of pandas.DataFrames. :type timeseries_container: pandas.DataFrame or dict :param default_fc_parameters: mapping from feature calculator names to parameters. Only those names which are keys in this dict will be calculated. See the class:`ComprehensiveFCParameters` for more information. :type default_fc_parameters: dict :param kind_to_fc_parameters: mapping from kind names to objects of the same type as the ones for default_fc_parameters. If you put a kind as a key here, the fc_parameters object (which is the value), will be used instead of the default_fc_parameters. This means that kinds, for which kind_of_fc_parameters doe not have any entries, will be ignored by the feature selection. :type kind_to_fc_parameters: dict :param column_id: The name of the id column to group by. :type column_id: str :param column_sort: The name of the sort column. :type column_sort: str :param column_kind: The name of the column keeping record on the kind of the value. :type column_kind: str :param column_value: The name for the column keeping the value itself. :type column_value: str :param n_jobs: The number of processes to use for parallelization. If zero, no parallelization is used. :type n_jobs: int :param chunksize: The size of one chunk that is submitted to the worker process for the parallelisation. Where one chunk is defined as a singular time series for one id and one kind. If you set the chunksize to 10, then it means that one task is to calculate all features for 10 time series. If it is set it to None, depending on distributor, heuristics are used to find the optimal chunksize. If you get out of memory exceptions, you can try it with the dask distributor and a smaller chunksize. :type chunksize: None or int :param: show_warnings: Show warnings during the feature extraction (needed for debugging of calculators). :type show_warnings: bool :param disable_progressbar: Do not show a progressbar while doing the calculation. :type disable_progressbar: bool :param impute_function: None, if no imputing should happen or the function to call for imputing. :type impute_function: None or callable :param profile: Turn on profiling during feature extraction :type profile: bool :param profiling_sorting: How to sort the profiling results (see the documentation of the profiling package for more information) :type profiling_sorting: basestring :param profiling_filename: Where to save the profiling results. :type profiling_filename: basestring :param distributor: Advanced parameter: set this to a class name that you want to use as a distributor. See the utilities/distribution.py for more information. Leave to None, if you want TSFresh to choose the best distributor. :type distributor: class :return: The (maybe imputed) DataFrame containing extracted features. :rtype: pandas.DataFrame """; 64, assignment; 65, boolean_operator; 66, block; 67, elif_clause; 68, identifier:profile; 69, block; 70, with_clause; 71, block; 72, identifier:profile; 73, block; 74, identifier:result; 75, identifier:defaults; 76, identifier:CHUNKSIZE; 77, identifier:defaults; 78, identifier:N_PROCESSES; 79, identifier:defaults; 80, identifier:SHOW_WARNINGS; 81, identifier:defaults; 82, identifier:DISABLE_PROGRESSBAR; 83, identifier:defaults; 84, identifier:IMPUTE_FUNCTION; 85, identifier:defaults; 86, identifier:PROFILING; 87, identifier:defaults; 88, identifier:PROFILING_FILENAME; 89, identifier:defaults; 90, identifier:PROFILING_SORTING; 91, pattern_list; 92, line_continuation:\; 93, call; 94, comparison_operator:default_fc_parameters is None; 95, comparison_operator:kind_to_fc_parameters is None; 96, expression_statement; 97, boolean_operator; 98, block; 99, expression_statement; 100, with_item; 101, if_statement; 102, expression_statement; 103, comment:# Impute the result if requested; 104, if_statement; 105, expression_statement; 106, identifier:df_melt; 107, identifier:column_id; 108, identifier:column_kind; 109, identifier:column_value; 110, attribute; 111, argument_list; 112, identifier:default_fc_parameters; 113, None; 114, identifier:kind_to_fc_parameters; 115, None; 116, assignment; 117, comparison_operator:default_fc_parameters is None; 118, comparison_operator:kind_to_fc_parameters is not None; 119, expression_statement; 120, assignment; 121, call; 122, not_operator; 123, block; 124, else_clause; 125, assignment; 126, comparison_operator:impute_function is not None; 127, block; 128, call; 129, identifier:dataframe_functions; 130, identifier:_normalize_input_to_internal_representation; 131, keyword_argument; 132, keyword_argument; 133, keyword_argument; 134, keyword_argument; 135, keyword_argument; 136, identifier:default_fc_parameters; 137, call; 138, identifier:default_fc_parameters; 139, None; 140, identifier:kind_to_fc_parameters; 141, None; 142, assignment; 143, identifier:profiler; 144, call; 145, attribute; 146, argument_list; 147, identifier:show_warnings; 148, expression_statement; 149, block; 150, identifier:result; 151, call; 152, identifier:impute_function; 153, None; 154, expression_statement; 155, attribute; 156, argument_list; 157, identifier:timeseries_container; 158, identifier:timeseries_container; 159, identifier:column_id; 160, identifier:column_id; 161, identifier:column_kind; 162, identifier:column_kind; 163, identifier:column_sort; 164, identifier:column_sort; 165, identifier:column_value; 166, identifier:column_value; 167, identifier:ComprehensiveFCParameters; 168, argument_list; 169, identifier:default_fc_parameters; 170, dictionary; 171, attribute; 172, argument_list; 173, identifier:warnings; 174, identifier:catch_warnings; 175, call; 176, expression_statement; 177, identifier:_do_extraction; 178, argument_list; 179, call; 180, identifier:profiling; 181, identifier:end_profiling; 182, identifier:profiler; 183, keyword_argument; 184, keyword_argument; 185, identifier:profiling; 186, identifier:start_profiling; 187, attribute; 188, argument_list; 189, call; 190, keyword_argument; 191, keyword_argument; 192, keyword_argument; 193, keyword_argument; 194, keyword_argument; 195, keyword_argument; 196, keyword_argument; 197, keyword_argument; 198, keyword_argument; 199, keyword_argument; 200, identifier:impute_function; 201, argument_list; 202, identifier:filename; 203, identifier:profiling_filename; 204, identifier:sorting; 205, identifier:profiling_sorting; 206, identifier:warnings; 207, identifier:simplefilter; 208, string:"ignore"; 209, attribute; 210, argument_list; 211, identifier:df; 212, identifier:df_melt; 213, identifier:column_id; 214, identifier:column_id; 215, identifier:column_value; 216, identifier:column_value; 217, identifier:column_kind; 218, identifier:column_kind; 219, identifier:n_jobs; 220, identifier:n_jobs; 221, identifier:chunk_size; 222, identifier:chunksize; 223, identifier:disable_progressbar; 224, identifier:disable_progressbar; 225, identifier:default_fc_parameters; 226, identifier:default_fc_parameters; 227, identifier:kind_to_fc_parameters; 228, identifier:kind_to_fc_parameters; 229, identifier:distributor; 230, identifier:distributor; 231, identifier:result; 232, identifier:warnings; 233, identifier:simplefilter; 234, string:"default"
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 3, 9; 3, 10; 3, 11; 3, 12; 3, 13; 3, 14; 3, 15; 3, 16; 3, 17; 3, 18; 3, 19; 3, 20; 4, 21; 4, 22; 4, 23; 4, 24; 4, 25; 4, 26; 4, 27; 4, 28; 4, 29; 4, 30; 4, 31; 4, 32; 6, 33; 6, 34; 7, 35; 7, 36; 8, 37; 8, 38; 9, 39; 9, 40; 10, 41; 10, 42; 11, 43; 11, 44; 12, 45; 12, 46; 13, 47; 13, 48; 14, 49; 14, 50; 15, 51; 15, 52; 16, 53; 16, 54; 17, 55; 17, 56; 18, 57; 18, 58; 19, 59; 19, 60; 20, 61; 20, 62; 21, 63; 24, 64; 26, 65; 26, 66; 26, 67; 28, 68; 28, 69; 29, 70; 29, 71; 31, 72; 31, 73; 32, 74; 46, 75; 46, 76; 48, 77; 48, 78; 50, 79; 50, 80; 52, 81; 52, 82; 54, 83; 54, 84; 56, 85; 56, 86; 58, 87; 58, 88; 60, 89; 60, 90; 64, 91; 64, 92; 64, 93; 65, 94; 65, 95; 66, 96; 67, 97; 67, 98; 69, 99; 70, 100; 71, 101; 71, 102; 71, 103; 71, 104; 73, 105; 91, 106; 91, 107; 91, 108; 91, 109; 93, 110; 93, 111; 94, 112; 94, 113; 95, 114; 95, 115; 96, 116; 97, 117; 97, 118; 98, 119; 99, 120; 100, 121; 101, 122; 101, 123; 101, 124; 102, 125; 104, 126; 104, 127; 105, 128; 110, 129; 110, 130; 111, 131; 111, 132; 111, 133; 111, 134; 111, 135; 116, 136; 116, 137; 117, 138; 117, 139; 118, 140; 118, 141; 119, 142; 120, 143; 120, 144; 121, 145; 121, 146; 122, 147; 123, 148; 124, 149; 125, 150; 125, 151; 126, 152; 126, 153; 127, 154; 128, 155; 128, 156; 131, 157; 131, 158; 132, 159; 132, 160; 133, 161; 133, 162; 134, 163; 134, 164; 135, 165; 135, 166; 137, 167; 137, 168; 142, 169; 142, 170; 144, 171; 144, 172; 145, 173; 145, 174; 148, 175; 149, 176; 151, 177; 151, 178; 154, 179; 155, 180; 155, 181; 156, 182; 156, 183; 156, 184; 171, 185; 171, 186; 175, 187; 175, 188; 176, 189; 178, 190; 178, 191; 178, 192; 178, 193; 178, 194; 178, 195; 178, 196; 178, 197; 178, 198; 178, 199; 179, 200; 179, 201; 183, 202; 183, 203; 184, 204; 184, 205; 187, 206; 187, 207; 188, 208; 189, 209; 189, 210; 190, 211; 190, 212; 191, 213; 191, 214; 192, 215; 192, 216; 193, 217; 193, 218; 194, 219; 194, 220; 195, 221; 195, 222; 196, 223; 196, 224; 197, 225; 197, 226; 198, 227; 198, 228; 199, 229; 199, 230; 201, 231; 209, 232; 209, 233; 210, 234
def extract_features(timeseries_container, default_fc_parameters=None, kind_to_fc_parameters=None, column_id=None, column_sort=None, column_kind=None, column_value=None, chunksize=defaults.CHUNKSIZE, n_jobs=defaults.N_PROCESSES, show_warnings=defaults.SHOW_WARNINGS, disable_progressbar=defaults.DISABLE_PROGRESSBAR, impute_function=defaults.IMPUTE_FUNCTION, profile=defaults.PROFILING, profiling_filename=defaults.PROFILING_FILENAME, profiling_sorting=defaults.PROFILING_SORTING, distributor=None): """ Extract features from * a :class:`pandas.DataFrame` containing the different time series or * a dictionary of :class:`pandas.DataFrame` each containing one type of time series In both cases a :class:`pandas.DataFrame` with the calculated features will be returned. For a list of all the calculated time series features, please see the :class:`~tsfresh.feature_extraction.settings.ComprehensiveFCParameters` class, which is used to control which features with which parameters are calculated. For a detailed explanation of the different parameters and data formats please see :ref:`data-formats-label`. Examples ======== >>> from tsfresh.examples import load_robot_execution_failures >>> from tsfresh import extract_features >>> df, _ = load_robot_execution_failures() >>> X = extract_features(df, column_id='id', column_sort='time') :param timeseries_container: The pandas.DataFrame with the time series to compute the features for, or a dictionary of pandas.DataFrames. :type timeseries_container: pandas.DataFrame or dict :param default_fc_parameters: mapping from feature calculator names to parameters. Only those names which are keys in this dict will be calculated. See the class:`ComprehensiveFCParameters` for more information. :type default_fc_parameters: dict :param kind_to_fc_parameters: mapping from kind names to objects of the same type as the ones for default_fc_parameters. If you put a kind as a key here, the fc_parameters object (which is the value), will be used instead of the default_fc_parameters. This means that kinds, for which kind_of_fc_parameters doe not have any entries, will be ignored by the feature selection. :type kind_to_fc_parameters: dict :param column_id: The name of the id column to group by. :type column_id: str :param column_sort: The name of the sort column. :type column_sort: str :param column_kind: The name of the column keeping record on the kind of the value. :type column_kind: str :param column_value: The name for the column keeping the value itself. :type column_value: str :param n_jobs: The number of processes to use for parallelization. If zero, no parallelization is used. :type n_jobs: int :param chunksize: The size of one chunk that is submitted to the worker process for the parallelisation. Where one chunk is defined as a singular time series for one id and one kind. If you set the chunksize to 10, then it means that one task is to calculate all features for 10 time series. If it is set it to None, depending on distributor, heuristics are used to find the optimal chunksize. If you get out of memory exceptions, you can try it with the dask distributor and a smaller chunksize. :type chunksize: None or int :param: show_warnings: Show warnings during the feature extraction (needed for debugging of calculators). :type show_warnings: bool :param disable_progressbar: Do not show a progressbar while doing the calculation. :type disable_progressbar: bool :param impute_function: None, if no imputing should happen or the function to call for imputing. :type impute_function: None or callable :param profile: Turn on profiling during feature extraction :type profile: bool :param profiling_sorting: How to sort the profiling results (see the documentation of the profiling package for more information) :type profiling_sorting: basestring :param profiling_filename: Where to save the profiling results. :type profiling_filename: basestring :param distributor: Advanced parameter: set this to a class name that you want to use as a distributor. See the utilities/distribution.py for more information. Leave to None, if you want TSFresh to choose the best distributor. :type distributor: class :return: The (maybe imputed) DataFrame containing extracted features. :rtype: pandas.DataFrame """ # Always use the standardized way of storing the data. # See the function normalize_input_to_internal_representation for more information. df_melt, column_id, column_kind, column_value = \ dataframe_functions._normalize_input_to_internal_representation( timeseries_container=timeseries_container, column_id=column_id, column_kind=column_kind, column_sort=column_sort, column_value=column_value) # Use the standard setting if the user did not supply ones himself. if default_fc_parameters is None and kind_to_fc_parameters is None: default_fc_parameters = ComprehensiveFCParameters() elif default_fc_parameters is None and kind_to_fc_parameters is not None: default_fc_parameters = {} # If requested, do profiling (advanced feature) if profile: profiler = profiling.start_profiling() with warnings.catch_warnings(): if not show_warnings: warnings.simplefilter("ignore") else: warnings.simplefilter("default") result = _do_extraction(df=df_melt, column_id=column_id, column_value=column_value, column_kind=column_kind, n_jobs=n_jobs, chunk_size=chunksize, disable_progressbar=disable_progressbar, default_fc_parameters=default_fc_parameters, kind_to_fc_parameters=kind_to_fc_parameters, distributor=distributor) # Impute the result if requested if impute_function is not None: impute_function(result) # Turn off profiling if it was turned on if profile: profiling.end_profiling(profiler, filename=profiling_filename, sorting=profiling_sorting) return result
0, module; 1, function_definition; 2, function_name:convert_to_output_format; 3, parameters; 4, block; 5, identifier:param; 6, expression_statement; 7, function_definition; 8, return_statement; 9, comment:""" Helper function to convert parameters to a valid string, that can be used in a column name. Does the opposite which is used in the from_columns function. The parameters are sorted by their name and written out in the form <param name>_<param value>__<param name>_<param value>__ ... If a <param_value> is a string, this method will wrap it with parenthesis ", so "<param_value>" :param param: The dictionary of parameters to write out :type param: dict :return: The string of parsed parameters :rtype: str """; 10, function_name:add_parenthesis_if_string_value; 11, parameters; 12, block; 13, call; 14, identifier:x; 15, if_statement; 16, attribute; 17, generator_expression; 18, call; 19, block; 20, else_clause; 21, string:"__"; 22, identifier:join; 23, binary_operator:str(key) + "_" + add_parenthesis_if_string_value(param[key]); 24, for_in_clause; 25, identifier:isinstance; 26, argument_list; 27, return_statement; 28, block; 29, binary_operator:str(key) + "_"; 30, call; 31, identifier:key; 32, call; 33, identifier:x; 34, identifier:string_types; 35, binary_operator:'"' + str(x) + '"'; 36, return_statement; 37, call; 38, string:"_"; 39, identifier:add_parenthesis_if_string_value; 40, argument_list; 41, identifier:sorted; 42, argument_list; 43, binary_operator:'"' + str(x); 44, string:'"'; 45, call; 46, identifier:str; 47, argument_list; 48, subscript; 49, call; 50, string:'"'; 51, call; 52, identifier:str; 53, argument_list; 54, identifier:key; 55, identifier:param; 56, identifier:key; 57, attribute; 58, argument_list; 59, identifier:str; 60, argument_list; 61, identifier:x; 62, identifier:param; 63, identifier:keys; 64, identifier:x
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 4, 6; 4, 7; 4, 8; 6, 9; 7, 10; 7, 11; 7, 12; 8, 13; 11, 14; 12, 15; 13, 16; 13, 17; 15, 18; 15, 19; 15, 20; 16, 21; 16, 22; 17, 23; 17, 24; 18, 25; 18, 26; 19, 27; 20, 28; 23, 29; 23, 30; 24, 31; 24, 32; 26, 33; 26, 34; 27, 35; 28, 36; 29, 37; 29, 38; 30, 39; 30, 40; 32, 41; 32, 42; 35, 43; 35, 44; 36, 45; 37, 46; 37, 47; 40, 48; 42, 49; 43, 50; 43, 51; 45, 52; 45, 53; 47, 54; 48, 55; 48, 56; 49, 57; 49, 58; 51, 59; 51, 60; 53, 61; 57, 62; 57, 63; 60, 64
def convert_to_output_format(param): """ Helper function to convert parameters to a valid string, that can be used in a column name. Does the opposite which is used in the from_columns function. The parameters are sorted by their name and written out in the form <param name>_<param value>__<param name>_<param value>__ ... If a <param_value> is a string, this method will wrap it with parenthesis ", so "<param_value>" :param param: The dictionary of parameters to write out :type param: dict :return: The string of parsed parameters :rtype: str """ def add_parenthesis_if_string_value(x): if isinstance(x, string_types): return '"' + str(x) + '"' else: return str(x) return "__".join(str(key) + "_" + add_parenthesis_if_string_value(param[key]) for key in sorted(param.keys()))
0, module; 1, function_definition; 2, function_name:end_profiling; 3, parameters; 4, block; 5, identifier:profiler; 6, identifier:filename; 7, default_parameter; 8, expression_statement; 9, expression_statement; 10, expression_statement; 11, expression_statement; 12, expression_statement; 13, with_statement; 14, identifier:sorting; 15, None; 16, comment:""" Helper function to stop the profiling process and write out the profiled data into the given filename. Before this, sort the stats by the passed sorting. :param profiler: An already started profiler (probably by start_profiling). :type profiler: cProfile.Profile :param filename: The name of the output file to save the profile. :type filename: basestring :param sorting: The sorting of the statistics passed to the sort_stats function. :type sorting: basestring :return: None :rtype: None Start and stop the profiler with: >>> profiler = start_profiling() >>> # Do something you want to profile >>> end_profiling(profiler, "out.txt", "cumulative") """; 17, call; 18, assignment; 19, assignment; 20, call; 21, with_clause; 22, block; 23, attribute; 24, argument_list; 25, identifier:s; 26, call; 27, identifier:ps; 28, call; 29, attribute; 30, argument_list; 31, with_item; 32, expression_statement; 33, expression_statement; 34, identifier:profiler; 35, identifier:disable; 36, attribute; 37, argument_list; 38, attribute; 39, argument_list; 40, identifier:ps; 41, identifier:print_stats; 42, as_pattern; 43, call; 44, call; 45, identifier:six; 46, identifier:StringIO; 47, call; 48, identifier:sort_stats; 49, identifier:sorting; 50, call; 51, as_pattern_target; 52, attribute; 53, argument_list; 54, attribute; 55, argument_list; 56, attribute; 57, argument_list; 58, identifier:open; 59, argument_list; 60, identifier:f; 61, identifier:_logger; 62, identifier:info; 63, string:"[calculate_ts_features] Finished profiling of time series feature extraction"; 64, identifier:f; 65, identifier:write; 66, call; 67, identifier:pstats; 68, identifier:Stats; 69, identifier:profiler; 70, keyword_argument; 71, identifier:filename; 72, string:"w+"; 73, attribute; 74, argument_list; 75, identifier:stream; 76, identifier:s; 77, identifier:s; 78, identifier:getvalue
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 7, 14; 7, 15; 8, 16; 9, 17; 10, 18; 11, 19; 12, 20; 13, 21; 13, 22; 17, 23; 17, 24; 18, 25; 18, 26; 19, 27; 19, 28; 20, 29; 20, 30; 21, 31; 22, 32; 22, 33; 23, 34; 23, 35; 26, 36; 26, 37; 28, 38; 28, 39; 29, 40; 29, 41; 31, 42; 32, 43; 33, 44; 36, 45; 36, 46; 38, 47; 38, 48; 39, 49; 42, 50; 42, 51; 43, 52; 43, 53; 44, 54; 44, 55; 47, 56; 47, 57; 50, 58; 50, 59; 51, 60; 52, 61; 52, 62; 53, 63; 54, 64; 54, 65; 55, 66; 56, 67; 56, 68; 57, 69; 57, 70; 59, 71; 59, 72; 66, 73; 66, 74; 70, 75; 70, 76; 73, 77; 73, 78
def end_profiling(profiler, filename, sorting=None): """ Helper function to stop the profiling process and write out the profiled data into the given filename. Before this, sort the stats by the passed sorting. :param profiler: An already started profiler (probably by start_profiling). :type profiler: cProfile.Profile :param filename: The name of the output file to save the profile. :type filename: basestring :param sorting: The sorting of the statistics passed to the sort_stats function. :type sorting: basestring :return: None :rtype: None Start and stop the profiler with: >>> profiler = start_profiling() >>> # Do something you want to profile >>> end_profiling(profiler, "out.txt", "cumulative") """ profiler.disable() s = six.StringIO() ps = pstats.Stats(profiler, stream=s).sort_stats(sorting) ps.print_stats() with open(filename, "w+") as f: _logger.info("[calculate_ts_features] Finished profiling of time series feature extraction") f.write(s.getvalue())
0, module; 1, function_definition; 2, function_name:extract_relevant_features; 3, parameters; 4, block; 5, identifier:timeseries_container; 6, identifier:y; 7, default_parameter; 8, default_parameter; 9, default_parameter; 10, default_parameter; 11, default_parameter; 12, default_parameter; 13, default_parameter; 14, default_parameter; 15, default_parameter; 16, default_parameter; 17, default_parameter; 18, default_parameter; 19, default_parameter; 20, default_parameter; 21, default_parameter; 22, default_parameter; 23, default_parameter; 24, default_parameter; 25, default_parameter; 26, default_parameter; 27, default_parameter; 28, expression_statement; 29, assert_statement; 30, assert_statement; 31, if_statement; 32, expression_statement; 33, expression_statement; 34, if_statement; 35, expression_statement; 36, expression_statement; 37, if_statement; 38, return_statement; 39, identifier:X; 40, None; 41, identifier:default_fc_parameters; 42, None; 43, identifier:kind_to_fc_parameters; 44, None; 45, identifier:column_id; 46, None; 47, identifier:column_sort; 48, None; 49, identifier:column_kind; 50, None; 51, identifier:column_value; 52, None; 53, identifier:show_warnings; 54, attribute; 55, identifier:disable_progressbar; 56, attribute; 57, identifier:profile; 58, attribute; 59, identifier:profiling_filename; 60, attribute; 61, identifier:profiling_sorting; 62, attribute; 63, identifier:test_for_binary_target_binary_feature; 64, attribute; 65, identifier:test_for_binary_target_real_feature; 66, attribute; 67, identifier:test_for_real_target_binary_feature; 68, attribute; 69, identifier:test_for_real_target_real_feature; 70, attribute; 71, identifier:fdr_level; 72, attribute; 73, identifier:hypotheses_independent; 74, attribute; 75, identifier:n_jobs; 76, attribute; 77, identifier:chunksize; 78, attribute; 79, identifier:ml_task; 80, string; 81, comment:""" High level convenience function to extract time series features from `timeseries_container`. Then return feature matrix `X` possibly augmented with relevant features with respect to target vector `y`. For more details see the documentation of :func:`~tsfresh.feature_extraction.extraction.extract_features` and :func:`~tsfresh.feature_selection.selection.select_features`. Examples ======== >>> from tsfresh.examples import load_robot_execution_failures >>> from tsfresh import extract_relevant_features >>> df, y = load_robot_execution_failures() >>> X = extract_relevant_features(df, y, column_id='id', column_sort='time') :param timeseries_container: The pandas.DataFrame with the time series to compute the features for, or a dictionary of pandas.DataFrames. See :func:`~tsfresh.feature_extraction.extraction.extract_features`. :param X: A DataFrame containing additional features :type X: pandas.DataFrame :param y: The target vector :type y: pandas.Series :param default_fc_parameters: mapping from feature calculator names to parameters. Only those names which are keys in this dict will be calculated. See the class:`ComprehensiveFCParameters` for more information. :type default_fc_parameters: dict :param kind_to_fc_parameters: mapping from kind names to objects of the same type as the ones for default_fc_parameters. If you put a kind as a key here, the fc_parameters object (which is the value), will be used instead of the default_fc_parameters. :type kind_to_fc_parameters: dict :param column_id: The name of the id column to group by. :type column_id: str :param column_sort: The name of the sort column. :type column_sort: str :param column_kind: The name of the column keeping record on the kind of the value. :type column_kind: str :param column_value: The name for the column keeping the value itself. :type column_value: str :param chunksize: The size of one chunk that is submitted to the worker process for the parallelisation. Where one chunk is defined as a singular time series for one id and one kind. If you set the chunksize to 10, then it means that one task is to calculate all features for 10 time series. If it is set it to None, depending on distributor, heuristics are used to find the optimal chunksize. If you get out of memory exceptions, you can try it with the dask distributor and a smaller chunksize. :type chunksize: None or int :param n_jobs: The number of processes to use for parallelization. If zero, no parallelization is used. :type n_jobs: int :param: show_warnings: Show warnings during the feature extraction (needed for debugging of calculators). :type show_warnings: bool :param disable_progressbar: Do not show a progressbar while doing the calculation. :type disable_progressbar: bool :param profile: Turn on profiling during feature extraction :type profile: bool :param profiling_sorting: How to sort the profiling results (see the documentation of the profiling package for more information) :type profiling_sorting: basestring :param profiling_filename: Where to save the profiling results. :type profiling_filename: basestring :param test_for_binary_target_binary_feature: Which test to be used for binary target, binary feature (currently unused) :type test_for_binary_target_binary_feature: str :param test_for_binary_target_real_feature: Which test to be used for binary target, real feature :type test_for_binary_target_real_feature: str :param test_for_real_target_binary_feature: Which test to be used for real target, binary feature (currently unused) :type test_for_real_target_binary_feature: str :param test_for_real_target_real_feature: Which test to be used for real target, real feature (currently unused) :type test_for_real_target_real_feature: str :param fdr_level: The FDR level that should be respected, this is the theoretical expected percentage of irrelevant features among all created features. :type fdr_level: float :param hypotheses_independent: Can the significance of the features be assumed to be independent? Normally, this should be set to False as the features are never independent (e.g. mean and median) :type hypotheses_independent: bool :param ml_task: The intended machine learning task. Either `'classification'`, `'regression'` or `'auto'`. Defaults to `'auto'`, meaning the intended task is inferred from `y`. If `y` has a boolean, integer or object dtype, the task is assumend to be classification, else regression. :type ml_task: str :return: Feature matrix X, possibly extended with relevant time series features. """; 82, call; 83, call; 84, comparison_operator:len(set(y)) > 1; 85, string:"Feature selection is only possible if more than 1 label/class is provided"; 86, comparison_operator:X is not None; 87, block; 88, assignment; 89, assignment; 90, comparison_operator:ids_container != ids_y; 91, block; 92, assignment; 93, assignment; 94, comparison_operator:X is None; 95, block; 96, else_clause; 97, identifier:X; 98, identifier:defaults; 99, identifier:SHOW_WARNINGS; 100, identifier:defaults; 101, identifier:DISABLE_PROGRESSBAR; 102, identifier:defaults; 103, identifier:PROFILING; 104, identifier:defaults; 105, identifier:PROFILING_FILENAME; 106, identifier:defaults; 107, identifier:PROFILING_SORTING; 108, identifier:defaults; 109, identifier:TEST_FOR_BINARY_TARGET_BINARY_FEATURE; 110, identifier:defaults; 111, identifier:TEST_FOR_BINARY_TARGET_REAL_FEATURE; 112, identifier:defaults; 113, identifier:TEST_FOR_REAL_TARGET_BINARY_FEATURE; 114, identifier:defaults; 115, identifier:TEST_FOR_REAL_TARGET_REAL_FEATURE; 116, identifier:defaults; 117, identifier:FDR_LEVEL; 118, identifier:defaults; 119, identifier:HYPOTHESES_INDEPENDENT; 120, identifier:defaults; 121, identifier:N_PROCESSES; 122, identifier:defaults; 123, identifier:CHUNKSIZE; 124, string_content:auto; 125, identifier:isinstance; 126, argument_list; 127, attribute; 128, argument_list; 129, call; 130, integer:1; 131, identifier:X; 132, None; 133, expression_statement; 134, identifier:ids_container; 135, call; 136, identifier:ids_y; 137, call; 138, identifier:ids_container; 139, identifier:ids_y; 140, if_statement; 141, if_statement; 142, identifier:X_ext; 143, call; 144, identifier:X_sel; 145, call; 146, identifier:X; 147, None; 148, expression_statement; 149, block; 150, identifier:y; 151, attribute; 152, string:"y needs to be a pandas.Series, received type: {}."; 153, identifier:format; 154, call; 155, identifier:len; 156, argument_list; 157, assignment; 158, identifier:get_ids; 159, argument_list; 160, identifier:set; 161, argument_list; 162, comparison_operator:len(ids_container - ids_y) > 0; 163, block; 164, comparison_operator:len(ids_y - ids_container) > 0; 165, block; 166, identifier:extract_features; 167, argument_list; 168, identifier:select_features; 169, argument_list; 170, assignment; 171, expression_statement; 172, identifier:pd; 173, identifier:Series; 174, identifier:type; 175, argument_list; 176, call; 177, identifier:timeseries_container; 178, call; 179, keyword_argument; 180, keyword_argument; 181, attribute; 182, call; 183, integer:0; 184, raise_statement; 185, call; 186, integer:0; 187, raise_statement; 188, identifier:timeseries_container; 189, keyword_argument; 190, keyword_argument; 191, keyword_argument; 192, keyword_argument; 193, keyword_argument; 194, keyword_argument; 195, keyword_argument; 196, keyword_argument; 197, keyword_argument; 198, keyword_argument; 199, keyword_argument; 200, keyword_argument; 201, keyword_argument; 202, identifier:X_ext; 203, identifier:y; 204, keyword_argument; 205, keyword_argument; 206, keyword_argument; 207, keyword_argument; 208, keyword_argument; 209, keyword_argument; 210, keyword_argument; 211, keyword_argument; 212, keyword_argument; 213, identifier:X; 214, identifier:X_sel; 215, assignment; 216, identifier:y; 217, identifier:set; 218, argument_list; 219, identifier:restrict_input_to_index; 220, argument_list; 221, identifier:df_or_dict; 222, identifier:timeseries_container; 223, identifier:column_id; 224, identifier:column_id; 225, identifier:y; 226, identifier:index; 227, identifier:len; 228, argument_list; 229, call; 230, identifier:len; 231, argument_list; 232, call; 233, identifier:default_fc_parameters; 234, identifier:default_fc_parameters; 235, identifier:kind_to_fc_parameters; 236, identifier:kind_to_fc_parameters; 237, identifier:show_warnings; 238, identifier:show_warnings; 239, identifier:disable_progressbar; 240, identifier:disable_progressbar; 241, identifier:profile; 242, identifier:profile; 243, identifier:profiling_filename; 244, identifier:profiling_filename; 245, identifier:profiling_sorting; 246, identifier:profiling_sorting; 247, identifier:n_jobs; 248, identifier:n_jobs; 249, identifier:column_id; 250, identifier:column_id; 251, identifier:column_sort; 252, identifier:column_sort; 253, identifier:column_kind; 254, identifier:column_kind; 255, identifier:column_value; 256, identifier:column_value; 257, identifier:impute_function; 258, identifier:impute; 259, identifier:test_for_binary_target_binary_feature; 260, identifier:test_for_binary_target_binary_feature; 261, identifier:test_for_binary_target_real_feature; 262, identifier:test_for_binary_target_real_feature; 263, identifier:test_for_real_target_binary_feature; 264, identifier:test_for_real_target_binary_feature; 265, identifier:test_for_real_target_real_feature; 266, identifier:test_for_real_target_real_feature; 267, identifier:fdr_level; 268, identifier:fdr_level; 269, identifier:hypotheses_independent; 270, identifier:hypotheses_independent; 271, identifier:n_jobs; 272, identifier:n_jobs; 273, identifier:chunksize; 274, identifier:chunksize; 275, identifier:ml_task; 276, identifier:ml_task; 277, identifier:X; 278, call; 279, identifier:y; 280, identifier:timeseries_container; 281, identifier:column_id; 282, attribute; 283, binary_operator:ids_container - ids_y; 284, identifier:ValueError; 285, argument_list; 286, binary_operator:ids_y - ids_container; 287, identifier:ValueError; 288, argument_list; 289, attribute; 290, argument_list; 291, identifier:X; 292, identifier:index; 293, identifier:ids_container; 294, identifier:ids_y; 295, call; 296, identifier:ids_y; 297, identifier:ids_container; 298, call; 299, identifier:pd; 300, identifier:merge; 301, identifier:X; 302, identifier:X_sel; 303, keyword_argument; 304, keyword_argument; 305, keyword_argument; 306, attribute; 307, argument_list; 308, attribute; 309, argument_list; 310, identifier:left_index; 311, True; 312, identifier:right_index; 313, True; 314, identifier:how; 315, string:"left"; 316, concatenated_string; 317, identifier:format; 318, binary_operator:ids_container - ids_y; 319, concatenated_string; 320, identifier:format; 321, binary_operator:ids_y - ids_container; 322, string:"The following ids are in the time series container but are missing in y: "; 323, string:"{}"; 324, identifier:ids_container; 325, identifier:ids_y; 326, string:"The following ids are in y but are missing inside the time series container: "; 327, string:"{}"; 328, identifier:ids_y; 329, identifier:ids_container
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 3, 9; 3, 10; 3, 11; 3, 12; 3, 13; 3, 14; 3, 15; 3, 16; 3, 17; 3, 18; 3, 19; 3, 20; 3, 21; 3, 22; 3, 23; 3, 24; 3, 25; 3, 26; 3, 27; 4, 28; 4, 29; 4, 30; 4, 31; 4, 32; 4, 33; 4, 34; 4, 35; 4, 36; 4, 37; 4, 38; 7, 39; 7, 40; 8, 41; 8, 42; 9, 43; 9, 44; 10, 45; 10, 46; 11, 47; 11, 48; 12, 49; 12, 50; 13, 51; 13, 52; 14, 53; 14, 54; 15, 55; 15, 56; 16, 57; 16, 58; 17, 59; 17, 60; 18, 61; 18, 62; 19, 63; 19, 64; 20, 65; 20, 66; 21, 67; 21, 68; 22, 69; 22, 70; 23, 71; 23, 72; 24, 73; 24, 74; 25, 75; 25, 76; 26, 77; 26, 78; 27, 79; 27, 80; 28, 81; 29, 82; 29, 83; 30, 84; 30, 85; 31, 86; 31, 87; 32, 88; 33, 89; 34, 90; 34, 91; 35, 92; 36, 93; 37, 94; 37, 95; 37, 96; 38, 97; 54, 98; 54, 99; 56, 100; 56, 101; 58, 102; 58, 103; 60, 104; 60, 105; 62, 106; 62, 107; 64, 108; 64, 109; 66, 110; 66, 111; 68, 112; 68, 113; 70, 114; 70, 115; 72, 116; 72, 117; 74, 118; 74, 119; 76, 120; 76, 121; 78, 122; 78, 123; 80, 124; 82, 125; 82, 126; 83, 127; 83, 128; 84, 129; 84, 130; 86, 131; 86, 132; 87, 133; 88, 134; 88, 135; 89, 136; 89, 137; 90, 138; 90, 139; 91, 140; 91, 141; 92, 142; 92, 143; 93, 144; 93, 145; 94, 146; 94, 147; 95, 148; 96, 149; 126, 150; 126, 151; 127, 152; 127, 153; 128, 154; 129, 155; 129, 156; 133, 157; 135, 158; 135, 159; 137, 160; 137, 161; 140, 162; 140, 163; 141, 164; 141, 165; 143, 166; 143, 167; 145, 168; 145, 169; 148, 170; 149, 171; 151, 172; 151, 173; 154, 174; 154, 175; 156, 176; 157, 177; 157, 178; 159, 179; 159, 180; 161, 181; 162, 182; 162, 183; 163, 184; 164, 185; 164, 186; 165, 187; 167, 188; 167, 189; 167, 190; 167, 191; 167, 192; 167, 193; 167, 194; 167, 195; 167, 196; 167, 197; 167, 198; 167, 199; 167, 200; 167, 201; 169, 202; 169, 203; 169, 204; 169, 205; 169, 206; 169, 207; 169, 208; 169, 209; 169, 210; 169, 211; 169, 212; 170, 213; 170, 214; 171, 215; 175, 216; 176, 217; 176, 218; 178, 219; 178, 220; 179, 221; 179, 222; 180, 223; 180, 224; 181, 225; 181, 226; 182, 227; 182, 228; 184, 229; 185, 230; 185, 231; 187, 232; 189, 233; 189, 234; 190, 235; 190, 236; 191, 237; 191, 238; 192, 239; 192, 240; 193, 241; 193, 242; 194, 243; 194, 244; 195, 245; 195, 246; 196, 247; 196, 248; 197, 249; 197, 250; 198, 251; 198, 252; 199, 253; 199, 254; 200, 255; 200, 256; 201, 257; 201, 258; 204, 259; 204, 260; 205, 261; 205, 262; 206, 263; 206, 264; 207, 265; 207, 266; 208, 267; 208, 268; 209, 269; 209, 270; 210, 271; 210, 272; 211, 273; 211, 274; 212, 275; 212, 276; 215, 277; 215, 278; 218, 279; 220, 280; 220, 281; 220, 282; 228, 283; 229, 284; 229, 285; 231, 286; 232, 287; 232, 288; 278, 289; 278, 290; 282, 291; 282, 292; 283, 293; 283, 294; 285, 295; 286, 296; 286, 297; 288, 298; 289, 299; 289, 300; 290, 301; 290, 302; 290, 303; 290, 304; 290, 305; 295, 306; 295, 307; 298, 308; 298, 309; 303, 310; 303, 311; 304, 312; 304, 313; 305, 314; 305, 315; 306, 316; 306, 317; 307, 318; 308, 319; 308, 320; 309, 321; 316, 322; 316, 323; 318, 324; 318, 325; 319, 326; 319, 327; 321, 328; 321, 329
def extract_relevant_features(timeseries_container, y, X=None, default_fc_parameters=None, kind_to_fc_parameters=None, column_id=None, column_sort=None, column_kind=None, column_value=None, show_warnings=defaults.SHOW_WARNINGS, disable_progressbar=defaults.DISABLE_PROGRESSBAR, profile=defaults.PROFILING, profiling_filename=defaults.PROFILING_FILENAME, profiling_sorting=defaults.PROFILING_SORTING, test_for_binary_target_binary_feature=defaults.TEST_FOR_BINARY_TARGET_BINARY_FEATURE, test_for_binary_target_real_feature=defaults.TEST_FOR_BINARY_TARGET_REAL_FEATURE, test_for_real_target_binary_feature=defaults.TEST_FOR_REAL_TARGET_BINARY_FEATURE, test_for_real_target_real_feature=defaults.TEST_FOR_REAL_TARGET_REAL_FEATURE, fdr_level=defaults.FDR_LEVEL, hypotheses_independent=defaults.HYPOTHESES_INDEPENDENT, n_jobs=defaults.N_PROCESSES, chunksize=defaults.CHUNKSIZE, ml_task='auto'): """ High level convenience function to extract time series features from `timeseries_container`. Then return feature matrix `X` possibly augmented with relevant features with respect to target vector `y`. For more details see the documentation of :func:`~tsfresh.feature_extraction.extraction.extract_features` and :func:`~tsfresh.feature_selection.selection.select_features`. Examples ======== >>> from tsfresh.examples import load_robot_execution_failures >>> from tsfresh import extract_relevant_features >>> df, y = load_robot_execution_failures() >>> X = extract_relevant_features(df, y, column_id='id', column_sort='time') :param timeseries_container: The pandas.DataFrame with the time series to compute the features for, or a dictionary of pandas.DataFrames. See :func:`~tsfresh.feature_extraction.extraction.extract_features`. :param X: A DataFrame containing additional features :type X: pandas.DataFrame :param y: The target vector :type y: pandas.Series :param default_fc_parameters: mapping from feature calculator names to parameters. Only those names which are keys in this dict will be calculated. See the class:`ComprehensiveFCParameters` for more information. :type default_fc_parameters: dict :param kind_to_fc_parameters: mapping from kind names to objects of the same type as the ones for default_fc_parameters. If you put a kind as a key here, the fc_parameters object (which is the value), will be used instead of the default_fc_parameters. :type kind_to_fc_parameters: dict :param column_id: The name of the id column to group by. :type column_id: str :param column_sort: The name of the sort column. :type column_sort: str :param column_kind: The name of the column keeping record on the kind of the value. :type column_kind: str :param column_value: The name for the column keeping the value itself. :type column_value: str :param chunksize: The size of one chunk that is submitted to the worker process for the parallelisation. Where one chunk is defined as a singular time series for one id and one kind. If you set the chunksize to 10, then it means that one task is to calculate all features for 10 time series. If it is set it to None, depending on distributor, heuristics are used to find the optimal chunksize. If you get out of memory exceptions, you can try it with the dask distributor and a smaller chunksize. :type chunksize: None or int :param n_jobs: The number of processes to use for parallelization. If zero, no parallelization is used. :type n_jobs: int :param: show_warnings: Show warnings during the feature extraction (needed for debugging of calculators). :type show_warnings: bool :param disable_progressbar: Do not show a progressbar while doing the calculation. :type disable_progressbar: bool :param profile: Turn on profiling during feature extraction :type profile: bool :param profiling_sorting: How to sort the profiling results (see the documentation of the profiling package for more information) :type profiling_sorting: basestring :param profiling_filename: Where to save the profiling results. :type profiling_filename: basestring :param test_for_binary_target_binary_feature: Which test to be used for binary target, binary feature (currently unused) :type test_for_binary_target_binary_feature: str :param test_for_binary_target_real_feature: Which test to be used for binary target, real feature :type test_for_binary_target_real_feature: str :param test_for_real_target_binary_feature: Which test to be used for real target, binary feature (currently unused) :type test_for_real_target_binary_feature: str :param test_for_real_target_real_feature: Which test to be used for real target, real feature (currently unused) :type test_for_real_target_real_feature: str :param fdr_level: The FDR level that should be respected, this is the theoretical expected percentage of irrelevant features among all created features. :type fdr_level: float :param hypotheses_independent: Can the significance of the features be assumed to be independent? Normally, this should be set to False as the features are never independent (e.g. mean and median) :type hypotheses_independent: bool :param ml_task: The intended machine learning task. Either `'classification'`, `'regression'` or `'auto'`. Defaults to `'auto'`, meaning the intended task is inferred from `y`. If `y` has a boolean, integer or object dtype, the task is assumend to be classification, else regression. :type ml_task: str :return: Feature matrix X, possibly extended with relevant time series features. """ assert isinstance(y, pd.Series), "y needs to be a pandas.Series, received type: {}.".format(type(y)) assert len(set(y)) > 1, "Feature selection is only possible if more than 1 label/class is provided" if X is not None: timeseries_container = restrict_input_to_index(timeseries_container, column_id, X.index) ids_container = get_ids(df_or_dict=timeseries_container, column_id=column_id) ids_y = set(y.index) if ids_container != ids_y: if len(ids_container - ids_y) > 0: raise ValueError("The following ids are in the time series container but are missing in y: " "{}".format(ids_container - ids_y)) if len(ids_y - ids_container) > 0: raise ValueError("The following ids are in y but are missing inside the time series container: " "{}".format(ids_y - ids_container)) X_ext = extract_features(timeseries_container, default_fc_parameters=default_fc_parameters, kind_to_fc_parameters=kind_to_fc_parameters, show_warnings=show_warnings, disable_progressbar=disable_progressbar, profile=profile, profiling_filename=profiling_filename, profiling_sorting=profiling_sorting, n_jobs=n_jobs, column_id=column_id, column_sort=column_sort, column_kind=column_kind, column_value=column_value, impute_function=impute) X_sel = select_features(X_ext, y, test_for_binary_target_binary_feature=test_for_binary_target_binary_feature, test_for_binary_target_real_feature=test_for_binary_target_real_feature, test_for_real_target_binary_feature=test_for_real_target_binary_feature, test_for_real_target_real_feature=test_for_real_target_real_feature, fdr_level=fdr_level, hypotheses_independent=hypotheses_independent, n_jobs=n_jobs, chunksize=chunksize, ml_task=ml_task) if X is None: X = X_sel else: X = pd.merge(X, X_sel, left_index=True, right_index=True, how="left") return X
0, module; 1, function_definition; 2, function_name:user_agents; 3, parameters; 4, block; 5, identifier:self; 6, expression_statement; 7, return_statement; 8, comment:""" Retrieve user-agents, sorted by most common to least common. """; 9, parenthesized_expression; 10, call; 11, attribute; 12, argument_list; 13, call; 14, identifier:tuples; 15, attribute; 16, argument_list; 17, call; 18, identifier:order_by; 19, call; 20, attribute; 21, argument_list; 22, attribute; 23, argument_list; 24, call; 25, identifier:group_by; 26, subscript; 27, call; 28, identifier:desc; 29, attribute; 30, argument_list; 31, attribute; 32, string; 33, attribute; 34, argument_list; 35, call; 36, identifier:select; 37, subscript; 38, call; 39, identifier:PageView; 40, identifier:headers; 41, string_content:User-Agent; 42, identifier:fn; 43, identifier:Count; 44, attribute; 45, attribute; 46, argument_list; 47, attribute; 48, string; 49, attribute; 50, argument_list; 51, identifier:PageView; 52, identifier:id; 53, identifier:self; 54, identifier:get_query; 55, identifier:PageView; 56, identifier:headers; 57, string_content:User-Agent; 58, identifier:fn; 59, identifier:Count; 60, attribute; 61, identifier:PageView; 62, identifier:id
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 4, 6; 4, 7; 6, 8; 7, 9; 9, 10; 10, 11; 10, 12; 11, 13; 11, 14; 13, 15; 13, 16; 15, 17; 15, 18; 16, 19; 17, 20; 17, 21; 19, 22; 19, 23; 20, 24; 20, 25; 21, 26; 22, 27; 22, 28; 24, 29; 24, 30; 26, 31; 26, 32; 27, 33; 27, 34; 29, 35; 29, 36; 30, 37; 30, 38; 31, 39; 31, 40; 32, 41; 33, 42; 33, 43; 34, 44; 35, 45; 35, 46; 37, 47; 37, 48; 38, 49; 38, 50; 44, 51; 44, 52; 45, 53; 45, 54; 47, 55; 47, 56; 48, 57; 49, 58; 49, 59; 50, 60; 60, 61; 60, 62
def user_agents(self): """ Retrieve user-agents, sorted by most common to least common. """ return (self.get_query() .select( PageView.headers['User-Agent'], fn.Count(PageView.id)) .group_by(PageView.headers['User-Agent']) .order_by(fn.Count(PageView.id).desc()) .tuples())
0, module; 1, function_definition; 2, function_name:languages; 3, parameters; 4, block; 5, identifier:self; 6, expression_statement; 7, expression_statement; 8, expression_statement; 9, return_statement; 10, comment:""" Retrieve languages, sorted by most common to least common. The Accept-Languages header sometimes looks weird, i.e. "en-US,en;q=0.8,is;q=0.6,da;q=0.4" We will split on the first semi- colon. """; 11, assignment; 12, assignment; 13, parenthesized_expression; 14, identifier:language; 15, subscript; 16, identifier:first_language; 17, call; 18, call; 19, attribute; 20, string; 21, attribute; 22, argument_list; 23, attribute; 24, argument_list; 25, identifier:PageView; 26, identifier:headers; 27, string_content:Accept-Language; 28, identifier:fn; 29, identifier:SubStr; 30, identifier:language; 31, comment:# String to slice.; 32, integer:1; 33, comment:# Left index.; 34, call; 35, call; 36, identifier:tuples; 37, attribute; 38, argument_list; 39, attribute; 40, argument_list; 41, identifier:fn; 42, identifier:StrPos; 43, identifier:language; 44, string; 45, call; 46, identifier:order_by; 47, call; 48, string_content:;; 49, attribute; 50, argument_list; 51, attribute; 52, argument_list; 53, call; 54, identifier:group_by; 55, identifier:first_language; 56, call; 57, identifier:desc; 58, attribute; 59, argument_list; 60, attribute; 61, argument_list; 62, call; 63, identifier:select; 64, identifier:first_language; 65, call; 66, identifier:fn; 67, identifier:Count; 68, attribute; 69, attribute; 70, argument_list; 71, attribute; 72, argument_list; 73, identifier:PageView; 74, identifier:id; 75, identifier:self; 76, identifier:get_query; 77, identifier:fn; 78, identifier:Count; 79, attribute; 80, identifier:PageView; 81, identifier:id
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 4, 6; 4, 7; 4, 8; 4, 9; 6, 10; 7, 11; 8, 12; 9, 13; 11, 14; 11, 15; 12, 16; 12, 17; 13, 18; 15, 19; 15, 20; 17, 21; 17, 22; 18, 23; 18, 24; 19, 25; 19, 26; 20, 27; 21, 28; 21, 29; 22, 30; 22, 31; 22, 32; 22, 33; 22, 34; 23, 35; 23, 36; 34, 37; 34, 38; 35, 39; 35, 40; 37, 41; 37, 42; 38, 43; 38, 44; 39, 45; 39, 46; 40, 47; 44, 48; 45, 49; 45, 50; 47, 51; 47, 52; 49, 53; 49, 54; 50, 55; 51, 56; 51, 57; 53, 58; 53, 59; 56, 60; 56, 61; 58, 62; 58, 63; 59, 64; 59, 65; 60, 66; 60, 67; 61, 68; 62, 69; 62, 70; 65, 71; 65, 72; 68, 73; 68, 74; 69, 75; 69, 76; 71, 77; 71, 78; 72, 79; 79, 80; 79, 81
def languages(self): """ Retrieve languages, sorted by most common to least common. The Accept-Languages header sometimes looks weird, i.e. "en-US,en;q=0.8,is;q=0.6,da;q=0.4" We will split on the first semi- colon. """ language = PageView.headers['Accept-Language'] first_language = fn.SubStr( language, # String to slice. 1, # Left index. fn.StrPos(language, ';')) return (self.get_query() .select(first_language, fn.Count(PageView.id)) .group_by(first_language) .order_by(fn.Count(PageView.id).desc()) .tuples())
0, module; 1, function_definition; 2, function_name:error_router; 3, parameters; 4, block; 5, identifier:self; 6, identifier:original_handler; 7, identifier:e; 8, expression_statement; 9, if_statement; 10, return_statement; 11, comment:"""This function decides whether the error occured in a flask-restful endpoint or not. If it happened in a flask-restful endpoint, our handler will be dispatched. If it happened in an unrelated view, the app's original error handler will be dispatched. In the event that the error occurred in a flask-restful endpoint but the local handler can't resolve the situation, the router will fall back onto the original_handler as last resort. :param original_handler: the original Flask error handler for the app :type original_handler: function :param e: the exception raised while handling the request :type e: Exception """; 12, call; 13, block; 14, call; 15, attribute; 16, argument_list; 17, try_statement; 18, identifier:original_handler; 19, argument_list; 20, identifier:self; 21, identifier:_has_fr_route; 22, block; 23, except_clause; 24, identifier:e; 25, return_statement; 26, identifier:Exception; 27, block; 28, call; 29, pass_statement; 30, comment:# Fall through to original handler; 31, attribute; 32, argument_list; 33, identifier:self; 34, identifier:handle_error; 35, identifier:e
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 4, 8; 4, 9; 4, 10; 8, 11; 9, 12; 9, 13; 10, 14; 12, 15; 12, 16; 13, 17; 14, 18; 14, 19; 15, 20; 15, 21; 17, 22; 17, 23; 19, 24; 22, 25; 23, 26; 23, 27; 25, 28; 27, 29; 27, 30; 28, 31; 28, 32; 31, 33; 31, 34; 32, 35
def error_router(self, original_handler, e): """This function decides whether the error occured in a flask-restful endpoint or not. If it happened in a flask-restful endpoint, our handler will be dispatched. If it happened in an unrelated view, the app's original error handler will be dispatched. In the event that the error occurred in a flask-restful endpoint but the local handler can't resolve the situation, the router will fall back onto the original_handler as last resort. :param original_handler: the original Flask error handler for the app :type original_handler: function :param e: the exception raised while handling the request :type e: Exception """ if self._has_fr_route(): try: return self.handle_error(e) except Exception: pass # Fall through to original handler return original_handler(e)
0, module; 1, function_definition; 2, function_name:smooth_knn_dist; 3, parameters; 4, block; 5, identifier:distances; 6, identifier:k; 7, default_parameter; 8, default_parameter; 9, default_parameter; 10, expression_statement; 11, expression_statement; 12, expression_statement; 13, expression_statement; 14, for_statement; 15, return_statement; 16, identifier:n_iter; 17, integer:64; 18, identifier:local_connectivity; 19, float:1.0; 20, identifier:bandwidth; 21, float:1.0; 22, comment:"""Compute a continuous version of the distance to the kth nearest neighbor. That is, this is similar to knn-distance but allows continuous k values rather than requiring an integral k. In esscence we are simply computing the distance such that the cardinality of fuzzy set we generate is k. Parameters ---------- distances: array of shape (n_samples, n_neighbors) Distances to nearest neighbors for each samples. Each row should be a sorted list of distances to a given samples nearest neighbors. k: float The number of nearest neighbors to approximate for. n_iter: int (optional, default 64) We need to binary search for the correct distance value. This is the max number of iterations to use in such a search. local_connectivity: int (optional, default 1) The local connectivity required -- i.e. the number of nearest neighbors that should be assumed to be connected at a local level. The higher this value the more connected the manifold becomes locally. In practice this should be not more than the local intrinsic dimension of the manifold. bandwidth: float (optional, default 1) The target bandwidth of the kernel, larger values will produce larger return values. Returns ------- knn_dist: array of shape (n_samples,) The distance to kth nearest neighbor, as suitably approximated. nn_dist: array of shape (n_samples,) The distance to the 1st nearest neighbor for each point. """; 23, assignment; 24, assignment; 25, assignment; 26, identifier:i; 27, call; 28, block; 29, expression_list; 30, identifier:target; 31, binary_operator:np.log2(k) * bandwidth; 32, identifier:rho; 33, call; 34, identifier:result; 35, call; 36, identifier:range; 37, argument_list; 38, expression_statement; 39, expression_statement; 40, expression_statement; 41, comment:# TODO: This is very inefficient, but will do for now. FIXME; 42, expression_statement; 43, expression_statement; 44, if_statement; 45, for_statement; 46, expression_statement; 47, comment:# TODO: This is very inefficient, but will do for now. FIXME; 48, if_statement; 49, identifier:result; 50, identifier:rho; 51, call; 52, identifier:bandwidth; 53, attribute; 54, argument_list; 55, attribute; 56, argument_list; 57, subscript; 58, assignment; 59, assignment; 60, assignment; 61, assignment; 62, assignment; 63, comparison_operator:non_zero_dists.shape[0] >= local_connectivity; 64, block; 65, elif_clause; 66, identifier:n; 67, call; 68, block; 69, assignment; 70, comparison_operator:rho[i] > 0.0; 71, block; 72, else_clause; 73, attribute; 74, argument_list; 75, identifier:np; 76, identifier:zeros; 77, subscript; 78, identifier:np; 79, identifier:zeros; 80, subscript; 81, attribute; 82, integer:0; 83, identifier:lo; 84, float:0.0; 85, identifier:hi; 86, identifier:NPY_INFINITY; 87, identifier:mid; 88, float:1.0; 89, identifier:ith_distances; 90, subscript; 91, identifier:non_zero_dists; 92, subscript; 93, subscript; 94, identifier:local_connectivity; 95, expression_statement; 96, expression_statement; 97, if_statement; 98, comparison_operator:non_zero_dists.shape[0] > 0; 99, block; 100, identifier:range; 101, argument_list; 102, expression_statement; 103, for_statement; 104, if_statement; 105, if_statement; 106, subscript; 107, identifier:mid; 108, subscript; 109, float:0.0; 110, if_statement; 111, block; 112, identifier:np; 113, identifier:log2; 114, identifier:k; 115, attribute; 116, integer:0; 117, attribute; 118, integer:0; 119, identifier:distances; 120, identifier:shape; 121, identifier:distances; 122, identifier:i; 123, identifier:ith_distances; 124, comparison_operator:ith_distances > 0.0; 125, attribute; 126, integer:0; 127, assignment; 128, assignment; 129, comparison_operator:index > 0; 130, block; 131, else_clause; 132, subscript; 133, integer:0; 134, expression_statement; 135, identifier:n_iter; 136, assignment; 137, identifier:j; 138, call; 139, block; 140, comparison_operator:np.fabs(psum - target) < SMOOTH_K_TOLERANCE; 141, block; 142, comparison_operator:psum > target; 143, block; 144, else_clause; 145, identifier:result; 146, identifier:i; 147, identifier:rho; 148, identifier:i; 149, comparison_operator:result[i] < MIN_K_DIST_SCALE * np.mean(ith_distances); 150, block; 151, if_statement; 152, identifier:distances; 153, identifier:shape; 154, identifier:distances; 155, identifier:shape; 156, identifier:ith_distances; 157, float:0.0; 158, identifier:non_zero_dists; 159, identifier:shape; 160, identifier:index; 161, call; 162, identifier:interpolation; 163, binary_operator:local_connectivity - index; 164, identifier:index; 165, integer:0; 166, expression_statement; 167, if_statement; 168, block; 169, attribute; 170, integer:0; 171, assignment; 172, identifier:psum; 173, float:0.0; 174, identifier:range; 175, argument_list; 176, expression_statement; 177, if_statement; 178, call; 179, identifier:SMOOTH_K_TOLERANCE; 180, break_statement; 181, identifier:psum; 182, identifier:target; 183, expression_statement; 184, expression_statement; 185, block; 186, subscript; 187, binary_operator:MIN_K_DIST_SCALE * np.mean(ith_distances); 188, expression_statement; 189, comparison_operator:result[i] < MIN_K_DIST_SCALE * np.mean(distances); 190, block; 191, identifier:int; 192, argument_list; 193, identifier:local_connectivity; 194, identifier:index; 195, assignment; 196, comparison_operator:interpolation > SMOOTH_K_TOLERANCE; 197, block; 198, expression_statement; 199, identifier:non_zero_dists; 200, identifier:shape; 201, subscript; 202, call; 203, integer:1; 204, subscript; 205, assignment; 206, comparison_operator:d > 0; 207, block; 208, else_clause; 209, attribute; 210, argument_list; 211, assignment; 212, assignment; 213, expression_statement; 214, if_statement; 215, identifier:result; 216, identifier:i; 217, identifier:MIN_K_DIST_SCALE; 218, call; 219, assignment; 220, subscript; 221, binary_operator:MIN_K_DIST_SCALE * np.mean(distances); 222, expression_statement; 223, call; 224, subscript; 225, subscript; 226, identifier:interpolation; 227, identifier:SMOOTH_K_TOLERANCE; 228, expression_statement; 229, assignment; 230, identifier:rho; 231, identifier:i; 232, attribute; 233, argument_list; 234, attribute; 235, integer:1; 236, identifier:d; 237, binary_operator:distances[i, j] - rho[i]; 238, identifier:d; 239, integer:0; 240, expression_statement; 241, block; 242, identifier:np; 243, identifier:fabs; 244, binary_operator:psum - target; 245, identifier:hi; 246, identifier:mid; 247, identifier:mid; 248, binary_operator:(lo + hi) / 2.0; 249, assignment; 250, comparison_operator:hi == NPY_INFINITY; 251, block; 252, else_clause; 253, attribute; 254, argument_list; 255, subscript; 256, binary_operator:MIN_K_DIST_SCALE * np.mean(ith_distances); 257, identifier:result; 258, identifier:i; 259, identifier:MIN_K_DIST_SCALE; 260, call; 261, assignment; 262, attribute; 263, argument_list; 264, identifier:rho; 265, identifier:i; 266, identifier:non_zero_dists; 267, binary_operator:index - 1; 268, augmented_assignment; 269, subscript; 270, binary_operator:interpolation * non_zero_dists[0]; 271, identifier:np; 272, identifier:max; 273, identifier:non_zero_dists; 274, identifier:distances; 275, identifier:shape; 276, subscript; 277, subscript; 278, augmented_assignment; 279, expression_statement; 280, identifier:psum; 281, identifier:target; 282, parenthesized_expression; 283, float:2.0; 284, identifier:lo; 285, identifier:mid; 286, identifier:hi; 287, identifier:NPY_INFINITY; 288, expression_statement; 289, block; 290, identifier:np; 291, identifier:mean; 292, identifier:ith_distances; 293, identifier:result; 294, identifier:i; 295, identifier:MIN_K_DIST_SCALE; 296, call; 297, attribute; 298, argument_list; 299, subscript; 300, binary_operator:MIN_K_DIST_SCALE * np.mean(distances); 301, identifier:np; 302, identifier:floor; 303, identifier:local_connectivity; 304, identifier:index; 305, integer:1; 306, subscript; 307, binary_operator:interpolation * (non_zero_dists[index] - non_zero_dists[index - 1]); 308, identifier:rho; 309, identifier:i; 310, identifier:interpolation; 311, subscript; 312, identifier:distances; 313, identifier:i; 314, identifier:j; 315, identifier:rho; 316, identifier:i; 317, identifier:psum; 318, call; 319, augmented_assignment; 320, binary_operator:lo + hi; 321, augmented_assignment; 322, expression_statement; 323, attribute; 324, argument_list; 325, identifier:np; 326, identifier:mean; 327, identifier:distances; 328, identifier:result; 329, identifier:i; 330, identifier:MIN_K_DIST_SCALE; 331, call; 332, identifier:rho; 333, identifier:i; 334, identifier:interpolation; 335, parenthesized_expression; 336, identifier:non_zero_dists; 337, integer:0; 338, attribute; 339, argument_list; 340, identifier:psum; 341, float:1.0; 342, identifier:lo; 343, identifier:hi; 344, identifier:mid; 345, integer:2; 346, assignment; 347, identifier:np; 348, identifier:mean; 349, identifier:ith_distances; 350, attribute; 351, argument_list; 352, binary_operator:non_zero_dists[index] - non_zero_dists[index - 1]; 353, identifier:np; 354, identifier:exp; 355, unary_operator; 356, identifier:mid; 357, binary_operator:(lo + hi) / 2.0; 358, identifier:np; 359, identifier:mean; 360, identifier:distances; 361, subscript; 362, subscript; 363, parenthesized_expression; 364, parenthesized_expression; 365, float:2.0; 366, identifier:non_zero_dists; 367, identifier:index; 368, identifier:non_zero_dists; 369, binary_operator:index - 1; 370, binary_operator:d / mid; 371, binary_operator:lo + hi; 372, identifier:index; 373, integer:1; 374, identifier:d; 375, identifier:mid; 376, identifier:lo; 377, identifier:hi
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 3, 9; 4, 10; 4, 11; 4, 12; 4, 13; 4, 14; 4, 15; 7, 16; 7, 17; 8, 18; 8, 19; 9, 20; 9, 21; 10, 22; 11, 23; 12, 24; 13, 25; 14, 26; 14, 27; 14, 28; 15, 29; 23, 30; 23, 31; 24, 32; 24, 33; 25, 34; 25, 35; 27, 36; 27, 37; 28, 38; 28, 39; 28, 40; 28, 41; 28, 42; 28, 43; 28, 44; 28, 45; 28, 46; 28, 47; 28, 48; 29, 49; 29, 50; 31, 51; 31, 52; 33, 53; 33, 54; 35, 55; 35, 56; 37, 57; 38, 58; 39, 59; 40, 60; 42, 61; 43, 62; 44, 63; 44, 64; 44, 65; 45, 66; 45, 67; 45, 68; 46, 69; 48, 70; 48, 71; 48, 72; 51, 73; 51, 74; 53, 75; 53, 76; 54, 77; 55, 78; 55, 79; 56, 80; 57, 81; 57, 82; 58, 83; 58, 84; 59, 85; 59, 86; 60, 87; 60, 88; 61, 89; 61, 90; 62, 91; 62, 92; 63, 93; 63, 94; 64, 95; 64, 96; 64, 97; 65, 98; 65, 99; 67, 100; 67, 101; 68, 102; 68, 103; 68, 104; 68, 105; 69, 106; 69, 107; 70, 108; 70, 109; 71, 110; 72, 111; 73, 112; 73, 113; 74, 114; 77, 115; 77, 116; 80, 117; 80, 118; 81, 119; 81, 120; 90, 121; 90, 122; 92, 123; 92, 124; 93, 125; 93, 126; 95, 127; 96, 128; 97, 129; 97, 130; 97, 131; 98, 132; 98, 133; 99, 134; 101, 135; 102, 136; 103, 137; 103, 138; 103, 139; 104, 140; 104, 141; 105, 142; 105, 143; 105, 144; 106, 145; 106, 146; 108, 147; 108, 148; 110, 149; 110, 150; 111, 151; 115, 152; 115, 153; 117, 154; 117, 155; 124, 156; 124, 157; 125, 158; 125, 159; 127, 160; 127, 161; 128, 162; 128, 163; 129, 164; 129, 165; 130, 166; 130, 167; 131, 168; 132, 169; 132, 170; 134, 171; 136, 172; 136, 173; 138, 174; 138, 175; 139, 176; 139, 177; 140, 178; 140, 179; 141, 180; 142, 181; 142, 182; 143, 183; 143, 184; 144, 185; 149, 186; 149, 187; 150, 188; 151, 189; 151, 190; 161, 191; 161, 192; 163, 193; 163, 194; 166, 195; 167, 196; 167, 197; 168, 198; 169, 199; 169, 200; 171, 201; 171, 202; 175, 203; 175, 204; 176, 205; 177, 206; 177, 207; 177, 208; 178, 209; 178, 210; 183, 211; 184, 212; 185, 213; 185, 214; 186, 215; 186, 216; 187, 217; 187, 218; 188, 219; 189, 220; 189, 221; 190, 222; 192, 223; 195, 224; 195, 225; 196, 226; 196, 227; 197, 228; 198, 229; 201, 230; 201, 231; 202, 232; 202, 233; 204, 234; 204, 235; 205, 236; 205, 237; 206, 238; 206, 239; 207, 240; 208, 241; 209, 242; 209, 243; 210, 244; 211, 245; 211, 246; 212, 247; 212, 248; 213, 249; 214, 250; 214, 251; 214, 252; 218, 253; 218, 254; 219, 255; 219, 256; 220, 257; 220, 258; 221, 259; 221, 260; 222, 261; 223, 262; 223, 263; 224, 264; 224, 265; 225, 266; 225, 267; 228, 268; 229, 269; 229, 270; 232, 271; 232, 272; 233, 273; 234, 274; 234, 275; 237, 276; 237, 277; 240, 278; 241, 279; 244, 280; 244, 281; 248, 282; 248, 283; 249, 284; 249, 285; 250, 286; 250, 287; 251, 288; 252, 289; 253, 290; 253, 291; 254, 292; 255, 293; 255, 294; 256, 295; 256, 296; 260, 297; 260, 298; 261, 299; 261, 300; 262, 301; 262, 302; 263, 303; 267, 304; 267, 305; 268, 306; 268, 307; 269, 308; 269, 309; 270, 310; 270, 311; 276, 312; 276, 313; 276, 314; 277, 315; 277, 316; 278, 317; 278, 318; 279, 319; 282, 320; 288, 321; 289, 322; 296, 323; 296, 324; 297, 325; 297, 326; 298, 327; 299, 328; 299, 329; 300, 330; 300, 331; 306, 332; 306, 333; 307, 334; 307, 335; 311, 336; 311, 337; 318, 338; 318, 339; 319, 340; 319, 341; 320, 342; 320, 343; 321, 344; 321, 345; 322, 346; 323, 347; 323, 348; 324, 349; 331, 350; 331, 351; 335, 352; 338, 353; 338, 354; 339, 355; 346, 356; 346, 357; 350, 358; 350, 359; 351, 360; 352, 361; 352, 362; 355, 363; 357, 364; 357, 365; 361, 366; 361, 367; 362, 368; 362, 369; 363, 370; 364, 371; 369, 372; 369, 373; 370, 374; 370, 375; 371, 376; 371, 377
def smooth_knn_dist(distances, k, n_iter=64, local_connectivity=1.0, bandwidth=1.0): """Compute a continuous version of the distance to the kth nearest neighbor. That is, this is similar to knn-distance but allows continuous k values rather than requiring an integral k. In esscence we are simply computing the distance such that the cardinality of fuzzy set we generate is k. Parameters ---------- distances: array of shape (n_samples, n_neighbors) Distances to nearest neighbors for each samples. Each row should be a sorted list of distances to a given samples nearest neighbors. k: float The number of nearest neighbors to approximate for. n_iter: int (optional, default 64) We need to binary search for the correct distance value. This is the max number of iterations to use in such a search. local_connectivity: int (optional, default 1) The local connectivity required -- i.e. the number of nearest neighbors that should be assumed to be connected at a local level. The higher this value the more connected the manifold becomes locally. In practice this should be not more than the local intrinsic dimension of the manifold. bandwidth: float (optional, default 1) The target bandwidth of the kernel, larger values will produce larger return values. Returns ------- knn_dist: array of shape (n_samples,) The distance to kth nearest neighbor, as suitably approximated. nn_dist: array of shape (n_samples,) The distance to the 1st nearest neighbor for each point. """ target = np.log2(k) * bandwidth rho = np.zeros(distances.shape[0]) result = np.zeros(distances.shape[0]) for i in range(distances.shape[0]): lo = 0.0 hi = NPY_INFINITY mid = 1.0 # TODO: This is very inefficient, but will do for now. FIXME ith_distances = distances[i] non_zero_dists = ith_distances[ith_distances > 0.0] if non_zero_dists.shape[0] >= local_connectivity: index = int(np.floor(local_connectivity)) interpolation = local_connectivity - index if index > 0: rho[i] = non_zero_dists[index - 1] if interpolation > SMOOTH_K_TOLERANCE: rho[i] += interpolation * (non_zero_dists[index] - non_zero_dists[index - 1]) else: rho[i] = interpolation * non_zero_dists[0] elif non_zero_dists.shape[0] > 0: rho[i] = np.max(non_zero_dists) for n in range(n_iter): psum = 0.0 for j in range(1, distances.shape[1]): d = distances[i, j] - rho[i] if d > 0: psum += np.exp(-(d / mid)) else: psum += 1.0 if np.fabs(psum - target) < SMOOTH_K_TOLERANCE: break if psum > target: hi = mid mid = (lo + hi) / 2.0 else: lo = mid if hi == NPY_INFINITY: mid *= 2 else: mid = (lo + hi) / 2.0 result[i] = mid # TODO: This is very inefficient, but will do for now. FIXME if rho[i] > 0.0: if result[i] < MIN_K_DIST_SCALE * np.mean(ith_distances): result[i] = MIN_K_DIST_SCALE * np.mean(ith_distances) else: if result[i] < MIN_K_DIST_SCALE * np.mean(distances): result[i] = MIN_K_DIST_SCALE * np.mean(distances) return result, rho
0, module; 1, function_definition; 2, function_name:_visible_units; 3, parameters; 4, block; 5, identifier:self; 6, expression_statement; 7, comment:# Sort the units by elevation, then owned (eg refinery) above world (ie 16); 8, comment:# (eg geiser), small above big, and otherwise arbitrary but stable.; 9, for_statement; 10, comment:"""A generator of visible units and their positions as `Point`s, sorted."""; 11, identifier:u; 12, call; 13, block; 14, identifier:sorted; 15, argument_list; 16, expression_statement; 17, attribute; 18, keyword_argument; 19, yield; 20, attribute; 21, identifier:units; 22, identifier:key; 23, lambda; 24, expression_list; 25, attribute; 26, identifier:raw_data; 27, lambda_parameters; 28, tuple; 29, identifier:u; 30, call; 31, attribute; 32, identifier:observation; 33, identifier:u; 34, attribute; 35, comparison_operator:u.owner != 16; 36, unary_operator; 37, attribute; 38, attribute; 39, argument_list; 40, identifier:self; 41, identifier:_obs; 42, attribute; 43, identifier:z; 44, attribute; 45, integer:16; 46, attribute; 47, identifier:u; 48, identifier:tag; 49, attribute; 50, identifier:build; 51, attribute; 52, identifier:u; 53, identifier:pos; 54, identifier:u; 55, identifier:owner; 56, identifier:u; 57, identifier:radius; 58, identifier:point; 59, identifier:Point; 60, identifier:u; 61, identifier:pos
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 4, 6; 4, 7; 4, 8; 4, 9; 6, 10; 9, 11; 9, 12; 9, 13; 12, 14; 12, 15; 13, 16; 15, 17; 15, 18; 16, 19; 17, 20; 17, 21; 18, 22; 18, 23; 19, 24; 20, 25; 20, 26; 23, 27; 23, 28; 24, 29; 24, 30; 25, 31; 25, 32; 27, 33; 28, 34; 28, 35; 28, 36; 28, 37; 30, 38; 30, 39; 31, 40; 31, 41; 34, 42; 34, 43; 35, 44; 35, 45; 36, 46; 37, 47; 37, 48; 38, 49; 38, 50; 39, 51; 42, 52; 42, 53; 44, 54; 44, 55; 46, 56; 46, 57; 49, 58; 49, 59; 51, 60; 51, 61
def _visible_units(self): """A generator of visible units and their positions as `Point`s, sorted.""" # Sort the units by elevation, then owned (eg refinery) above world (ie 16) # (eg geiser), small above big, and otherwise arbitrary but stable. for u in sorted(self._obs.observation.raw_data.units, key=lambda u: (u.pos.z, u.owner != 16, -u.radius, u.tag)): yield u, point.Point.build(u.pos)
0, module; 1, function_definition; 2, function_name:interp; 3, parameters; 4, block; 5, identifier:self; 6, default_parameter; 7, default_parameter; 8, default_parameter; 9, default_parameter; 10, dictionary_splat_pattern; 11, expression_statement; 12, if_statement; 13, expression_statement; 14, return_statement; 15, identifier:coords; 16, None; 17, identifier:method; 18, string; 19, identifier:assume_sorted; 20, False; 21, identifier:kwargs; 22, dictionary; 23, identifier:coords_kwargs; 24, comment:""" Multidimensional interpolation of variables. coords : dict, optional Mapping from dimension names to the new coordinates. new coordinate can be an scalar, array-like or DataArray. If DataArrays are passed as new coordates, their dimensions are used for the broadcasting. method: {'linear', 'nearest'} for multidimensional array, {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'} for 1-dimensional array. assume_sorted: boolean, optional If False, values of x can be in any order and they are sorted first. If True, x has to be an array of monotonically increasing values. kwargs: dictionary Additional keyword passed to scipy's interpolator. **coords_kwarg : {dim: coordinate, ...}, optional The keyword arguments form of ``coords``. One of coords or coords_kwargs must be provided. Returns ------- interpolated: xr.DataArray New dataarray on the new coordinates. Notes ----- scipy is required. See Also -------- scipy.interpolate.interp1d scipy.interpolate.interpn Examples -------- >>> da = xr.DataArray([1, 3], [('x', np.arange(2))]) >>> da.interp(x=0.5) <xarray.DataArray ()> array(2.0) Coordinates: x float64 0.5 """; 25, comparison_operator:self.dtype.kind not in 'uifc'; 26, block; 27, assignment; 28, call; 29, string_content:linear; 30, attribute; 31, string; 32, raise_statement; 33, identifier:ds; 34, call; 35, attribute; 36, argument_list; 37, attribute; 38, identifier:kind; 39, string_content:uifc; 40, call; 41, attribute; 42, argument_list; 43, identifier:self; 44, identifier:_from_temp_dataset; 45, identifier:ds; 46, identifier:self; 47, identifier:dtype; 48, identifier:TypeError; 49, argument_list; 50, call; 51, identifier:interp; 52, identifier:coords; 53, keyword_argument; 54, keyword_argument; 55, keyword_argument; 56, dictionary_splat; 57, call; 58, attribute; 59, argument_list; 60, identifier:method; 61, identifier:method; 62, identifier:kwargs; 63, identifier:kwargs; 64, identifier:assume_sorted; 65, identifier:assume_sorted; 66, identifier:coords_kwargs; 67, attribute; 68, argument_list; 69, identifier:self; 70, identifier:_to_temp_dataset; 71, concatenated_string; 72, identifier:format; 73, attribute; 74, string; 75, string; 76, identifier:self; 77, identifier:dtype; 78, string_content:interp only works for a numeric type array.; 79, string_content:Given {}.
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 3, 9; 3, 10; 4, 11; 4, 12; 4, 13; 4, 14; 6, 15; 6, 16; 7, 17; 7, 18; 8, 19; 8, 20; 9, 21; 9, 22; 10, 23; 11, 24; 12, 25; 12, 26; 13, 27; 14, 28; 18, 29; 25, 30; 25, 31; 26, 32; 27, 33; 27, 34; 28, 35; 28, 36; 30, 37; 30, 38; 31, 39; 32, 40; 34, 41; 34, 42; 35, 43; 35, 44; 36, 45; 37, 46; 37, 47; 40, 48; 40, 49; 41, 50; 41, 51; 42, 52; 42, 53; 42, 54; 42, 55; 42, 56; 49, 57; 50, 58; 50, 59; 53, 60; 53, 61; 54, 62; 54, 63; 55, 64; 55, 65; 56, 66; 57, 67; 57, 68; 58, 69; 58, 70; 67, 71; 67, 72; 68, 73; 71, 74; 71, 75; 73, 76; 73, 77; 74, 78; 75, 79
def interp(self, coords=None, method='linear', assume_sorted=False, kwargs={}, **coords_kwargs): """ Multidimensional interpolation of variables. coords : dict, optional Mapping from dimension names to the new coordinates. new coordinate can be an scalar, array-like or DataArray. If DataArrays are passed as new coordates, their dimensions are used for the broadcasting. method: {'linear', 'nearest'} for multidimensional array, {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'} for 1-dimensional array. assume_sorted: boolean, optional If False, values of x can be in any order and they are sorted first. If True, x has to be an array of monotonically increasing values. kwargs: dictionary Additional keyword passed to scipy's interpolator. **coords_kwarg : {dim: coordinate, ...}, optional The keyword arguments form of ``coords``. One of coords or coords_kwargs must be provided. Returns ------- interpolated: xr.DataArray New dataarray on the new coordinates. Notes ----- scipy is required. See Also -------- scipy.interpolate.interp1d scipy.interpolate.interpn Examples -------- >>> da = xr.DataArray([1, 3], [('x', np.arange(2))]) >>> da.interp(x=0.5) <xarray.DataArray ()> array(2.0) Coordinates: x float64 0.5 """ if self.dtype.kind not in 'uifc': raise TypeError('interp only works for a numeric type array. ' 'Given {}.'.format(self.dtype)) ds = self._to_temp_dataset().interp( coords, method=method, kwargs=kwargs, assume_sorted=assume_sorted, **coords_kwargs) return self._from_temp_dataset(ds)
0, module; 1, function_definition; 2, function_name:interp_like; 3, parameters; 4, block; 5, identifier:self; 6, identifier:other; 7, default_parameter; 8, default_parameter; 9, default_parameter; 10, expression_statement; 11, if_statement; 12, expression_statement; 13, return_statement; 14, identifier:method; 15, string; 16, identifier:assume_sorted; 17, False; 18, identifier:kwargs; 19, dictionary; 20, comment:"""Interpolate this object onto the coordinates of another object, filling out of range values with NaN. Parameters ---------- other : Dataset or DataArray Object with an 'indexes' attribute giving a mapping from dimension names to an 1d array-like, which provides coordinates upon which to index the variables in this dataset. method: string, optional. {'linear', 'nearest'} for multidimensional array, {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'} for 1-dimensional array. 'linear' is used by default. assume_sorted: boolean, optional If False, values of coordinates that are interpolated over can be in any order and they are sorted first. If True, interpolated coordinates are assumed to be an array of monotonically increasing values. kwargs: dictionary, optional Additional keyword passed to scipy's interpolator. Returns ------- interpolated: xr.DataArray Another dataarray by interpolating this dataarray's data along the coordinates of the other object. Notes ----- scipy is required. If the dataarray has object-type coordinates, reindex is used for these coordinates instead of the interpolation. See Also -------- DataArray.interp DataArray.reindex_like """; 21, comparison_operator:self.dtype.kind not in 'uifc'; 22, block; 23, assignment; 24, call; 25, string_content:linear; 26, attribute; 27, string; 28, raise_statement; 29, identifier:ds; 30, call; 31, attribute; 32, argument_list; 33, attribute; 34, identifier:kind; 35, string_content:uifc; 36, call; 37, attribute; 38, argument_list; 39, identifier:self; 40, identifier:_from_temp_dataset; 41, identifier:ds; 42, identifier:self; 43, identifier:dtype; 44, identifier:TypeError; 45, argument_list; 46, call; 47, identifier:interp_like; 48, identifier:other; 49, keyword_argument; 50, keyword_argument; 51, keyword_argument; 52, call; 53, attribute; 54, argument_list; 55, identifier:method; 56, identifier:method; 57, identifier:kwargs; 58, identifier:kwargs; 59, identifier:assume_sorted; 60, identifier:assume_sorted; 61, attribute; 62, argument_list; 63, identifier:self; 64, identifier:_to_temp_dataset; 65, concatenated_string; 66, identifier:format; 67, attribute; 68, string; 69, string; 70, identifier:self; 71, identifier:dtype; 72, string_content:interp only works for a numeric type array.; 73, string_content:Given {}.
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 3, 9; 4, 10; 4, 11; 4, 12; 4, 13; 7, 14; 7, 15; 8, 16; 8, 17; 9, 18; 9, 19; 10, 20; 11, 21; 11, 22; 12, 23; 13, 24; 15, 25; 21, 26; 21, 27; 22, 28; 23, 29; 23, 30; 24, 31; 24, 32; 26, 33; 26, 34; 27, 35; 28, 36; 30, 37; 30, 38; 31, 39; 31, 40; 32, 41; 33, 42; 33, 43; 36, 44; 36, 45; 37, 46; 37, 47; 38, 48; 38, 49; 38, 50; 38, 51; 45, 52; 46, 53; 46, 54; 49, 55; 49, 56; 50, 57; 50, 58; 51, 59; 51, 60; 52, 61; 52, 62; 53, 63; 53, 64; 61, 65; 61, 66; 62, 67; 65, 68; 65, 69; 67, 70; 67, 71; 68, 72; 69, 73
def interp_like(self, other, method='linear', assume_sorted=False, kwargs={}): """Interpolate this object onto the coordinates of another object, filling out of range values with NaN. Parameters ---------- other : Dataset or DataArray Object with an 'indexes' attribute giving a mapping from dimension names to an 1d array-like, which provides coordinates upon which to index the variables in this dataset. method: string, optional. {'linear', 'nearest'} for multidimensional array, {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'} for 1-dimensional array. 'linear' is used by default. assume_sorted: boolean, optional If False, values of coordinates that are interpolated over can be in any order and they are sorted first. If True, interpolated coordinates are assumed to be an array of monotonically increasing values. kwargs: dictionary, optional Additional keyword passed to scipy's interpolator. Returns ------- interpolated: xr.DataArray Another dataarray by interpolating this dataarray's data along the coordinates of the other object. Notes ----- scipy is required. If the dataarray has object-type coordinates, reindex is used for these coordinates instead of the interpolation. See Also -------- DataArray.interp DataArray.reindex_like """ if self.dtype.kind not in 'uifc': raise TypeError('interp only works for a numeric type array. ' 'Given {}.'.format(self.dtype)) ds = self._to_temp_dataset().interp_like( other, method=method, kwargs=kwargs, assume_sorted=assume_sorted) return self._from_temp_dataset(ds)
0, module; 1, function_definition; 2, function_name:is_uniform_spaced; 3, parameters; 4, type; 5, block; 6, identifier:arr; 7, dictionary_splat_pattern; 8, identifier:bool; 9, expression_statement; 10, expression_statement; 11, expression_statement; 12, return_statement; 13, identifier:kwargs; 14, comment:"""Return True if values of an array are uniformly spaced and sorted. >>> is_uniform_spaced(range(5)) True >>> is_uniform_spaced([-4, 0, 100]) False kwargs are additional arguments to ``np.isclose`` """; 15, assignment; 16, assignment; 17, call; 18, identifier:arr; 19, call; 20, identifier:diffs; 21, call; 22, identifier:bool; 23, argument_list; 24, attribute; 25, argument_list; 26, attribute; 27, argument_list; 28, call; 29, identifier:np; 30, identifier:array; 31, identifier:arr; 32, keyword_argument; 33, identifier:np; 34, identifier:diff; 35, identifier:arr; 36, attribute; 37, argument_list; 38, identifier:dtype; 39, identifier:float; 40, identifier:np; 41, identifier:isclose; 42, call; 43, call; 44, dictionary_splat; 45, attribute; 46, argument_list; 47, attribute; 48, argument_list; 49, identifier:kwargs; 50, identifier:diffs; 51, identifier:min; 52, identifier:diffs; 53, identifier:max
0, 1; 1, 2; 1, 3; 1, 4; 1, 5; 3, 6; 3, 7; 4, 8; 5, 9; 5, 10; 5, 11; 5, 12; 7, 13; 9, 14; 10, 15; 11, 16; 12, 17; 15, 18; 15, 19; 16, 20; 16, 21; 17, 22; 17, 23; 19, 24; 19, 25; 21, 26; 21, 27; 23, 28; 24, 29; 24, 30; 25, 31; 25, 32; 26, 33; 26, 34; 27, 35; 28, 36; 28, 37; 32, 38; 32, 39; 36, 40; 36, 41; 37, 42; 37, 43; 37, 44; 42, 45; 42, 46; 43, 47; 43, 48; 44, 49; 45, 50; 45, 51; 47, 52; 47, 53
def is_uniform_spaced(arr, **kwargs) -> bool: """Return True if values of an array are uniformly spaced and sorted. >>> is_uniform_spaced(range(5)) True >>> is_uniform_spaced([-4, 0, 100]) False kwargs are additional arguments to ``np.isclose`` """ arr = np.array(arr, dtype=float) diffs = np.diff(arr) return bool(np.isclose(diffs.min(), diffs.max(), **kwargs))
0, module; 1, function_definition; 2, function_name:broadcast_variables; 3, parameters; 4, block; 5, list_splat_pattern; 6, expression_statement; 7, expression_statement; 8, expression_statement; 9, return_statement; 10, identifier:variables; 11, comment:"""Given any number of variables, return variables with matching dimensions and broadcast data. The data on the returned variables will be a view of the data on the corresponding original arrays, but dimensions will be reordered and inserted so that both broadcast arrays have the same dimensions. The new dimensions are sorted in order of appearance in the first variable's dimensions followed by the second variable's dimensions. """; 12, assignment; 13, assignment; 14, call; 15, identifier:dims_map; 16, call; 17, identifier:dims_tuple; 18, call; 19, identifier:tuple; 20, generator_expression; 21, identifier:_unified_dims; 22, argument_list; 23, identifier:tuple; 24, argument_list; 25, conditional_expression:var.set_dims(dims_map) if var.dims != dims_tuple else var; 26, for_in_clause; 27, identifier:variables; 28, identifier:dims_map; 29, call; 30, comparison_operator:var.dims != dims_tuple; 31, identifier:var; 32, identifier:var; 33, identifier:variables; 34, attribute; 35, argument_list; 36, attribute; 37, identifier:dims_tuple; 38, identifier:var; 39, identifier:set_dims; 40, identifier:dims_map; 41, identifier:var; 42, identifier:dims
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 4, 6; 4, 7; 4, 8; 4, 9; 5, 10; 6, 11; 7, 12; 8, 13; 9, 14; 12, 15; 12, 16; 13, 17; 13, 18; 14, 19; 14, 20; 16, 21; 16, 22; 18, 23; 18, 24; 20, 25; 20, 26; 22, 27; 24, 28; 25, 29; 25, 30; 25, 31; 26, 32; 26, 33; 29, 34; 29, 35; 30, 36; 30, 37; 34, 38; 34, 39; 35, 40; 36, 41; 36, 42
def broadcast_variables(*variables): """Given any number of variables, return variables with matching dimensions and broadcast data. The data on the returned variables will be a view of the data on the corresponding original arrays, but dimensions will be reordered and inserted so that both broadcast arrays have the same dimensions. The new dimensions are sorted in order of appearance in the first variable's dimensions followed by the second variable's dimensions. """ dims_map = _unified_dims(variables) dims_tuple = tuple(dims_map) return tuple(var.set_dims(dims_map) if var.dims != dims_tuple else var for var in variables)
0, module; 1, function_definition; 2, function_name:interp; 3, parameters; 4, block; 5, identifier:self; 6, default_parameter; 7, default_parameter; 8, default_parameter; 9, default_parameter; 10, dictionary_splat_pattern; 11, expression_statement; 12, import_from_statement; 13, expression_statement; 14, expression_statement; 15, expression_statement; 16, function_definition; 17, function_definition; 18, expression_statement; 19, for_statement; 20, expression_statement; 21, expression_statement; 22, expression_statement; 23, comment:# attach indexer as coordinate; 24, expression_statement; 25, expression_statement; 26, comment:# Extract coordinates from indexers; 27, expression_statement; 28, expression_statement; 29, expression_statement; 30, expression_statement; 31, return_statement; 32, identifier:coords; 33, None; 34, identifier:method; 35, string; 36, identifier:assume_sorted; 37, False; 38, identifier:kwargs; 39, dictionary; 40, identifier:coords_kwargs; 41, comment:""" Multidimensional interpolation of Dataset. Parameters ---------- coords : dict, optional Mapping from dimension names to the new coordinates. New coordinate can be a scalar, array-like or DataArray. If DataArrays are passed as new coordates, their dimensions are used for the broadcasting. method: string, optional. {'linear', 'nearest'} for multidimensional array, {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'} for 1-dimensional array. 'linear' is used by default. assume_sorted: boolean, optional If False, values of coordinates that are interpolated over can be in any order and they are sorted first. If True, interpolated coordinates are assumed to be an array of monotonically increasing values. kwargs: dictionary, optional Additional keyword passed to scipy's interpolator. **coords_kwarg : {dim: coordinate, ...}, optional The keyword arguments form of ``coords``. One of coords or coords_kwargs must be provided. Returns ------- interpolated: xr.Dataset New dataset on the new coordinates. Notes ----- scipy is required. See Also -------- scipy.interpolate.interp1d scipy.interpolate.interpn """; 42, relative_import; 43, dotted_name; 44, assignment; 45, assignment; 46, assignment; 47, function_name:maybe_variable; 48, parameters; 49, comment:# workaround to get variable for dimension without coordinate.; 50, block; 51, function_name:_validate_interp_indexer; 52, parameters; 53, comment:# In the case of datetimes, the restrictions placed on indexers; 54, comment:# used with interp are stronger than those which are placed on; 55, comment:# isel, so we need an additional check after _validate_indexers.; 56, block; 57, assignment; 58, pattern_list; 59, call; 60, block; 61, assignment; 62, assignment; 63, assignment; 64, call; 65, call; 66, assignment; 67, call; 68, call; 69, assignment; 70, call; 71, string_content:linear; 72, import_prefix; 73, identifier:missing; 74, identifier:coords; 75, call; 76, identifier:indexers; 77, call; 78, identifier:obj; 79, conditional_expression:self if assume_sorted else self.sortby([k for k in coords]); 80, identifier:obj; 81, identifier:k; 82, try_statement; 83, identifier:x; 84, identifier:new_x; 85, if_statement; 86, identifier:variables; 87, call; 88, identifier:name; 89, identifier:var; 90, attribute; 91, argument_list; 92, if_statement; 93, identifier:coord_names; 94, call; 95, identifier:indexes; 96, call; 97, identifier:selected; 98, call; 99, attribute; 100, argument_list; 101, attribute; 102, generator_expression; 103, pattern_list; 104, parenthesized_expression; 105, attribute; 106, argument_list; 107, attribute; 108, argument_list; 109, identifier:coord_names; 110, parenthesized_expression; 111, attribute; 112, argument_list; 113, identifier:either_dict_or_kwargs; 114, argument_list; 115, identifier:OrderedDict; 116, argument_list; 117, identifier:self; 118, identifier:assume_sorted; 119, call; 120, block; 121, except_clause; 122, parenthesized_expression; 123, block; 124, else_clause; 125, identifier:OrderedDict; 126, argument_list; 127, attribute; 128, identifier:items; 129, comparison_operator:name not in indexers; 130, block; 131, attribute; 132, argument_list; 133, identifier:OrderedDict; 134, generator_expression; 135, attribute; 136, argument_list; 137, identifier:variables; 138, identifier:update; 139, identifier:indexers; 140, identifier:indexes; 141, identifier:update; 142, tuple; 143, for_in_clause; 144, if_clause; 145, identifier:coord_vars; 146, identifier:new_indexes; 147, call; 148, identifier:variables; 149, identifier:update; 150, identifier:coord_vars; 151, identifier:indexes; 152, identifier:update; 153, identifier:new_indexes; 154, call; 155, identifier:self; 156, identifier:_replace_with_new_dims; 157, identifier:variables; 158, identifier:coord_names; 159, keyword_argument; 160, identifier:coords; 161, identifier:coords_kwargs; 162, string; 163, call; 164, attribute; 165, argument_list; 166, return_statement; 167, identifier:KeyError; 168, block; 169, boolean_operator; 170, raise_statement; 171, block; 172, identifier:obj; 173, identifier:_variables; 174, identifier:name; 175, identifier:indexers; 176, if_statement; 177, call; 178, identifier:intersection; 179, attribute; 180, tuple; 181, for_in_clause; 182, if_clause; 183, identifier:self; 184, identifier:_replace_with_new_dims; 185, call; 186, identifier:coord_names; 187, keyword_argument; 188, identifier:k; 189, call; 190, pattern_list; 191, call; 192, comparison_operator:v.dims == (k,); 193, attribute; 194, argument_list; 195, attribute; 196, argument_list; 197, identifier:indexes; 198, identifier:indexes; 199, string_content:interp; 200, attribute; 201, argument_list; 202, identifier:self; 203, identifier:sortby; 204, list_comprehension; 205, subscript; 206, return_statement; 207, call; 208, not_operator; 209, call; 210, return_statement; 211, comparison_operator:var.dtype.kind in 'uifc'; 212, block; 213, elif_clause; 214, identifier:set; 215, argument_list; 216, identifier:obj; 217, identifier:_coord_names; 218, identifier:k; 219, identifier:v; 220, pattern_list; 221, call; 222, comparison_operator:k not in indexers; 223, attribute; 224, argument_list; 225, identifier:indexes; 226, identifier:indexes; 227, attribute; 228, argument_list; 229, identifier:k; 230, identifier:v; 231, attribute; 232, argument_list; 233, attribute; 234, tuple; 235, identifier:selected; 236, identifier:_get_indexers_coords_and_indexes; 237, identifier:coords; 238, call; 239, identifier:union; 240, identifier:coord_vars; 241, identifier:self; 242, identifier:_validate_indexers; 243, identifier:coords; 244, identifier:k; 245, for_in_clause; 246, attribute; 247, identifier:k; 248, call; 249, identifier:_contains_datetime_like_objects; 250, argument_list; 251, call; 252, identifier:TypeError; 253, argument_list; 254, tuple; 255, attribute; 256, string; 257, expression_statement; 258, expression_statement; 259, call; 260, comment:# keep unrelated object array; 261, block; 262, identifier:variables; 263, identifier:k; 264, identifier:v; 265, attribute; 266, argument_list; 267, identifier:k; 268, identifier:indexers; 269, identifier:variables; 270, identifier:copy; 271, identifier:v; 272, identifier:to_index; 273, identifier:indexers; 274, identifier:items; 275, identifier:v; 276, identifier:dims; 277, identifier:k; 278, attribute; 279, argument_list; 280, identifier:k; 281, identifier:coords; 282, identifier:obj; 283, identifier:_variables; 284, identifier:as_variable; 285, argument_list; 286, identifier:x; 287, identifier:_contains_datetime_like_objects; 288, argument_list; 289, call; 290, identifier:x; 291, identifier:new_x; 292, attribute; 293, identifier:kind; 294, string_content:uifc; 295, assignment; 296, assignment; 297, identifier:all; 298, generator_expression; 299, expression_statement; 300, attribute; 301, identifier:items; 302, call; 303, identifier:intersection; 304, attribute; 305, tuple; 306, identifier:new_x; 307, attribute; 308, argument_list; 309, identifier:var; 310, identifier:dtype; 311, identifier:var_indexers; 312, dictionary_comprehension; 313, subscript; 314, call; 315, comparison_operator:d not in indexers; 316, for_in_clause; 317, assignment; 318, identifier:obj; 319, identifier:indexes; 320, identifier:set; 321, argument_list; 322, identifier:obj; 323, identifier:_coord_names; 324, identifier:k; 325, call; 326, concatenated_string; 327, identifier:format; 328, identifier:new_x; 329, pair; 330, for_in_clause; 331, if_clause; 332, identifier:variables; 333, identifier:name; 334, attribute; 335, argument_list; 336, identifier:d; 337, identifier:indexers; 338, identifier:d; 339, attribute; 340, subscript; 341, identifier:var; 342, identifier:variables; 343, identifier:range; 344, argument_list; 345, string; 346, string; 347, string; 348, string; 349, string; 350, identifier:k; 351, call; 352, pattern_list; 353, call; 354, comparison_operator:k in var.dims; 355, identifier:missing; 356, identifier:interp; 357, identifier:var; 358, identifier:var_indexers; 359, identifier:method; 360, dictionary_splat; 361, identifier:var; 362, identifier:dims; 363, identifier:variables; 364, identifier:name; 365, subscript; 366, string_content:When interpolating over a datetime-like; 367, string_content:coordinate, the coordinates to; 368, string_content:interpolate to must be either datetime; 369, string_content:strings or datetimes.; 370, string_content; 371, identifier:_validate_interp_indexer; 372, argument_list; 373, identifier:k; 374, identifier:v; 375, attribute; 376, argument_list; 377, identifier:k; 378, attribute; 379, identifier:kwargs; 380, attribute; 381, identifier:k; 382, escape_sequence:\n; 383, call; 384, identifier:v; 385, identifier:indexers; 386, identifier:items; 387, identifier:var; 388, identifier:dims; 389, identifier:obj; 390, identifier:dims; 391, identifier:maybe_variable; 392, argument_list; 393, identifier:obj; 394, identifier:k
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 3, 9; 3, 10; 4, 11; 4, 12; 4, 13; 4, 14; 4, 15; 4, 16; 4, 17; 4, 18; 4, 19; 4, 20; 4, 21; 4, 22; 4, 23; 4, 24; 4, 25; 4, 26; 4, 27; 4, 28; 4, 29; 4, 30; 4, 31; 6, 32; 6, 33; 7, 34; 7, 35; 8, 36; 8, 37; 9, 38; 9, 39; 10, 40; 11, 41; 12, 42; 12, 43; 13, 44; 14, 45; 15, 46; 16, 47; 16, 48; 16, 49; 16, 50; 17, 51; 17, 52; 17, 53; 17, 54; 17, 55; 17, 56; 18, 57; 19, 58; 19, 59; 19, 60; 20, 61; 21, 62; 22, 63; 24, 64; 25, 65; 27, 66; 28, 67; 29, 68; 30, 69; 31, 70; 35, 71; 42, 72; 43, 73; 44, 74; 44, 75; 45, 76; 45, 77; 46, 78; 46, 79; 48, 80; 48, 81; 50, 82; 52, 83; 52, 84; 56, 85; 57, 86; 57, 87; 58, 88; 58, 89; 59, 90; 59, 91; 60, 92; 61, 93; 61, 94; 62, 95; 62, 96; 63, 97; 63, 98; 64, 99; 64, 100; 65, 101; 65, 102; 66, 103; 66, 104; 67, 105; 67, 106; 68, 107; 68, 108; 69, 109; 69, 110; 70, 111; 70, 112; 75, 113; 75, 114; 77, 115; 77, 116; 79, 117; 79, 118; 79, 119; 82, 120; 82, 121; 85, 122; 85, 123; 85, 124; 87, 125; 87, 126; 90, 127; 90, 128; 92, 129; 92, 130; 94, 131; 94, 132; 96, 133; 96, 134; 98, 135; 98, 136; 99, 137; 99, 138; 100, 139; 101, 140; 101, 141; 102, 142; 102, 143; 102, 144; 103, 145; 103, 146; 104, 147; 105, 148; 105, 149; 106, 150; 107, 151; 107, 152; 108, 153; 110, 154; 111, 155; 111, 156; 112, 157; 112, 158; 112, 159; 114, 160; 114, 161; 114, 162; 116, 163; 119, 164; 119, 165; 120, 166; 121, 167; 121, 168; 122, 169; 123, 170; 124, 171; 127, 172; 127, 173; 129, 174; 129, 175; 130, 176; 131, 177; 131, 178; 132, 179; 134, 180; 134, 181; 134, 182; 135, 183; 135, 184; 136, 185; 136, 186; 136, 187; 142, 188; 142, 189; 143, 190; 143, 191; 144, 192; 147, 193; 147, 194; 154, 195; 154, 196; 159, 197; 159, 198; 162, 199; 163, 200; 163, 201; 164, 202; 164, 203; 165, 204; 166, 205; 168, 206; 169, 207; 169, 208; 170, 209; 171, 210; 176, 211; 176, 212; 176, 213; 177, 214; 177, 215; 179, 216; 179, 217; 180, 218; 180, 219; 181, 220; 181, 221; 182, 222; 185, 223; 185, 224; 187, 225; 187, 226; 189, 227; 189, 228; 190, 229; 190, 230; 191, 231; 191, 232; 192, 233; 192, 234; 193, 235; 193, 236; 194, 237; 195, 238; 195, 239; 196, 240; 200, 241; 200, 242; 201, 243; 204, 244; 204, 245; 205, 246; 205, 247; 206, 248; 207, 249; 207, 250; 208, 251; 209, 252; 209, 253; 210, 254; 211, 255; 211, 256; 212, 257; 212, 258; 213, 259; 213, 260; 213, 261; 215, 262; 220, 263; 220, 264; 221, 265; 221, 266; 222, 267; 222, 268; 223, 269; 223, 270; 227, 271; 227, 272; 231, 273; 231, 274; 233, 275; 233, 276; 234, 277; 238, 278; 238, 279; 245, 280; 245, 281; 246, 282; 246, 283; 248, 284; 248, 285; 250, 286; 251, 287; 251, 288; 253, 289; 254, 290; 254, 291; 255, 292; 255, 293; 256, 294; 257, 295; 258, 296; 259, 297; 259, 298; 261, 299; 265, 300; 265, 301; 278, 302; 278, 303; 279, 304; 285, 305; 288, 306; 289, 307; 289, 308; 292, 309; 292, 310; 295, 311; 295, 312; 296, 313; 296, 314; 298, 315; 298, 316; 299, 317; 300, 318; 300, 319; 302, 320; 302, 321; 304, 322; 304, 323; 305, 324; 305, 325; 307, 326; 307, 327; 308, 328; 312, 329; 312, 330; 312, 331; 313, 332; 313, 333; 314, 334; 314, 335; 315, 336; 315, 337; 316, 338; 316, 339; 317, 340; 317, 341; 321, 342; 325, 343; 325, 344; 326, 345; 326, 346; 326, 347; 326, 348; 326, 349; 329, 350; 329, 351; 330, 352; 330, 353; 331, 354; 334, 355; 334, 356; 335, 357; 335, 358; 335, 359; 335, 360; 339, 361; 339, 362; 340, 363; 340, 364; 344, 365; 345, 366; 346, 367; 347, 368; 348, 369; 349, 370; 351, 371; 351, 372; 352, 373; 352, 374; 353, 375; 353, 376; 354, 377; 354, 378; 360, 379; 365, 380; 365, 381; 370, 382; 372, 383; 372, 384; 375, 385; 375, 386; 378, 387; 378, 388; 380, 389; 380, 390; 383, 391; 383, 392; 392, 393; 392, 394
def interp(self, coords=None, method='linear', assume_sorted=False, kwargs={}, **coords_kwargs): """ Multidimensional interpolation of Dataset. Parameters ---------- coords : dict, optional Mapping from dimension names to the new coordinates. New coordinate can be a scalar, array-like or DataArray. If DataArrays are passed as new coordates, their dimensions are used for the broadcasting. method: string, optional. {'linear', 'nearest'} for multidimensional array, {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'} for 1-dimensional array. 'linear' is used by default. assume_sorted: boolean, optional If False, values of coordinates that are interpolated over can be in any order and they are sorted first. If True, interpolated coordinates are assumed to be an array of monotonically increasing values. kwargs: dictionary, optional Additional keyword passed to scipy's interpolator. **coords_kwarg : {dim: coordinate, ...}, optional The keyword arguments form of ``coords``. One of coords or coords_kwargs must be provided. Returns ------- interpolated: xr.Dataset New dataset on the new coordinates. Notes ----- scipy is required. See Also -------- scipy.interpolate.interp1d scipy.interpolate.interpn """ from . import missing coords = either_dict_or_kwargs(coords, coords_kwargs, 'interp') indexers = OrderedDict(self._validate_indexers(coords)) obj = self if assume_sorted else self.sortby([k for k in coords]) def maybe_variable(obj, k): # workaround to get variable for dimension without coordinate. try: return obj._variables[k] except KeyError: return as_variable((k, range(obj.dims[k]))) def _validate_interp_indexer(x, new_x): # In the case of datetimes, the restrictions placed on indexers # used with interp are stronger than those which are placed on # isel, so we need an additional check after _validate_indexers. if (_contains_datetime_like_objects(x) and not _contains_datetime_like_objects(new_x)): raise TypeError('When interpolating over a datetime-like ' 'coordinate, the coordinates to ' 'interpolate to must be either datetime ' 'strings or datetimes. ' 'Instead got\n{}'.format(new_x)) else: return (x, new_x) variables = OrderedDict() for name, var in obj._variables.items(): if name not in indexers: if var.dtype.kind in 'uifc': var_indexers = { k: _validate_interp_indexer(maybe_variable(obj, k), v) for k, v in indexers.items() if k in var.dims } variables[name] = missing.interp( var, var_indexers, method, **kwargs) elif all(d not in indexers for d in var.dims): # keep unrelated object array variables[name] = var coord_names = set(variables).intersection(obj._coord_names) indexes = OrderedDict( (k, v) for k, v in obj.indexes.items() if k not in indexers) selected = self._replace_with_new_dims( variables.copy(), coord_names, indexes=indexes) # attach indexer as coordinate variables.update(indexers) indexes.update( (k, v.to_index()) for k, v in indexers.items() if v.dims == (k,) ) # Extract coordinates from indexers coord_vars, new_indexes = ( selected._get_indexers_coords_and_indexes(coords)) variables.update(coord_vars) indexes.update(new_indexes) coord_names = (set(variables) .intersection(obj._coord_names) .union(coord_vars)) return self._replace_with_new_dims( variables, coord_names, indexes=indexes)
0, module; 1, function_definition; 2, function_name:interp_like; 3, parameters; 4, block; 5, identifier:self; 6, identifier:other; 7, default_parameter; 8, default_parameter; 9, default_parameter; 10, expression_statement; 11, expression_statement; 12, expression_statement; 13, expression_statement; 14, for_statement; 15, expression_statement; 16, if_statement; 17, return_statement; 18, identifier:method; 19, string; 20, identifier:assume_sorted; 21, False; 22, identifier:kwargs; 23, dictionary; 24, comment:"""Interpolate this object onto the coordinates of another object, filling the out of range values with NaN. Parameters ---------- other : Dataset or DataArray Object with an 'indexes' attribute giving a mapping from dimension names to an 1d array-like, which provides coordinates upon which to index the variables in this dataset. method: string, optional. {'linear', 'nearest'} for multidimensional array, {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'} for 1-dimensional array. 'linear' is used by default. assume_sorted: boolean, optional If False, values of coordinates that are interpolated over can be in any order and they are sorted first. If True, interpolated coordinates are assumed to be an array of monotonically increasing values. kwargs: dictionary, optional Additional keyword passed to scipy's interpolator. Returns ------- interpolated: xr.Dataset Another dataset by interpolating this dataset's data along the coordinates of the other object. Notes ----- scipy is required. If the dataset has object-type coordinates, reindex is used for these coordinates instead of the interpolation. See Also -------- Dataset.interp Dataset.reindex_like """; 25, assignment; 26, assignment; 27, assignment; 28, pattern_list; 29, call; 30, block; 31, assignment; 32, identifier:object_coords; 33, comment:# We do not support interpolation along object coordinate.; 34, comment:# reindex instead.; 35, block; 36, call; 37, string_content:linear; 38, identifier:coords; 39, call; 40, identifier:numeric_coords; 41, call; 42, identifier:object_coords; 43, call; 44, identifier:k; 45, identifier:v; 46, attribute; 47, argument_list; 48, if_statement; 49, identifier:ds; 50, identifier:self; 51, expression_statement; 52, attribute; 53, argument_list; 54, attribute; 55, argument_list; 56, identifier:OrderedDict; 57, argument_list; 58, identifier:OrderedDict; 59, argument_list; 60, identifier:coords; 61, identifier:items; 62, comparison_operator:v.dtype.kind in 'uifcMm'; 63, block; 64, else_clause; 65, assignment; 66, identifier:ds; 67, identifier:interp; 68, identifier:numeric_coords; 69, identifier:method; 70, identifier:assume_sorted; 71, identifier:kwargs; 72, identifier:alignment; 73, identifier:reindex_like_indexers; 74, identifier:self; 75, identifier:other; 76, attribute; 77, string; 78, expression_statement; 79, block; 80, identifier:ds; 81, call; 82, attribute; 83, identifier:kind; 84, string_content:uifcMm; 85, assignment; 86, expression_statement; 87, attribute; 88, argument_list; 89, identifier:v; 90, identifier:dtype; 91, subscript; 92, identifier:v; 93, assignment; 94, identifier:self; 95, identifier:reindex; 96, identifier:object_coords; 97, identifier:numeric_coords; 98, identifier:k; 99, subscript; 100, identifier:v; 101, identifier:object_coords; 102, identifier:k
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 3, 9; 4, 10; 4, 11; 4, 12; 4, 13; 4, 14; 4, 15; 4, 16; 4, 17; 7, 18; 7, 19; 8, 20; 8, 21; 9, 22; 9, 23; 10, 24; 11, 25; 12, 26; 13, 27; 14, 28; 14, 29; 14, 30; 15, 31; 16, 32; 16, 33; 16, 34; 16, 35; 17, 36; 19, 37; 25, 38; 25, 39; 26, 40; 26, 41; 27, 42; 27, 43; 28, 44; 28, 45; 29, 46; 29, 47; 30, 48; 31, 49; 31, 50; 35, 51; 36, 52; 36, 53; 39, 54; 39, 55; 41, 56; 41, 57; 43, 58; 43, 59; 46, 60; 46, 61; 48, 62; 48, 63; 48, 64; 51, 65; 52, 66; 52, 67; 53, 68; 53, 69; 53, 70; 53, 71; 54, 72; 54, 73; 55, 74; 55, 75; 62, 76; 62, 77; 63, 78; 64, 79; 65, 80; 65, 81; 76, 82; 76, 83; 77, 84; 78, 85; 79, 86; 81, 87; 81, 88; 82, 89; 82, 90; 85, 91; 85, 92; 86, 93; 87, 94; 87, 95; 88, 96; 91, 97; 91, 98; 93, 99; 93, 100; 99, 101; 99, 102
def interp_like(self, other, method='linear', assume_sorted=False, kwargs={}): """Interpolate this object onto the coordinates of another object, filling the out of range values with NaN. Parameters ---------- other : Dataset or DataArray Object with an 'indexes' attribute giving a mapping from dimension names to an 1d array-like, which provides coordinates upon which to index the variables in this dataset. method: string, optional. {'linear', 'nearest'} for multidimensional array, {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'} for 1-dimensional array. 'linear' is used by default. assume_sorted: boolean, optional If False, values of coordinates that are interpolated over can be in any order and they are sorted first. If True, interpolated coordinates are assumed to be an array of monotonically increasing values. kwargs: dictionary, optional Additional keyword passed to scipy's interpolator. Returns ------- interpolated: xr.Dataset Another dataset by interpolating this dataset's data along the coordinates of the other object. Notes ----- scipy is required. If the dataset has object-type coordinates, reindex is used for these coordinates instead of the interpolation. See Also -------- Dataset.interp Dataset.reindex_like """ coords = alignment.reindex_like_indexers(self, other) numeric_coords = OrderedDict() object_coords = OrderedDict() for k, v in coords.items(): if v.dtype.kind in 'uifcMm': numeric_coords[k] = v else: object_coords[k] = v ds = self if object_coords: # We do not support interpolation along object coordinate. # reindex instead. ds = self.reindex(object_coords) return ds.interp(numeric_coords, method, assume_sorted, kwargs)
0, module; 1, function_definition; 2, function_name:transpose; 3, parameters; 4, block; 5, identifier:self; 6, list_splat_pattern; 7, expression_statement; 8, if_statement; 9, expression_statement; 10, for_statement; 11, return_statement; 12, identifier:dims; 13, comment:"""Return a new Dataset object with all array dimensions transposed. Although the order of dimensions on each array will change, the dataset dimensions themselves will remain in fixed (sorted) order. Parameters ---------- *dims : str, optional By default, reverse the dimensions on each array. Otherwise, reorder the dimensions to this order. Returns ------- transposed : Dataset Each array in the dataset (including) coordinates will be transposed to the given order. Notes ----- This operation returns a view of each array's data. It is lazy for dask-backed DataArrays but not for numpy-backed DataArrays -- the data will be fully loaded into memory. See Also -------- numpy.transpose DataArray.transpose """; 14, identifier:dims; 15, block; 16, assignment; 17, pattern_list; 18, call; 19, block; 20, identifier:ds; 21, if_statement; 22, identifier:ds; 23, call; 24, identifier:name; 25, identifier:var; 26, attribute; 27, argument_list; 28, expression_statement; 29, expression_statement; 30, binary_operator:set(dims) ^ set(self.dims); 31, block; 32, attribute; 33, argument_list; 34, attribute; 35, identifier:items; 36, assignment; 37, assignment; 38, call; 39, call; 40, raise_statement; 41, identifier:self; 42, identifier:copy; 43, identifier:self; 44, identifier:_variables; 45, identifier:var_dims; 46, call; 47, subscript; 48, call; 49, identifier:set; 50, argument_list; 51, identifier:set; 52, argument_list; 53, call; 54, identifier:tuple; 55, generator_expression; 56, attribute; 57, identifier:name; 58, attribute; 59, argument_list; 60, identifier:dims; 61, attribute; 62, identifier:ValueError; 63, argument_list; 64, identifier:dim; 65, for_in_clause; 66, if_clause; 67, identifier:ds; 68, identifier:_variables; 69, identifier:var; 70, identifier:transpose; 71, list_splat; 72, identifier:self; 73, identifier:dims; 74, binary_operator:'arguments to transpose (%s) must be ' 'permuted dataset dimensions (%s)' % (dims, tuple(self.dims)); 75, identifier:dim; 76, identifier:dims; 77, comparison_operator:dim in var.dims; 78, identifier:var_dims; 79, concatenated_string; 80, tuple; 81, identifier:dim; 82, attribute; 83, string; 84, string; 85, identifier:dims; 86, call; 87, identifier:var; 88, identifier:dims; 89, string_content:arguments to transpose (%s) must be; 90, string_content:permuted dataset dimensions (%s); 91, identifier:tuple; 92, argument_list; 93, attribute; 94, identifier:self; 95, identifier:dims
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 4, 9; 4, 10; 4, 11; 6, 12; 7, 13; 8, 14; 8, 15; 9, 16; 10, 17; 10, 18; 10, 19; 11, 20; 15, 21; 16, 22; 16, 23; 17, 24; 17, 25; 18, 26; 18, 27; 19, 28; 19, 29; 21, 30; 21, 31; 23, 32; 23, 33; 26, 34; 26, 35; 28, 36; 29, 37; 30, 38; 30, 39; 31, 40; 32, 41; 32, 42; 34, 43; 34, 44; 36, 45; 36, 46; 37, 47; 37, 48; 38, 49; 38, 50; 39, 51; 39, 52; 40, 53; 46, 54; 46, 55; 47, 56; 47, 57; 48, 58; 48, 59; 50, 60; 52, 61; 53, 62; 53, 63; 55, 64; 55, 65; 55, 66; 56, 67; 56, 68; 58, 69; 58, 70; 59, 71; 61, 72; 61, 73; 63, 74; 65, 75; 65, 76; 66, 77; 71, 78; 74, 79; 74, 80; 77, 81; 77, 82; 79, 83; 79, 84; 80, 85; 80, 86; 82, 87; 82, 88; 83, 89; 84, 90; 86, 91; 86, 92; 92, 93; 93, 94; 93, 95
def transpose(self, *dims): """Return a new Dataset object with all array dimensions transposed. Although the order of dimensions on each array will change, the dataset dimensions themselves will remain in fixed (sorted) order. Parameters ---------- *dims : str, optional By default, reverse the dimensions on each array. Otherwise, reorder the dimensions to this order. Returns ------- transposed : Dataset Each array in the dataset (including) coordinates will be transposed to the given order. Notes ----- This operation returns a view of each array's data. It is lazy for dask-backed DataArrays but not for numpy-backed DataArrays -- the data will be fully loaded into memory. See Also -------- numpy.transpose DataArray.transpose """ if dims: if set(dims) ^ set(self.dims): raise ValueError('arguments to transpose (%s) must be ' 'permuted dataset dimensions (%s)' % (dims, tuple(self.dims))) ds = self.copy() for name, var in self._variables.items(): var_dims = tuple(dim for dim in dims if dim in var.dims) ds._variables[name] = var.transpose(*var_dims) return ds
0, module; 1, function_definition; 2, function_name:to_dask_dataframe; 3, parameters; 4, block; 5, identifier:self; 6, default_parameter; 7, default_parameter; 8, expression_statement; 9, import_statement; 10, import_statement; 11, if_statement; 12, expression_statement; 13, expression_statement; 14, expression_statement; 15, expression_statement; 16, expression_statement; 17, for_statement; 18, expression_statement; 19, if_statement; 20, return_statement; 21, identifier:dim_order; 22, None; 23, identifier:set_index; 24, False; 25, comment:""" Convert this dataset into a dask.dataframe.DataFrame. The dimensions, coordinates and data variables in this dataset form the columns of the DataFrame. Parameters ---------- dim_order : list, optional Hierarchical dimension order for the resulting dataframe. All arrays are transposed to this order and then written out as flat vectors in contiguous order, so the last dimension in this list will be contiguous in the resulting DataFrame. This has a major influence on which operations are efficient on the resulting dask dataframe. If provided, must include all dimensions on this dataset. By default, dimensions are sorted alphabetically. set_index : bool, optional If set_index=True, the dask DataFrame is indexed by this dataset's coordinate. Since dask DataFrames to not support multi-indexes, set_index only works if the dataset only contains one dimension. Returns ------- dask.dataframe.DataFrame """; 26, aliased_import; 27, aliased_import; 28, comparison_operator:dim_order is None; 29, block; 30, elif_clause; 31, assignment; 32, assignment; 33, call; 34, call; 35, assignment; 36, identifier:name; 37, identifier:columns; 38, block; 39, assignment; 40, identifier:set_index; 41, block; 42, identifier:df; 43, dotted_name; 44, identifier:da; 45, dotted_name; 46, identifier:dd; 47, identifier:dim_order; 48, None; 49, expression_statement; 50, comparison_operator:set(dim_order) != set(self.dims); 51, block; 52, identifier:ordered_dims; 53, call; 54, identifier:columns; 55, call; 56, attribute; 57, generator_expression; 58, attribute; 59, argument_list; 60, identifier:series_list; 61, list; 62, try_statement; 63, comment:# IndexVariable objects have a dummy .chunk() method; 64, if_statement; 65, expression_statement; 66, expression_statement; 67, expression_statement; 68, identifier:df; 69, call; 70, if_statement; 71, identifier:dask; 72, identifier:array; 73, identifier:dask; 74, identifier:dataframe; 75, assignment; 76, call; 77, call; 78, raise_statement; 79, identifier:OrderedDict; 80, generator_expression; 81, identifier:list; 82, argument_list; 83, identifier:columns; 84, identifier:extend; 85, identifier:k; 86, for_in_clause; 87, if_clause; 88, identifier:columns; 89, identifier:extend; 90, attribute; 91, block; 92, except_clause; 93, call; 94, block; 95, assignment; 96, assignment; 97, call; 98, attribute; 99, argument_list; 100, comparison_operator:len(dim_order) == 1; 101, block; 102, else_clause; 103, identifier:dim_order; 104, call; 105, identifier:set; 106, argument_list; 107, identifier:set; 108, argument_list; 109, call; 110, tuple; 111, for_in_clause; 112, identifier:ordered_dims; 113, identifier:k; 114, attribute; 115, comparison_operator:k not in self.dims; 116, identifier:self; 117, identifier:data_vars; 118, expression_statement; 119, identifier:KeyError; 120, comment:# dimension without a matching coordinate; 121, block; 122, identifier:isinstance; 123, argument_list; 124, expression_statement; 125, identifier:dask_array; 126, attribute; 127, identifier:series; 128, call; 129, attribute; 130, argument_list; 131, identifier:dd; 132, identifier:concat; 133, identifier:series_list; 134, keyword_argument; 135, call; 136, integer:1; 137, expression_statement; 138, expression_statement; 139, comment:# triggers an error about multi-indexes, even if only one; 140, comment:# dimension is passed; 141, block; 142, identifier:list; 143, argument_list; 144, identifier:dim_order; 145, attribute; 146, identifier:ValueError; 147, argument_list; 148, identifier:k; 149, subscript; 150, identifier:k; 151, identifier:dim_order; 152, identifier:self; 153, identifier:coords; 154, identifier:k; 155, attribute; 156, assignment; 157, expression_statement; 158, expression_statement; 159, expression_statement; 160, identifier:var; 161, identifier:IndexVariable; 162, assignment; 163, call; 164, identifier:data; 165, attribute; 166, argument_list; 167, identifier:series_list; 168, identifier:append; 169, identifier:series; 170, identifier:axis; 171, integer:1; 172, identifier:len; 173, argument_list; 174, assignment; 175, assignment; 176, expression_statement; 177, attribute; 178, identifier:self; 179, identifier:dims; 180, call; 181, attribute; 182, identifier:k; 183, identifier:self; 184, identifier:dims; 185, identifier:var; 186, subscript; 187, assignment; 188, assignment; 189, assignment; 190, identifier:var; 191, call; 192, attribute; 193, argument_list; 194, identifier:dd; 195, identifier:from_array; 196, call; 197, keyword_argument; 198, identifier:dim_order; 199, tuple_pattern; 200, identifier:dim_order; 201, identifier:df; 202, call; 203, assignment; 204, identifier:self; 205, identifier:dims; 206, attribute; 207, argument_list; 208, identifier:self; 209, identifier:dims; 210, attribute; 211, identifier:name; 212, identifier:size; 213, subscript; 214, identifier:data; 215, call; 216, identifier:var; 217, call; 218, attribute; 219, argument_list; 220, call; 221, identifier:chunk; 222, attribute; 223, attribute; 224, argument_list; 225, identifier:columns; 226, list; 227, identifier:dim; 228, attribute; 229, argument_list; 230, identifier:df; 231, call; 232, concatenated_string; 233, identifier:format; 234, identifier:dim_order; 235, call; 236, identifier:self; 237, identifier:variables; 238, attribute; 239, identifier:name; 240, attribute; 241, argument_list; 242, identifier:Variable; 243, argument_list; 244, identifier:var; 245, identifier:to_base_variable; 246, attribute; 247, argument_list; 248, identifier:self; 249, identifier:chunks; 250, identifier:dask_array; 251, identifier:reshape; 252, unary_operator; 253, identifier:name; 254, identifier:df; 255, identifier:set_index; 256, identifier:dim; 257, attribute; 258, argument_list; 259, string; 260, string; 261, identifier:list; 262, argument_list; 263, identifier:self; 264, identifier:dims; 265, identifier:da; 266, identifier:arange; 267, identifier:size; 268, keyword_argument; 269, keyword_argument; 270, tuple; 271, identifier:data; 272, identifier:var; 273, identifier:set_dims; 274, identifier:ordered_dims; 275, integer:1; 276, identifier:df; 277, identifier:set_index; 278, identifier:dim_order; 279, string_content:dim_order {} does not match the set of dimensions on this; 280, string_content:Dataset: {}; 281, attribute; 282, identifier:chunks; 283, identifier:size; 284, identifier:dtype; 285, attribute; 286, identifier:name; 287, identifier:self; 288, identifier:dims; 289, identifier:np; 290, identifier:int64
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 4, 14; 4, 15; 4, 16; 4, 17; 4, 18; 4, 19; 4, 20; 6, 21; 6, 22; 7, 23; 7, 24; 8, 25; 9, 26; 10, 27; 11, 28; 11, 29; 11, 30; 12, 31; 13, 32; 14, 33; 15, 34; 16, 35; 17, 36; 17, 37; 17, 38; 18, 39; 19, 40; 19, 41; 20, 42; 26, 43; 26, 44; 27, 45; 27, 46; 28, 47; 28, 48; 29, 49; 30, 50; 30, 51; 31, 52; 31, 53; 32, 54; 32, 55; 33, 56; 33, 57; 34, 58; 34, 59; 35, 60; 35, 61; 38, 62; 38, 63; 38, 64; 38, 65; 38, 66; 38, 67; 39, 68; 39, 69; 41, 70; 43, 71; 43, 72; 45, 73; 45, 74; 49, 75; 50, 76; 50, 77; 51, 78; 53, 79; 53, 80; 55, 81; 55, 82; 56, 83; 56, 84; 57, 85; 57, 86; 57, 87; 58, 88; 58, 89; 59, 90; 62, 91; 62, 92; 64, 93; 64, 94; 65, 95; 66, 96; 67, 97; 69, 98; 69, 99; 70, 100; 70, 101; 70, 102; 75, 103; 75, 104; 76, 105; 76, 106; 77, 107; 77, 108; 78, 109; 80, 110; 80, 111; 82, 112; 86, 113; 86, 114; 87, 115; 90, 116; 90, 117; 91, 118; 92, 119; 92, 120; 92, 121; 93, 122; 93, 123; 94, 124; 95, 125; 95, 126; 96, 127; 96, 128; 97, 129; 97, 130; 98, 131; 98, 132; 99, 133; 99, 134; 100, 135; 100, 136; 101, 137; 101, 138; 102, 139; 102, 140; 102, 141; 104, 142; 104, 143; 106, 144; 108, 145; 109, 146; 109, 147; 110, 148; 110, 149; 111, 150; 111, 151; 114, 152; 114, 153; 115, 154; 115, 155; 118, 156; 121, 157; 121, 158; 121, 159; 123, 160; 123, 161; 124, 162; 126, 163; 126, 164; 128, 165; 128, 166; 129, 167; 129, 168; 130, 169; 134, 170; 134, 171; 135, 172; 135, 173; 137, 174; 138, 175; 141, 176; 143, 177; 145, 178; 145, 179; 147, 180; 149, 181; 149, 182; 155, 183; 155, 184; 156, 185; 156, 186; 157, 187; 158, 188; 159, 189; 162, 190; 162, 191; 163, 192; 163, 193; 165, 194; 165, 195; 166, 196; 166, 197; 173, 198; 174, 199; 174, 200; 175, 201; 175, 202; 176, 203; 177, 204; 177, 205; 180, 206; 180, 207; 181, 208; 181, 209; 186, 210; 186, 211; 187, 212; 187, 213; 188, 214; 188, 215; 189, 216; 189, 217; 191, 218; 191, 219; 192, 220; 192, 221; 193, 222; 196, 223; 196, 224; 197, 225; 197, 226; 199, 227; 202, 228; 202, 229; 203, 230; 203, 231; 206, 232; 206, 233; 207, 234; 207, 235; 210, 236; 210, 237; 213, 238; 213, 239; 215, 240; 215, 241; 217, 242; 217, 243; 218, 244; 218, 245; 220, 246; 220, 247; 222, 248; 222, 249; 223, 250; 223, 251; 224, 252; 226, 253; 228, 254; 228, 255; 229, 256; 231, 257; 231, 258; 232, 259; 232, 260; 235, 261; 235, 262; 238, 263; 238, 264; 240, 265; 240, 266; 241, 267; 241, 268; 241, 269; 243, 270; 243, 271; 246, 272; 246, 273; 247, 274; 252, 275; 257, 276; 257, 277; 258, 278; 259, 279; 260, 280; 262, 281; 268, 282; 268, 283; 269, 284; 269, 285; 270, 286; 281, 287; 281, 288; 285, 289; 285, 290
def to_dask_dataframe(self, dim_order=None, set_index=False): """ Convert this dataset into a dask.dataframe.DataFrame. The dimensions, coordinates and data variables in this dataset form the columns of the DataFrame. Parameters ---------- dim_order : list, optional Hierarchical dimension order for the resulting dataframe. All arrays are transposed to this order and then written out as flat vectors in contiguous order, so the last dimension in this list will be contiguous in the resulting DataFrame. This has a major influence on which operations are efficient on the resulting dask dataframe. If provided, must include all dimensions on this dataset. By default, dimensions are sorted alphabetically. set_index : bool, optional If set_index=True, the dask DataFrame is indexed by this dataset's coordinate. Since dask DataFrames to not support multi-indexes, set_index only works if the dataset only contains one dimension. Returns ------- dask.dataframe.DataFrame """ import dask.array as da import dask.dataframe as dd if dim_order is None: dim_order = list(self.dims) elif set(dim_order) != set(self.dims): raise ValueError( 'dim_order {} does not match the set of dimensions on this ' 'Dataset: {}'.format(dim_order, list(self.dims))) ordered_dims = OrderedDict((k, self.dims[k]) for k in dim_order) columns = list(ordered_dims) columns.extend(k for k in self.coords if k not in self.dims) columns.extend(self.data_vars) series_list = [] for name in columns: try: var = self.variables[name] except KeyError: # dimension without a matching coordinate size = self.dims[name] data = da.arange(size, chunks=size, dtype=np.int64) var = Variable((name,), data) # IndexVariable objects have a dummy .chunk() method if isinstance(var, IndexVariable): var = var.to_base_variable() dask_array = var.set_dims(ordered_dims).chunk(self.chunks).data series = dd.from_array(dask_array.reshape(-1), columns=[name]) series_list.append(series) df = dd.concat(series_list, axis=1) if set_index: if len(dim_order) == 1: (dim,) = dim_order df = df.set_index(dim) else: # triggers an error about multi-indexes, even if only one # dimension is passed df = df.set_index(dim_order) return df
0, module; 1, function_definition; 2, function_name:interp; 3, parameters; 4, block; 5, identifier:var; 6, identifier:indexes_coords; 7, identifier:method; 8, dictionary_splat_pattern; 9, expression_statement; 10, if_statement; 11, comment:# simple speed up for the local interpolation; 12, if_statement; 13, comment:# default behavior; 14, expression_statement; 15, comment:# target dimensions; 16, expression_statement; 17, expression_statement; 18, expression_statement; 19, comment:# transpose to make the interpolated axis to the last position; 20, expression_statement; 21, expression_statement; 22, expression_statement; 23, expression_statement; 24, expression_statement; 25, comment:# dimension of the output array; 26, expression_statement; 27, for_statement; 28, return_statement; 29, identifier:kwargs; 30, comment:""" Make an interpolation of Variable Parameters ---------- var: Variable index_coords: Mapping from dimension name to a pair of original and new coordinates. Original coordinates should be sorted in strictly ascending order. Note that all the coordinates should be Variable objects. method: string One of {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'}. For multidimensional interpolation, only {'linear', 'nearest'} can be used. **kwargs: keyword arguments to be passed to scipy.interpolate Returns ------- Interpolated Variable See Also -------- DataArray.interp Dataset.interp """; 31, not_operator; 32, block; 33, comparison_operator:method in ['linear', 'nearest']; 34, block; 35, assignment; 36, assignment; 37, assignment; 38, assignment; 39, assignment; 40, assignment; 41, assignment; 42, assignment; 43, assignment; 44, assignment; 45, identifier:d; 46, attribute; 47, block; 48, call; 49, identifier:indexes_coords; 50, return_statement; 51, identifier:method; 52, list; 53, expression_statement; 54, subscript; 55, call; 56, identifier:dims; 57, call; 58, pattern_list; 59, call; 60, identifier:destination; 61, call; 62, identifier:broadcast_dims; 63, list_comprehension; 64, identifier:original_dims; 65, binary_operator:broadcast_dims + dims; 66, identifier:new_dims; 67, binary_operator:broadcast_dims + list(destination[0].dims); 68, identifier:interped; 69, call; 70, identifier:result; 71, call; 72, identifier:out_dims; 73, call; 74, identifier:var; 75, identifier:dims; 76, if_statement; 77, attribute; 78, argument_list; 79, call; 80, string; 81, string; 82, assignment; 83, identifier:kwargs; 84, string; 85, attribute; 86, argument_list; 87, identifier:list; 88, argument_list; 89, identifier:x; 90, identifier:new_x; 91, identifier:zip; 92, argument_list; 93, identifier:broadcast_variables; 94, argument_list; 95, identifier:d; 96, for_in_clause; 97, if_clause; 98, identifier:broadcast_dims; 99, identifier:dims; 100, identifier:broadcast_dims; 101, call; 102, identifier:interp_func; 103, argument_list; 104, identifier:Variable; 105, argument_list; 106, identifier:OrderedSet; 107, argument_list; 108, comparison_operator:d in dims; 109, block; 110, else_clause; 111, identifier:result; 112, identifier:transpose; 113, list_splat; 114, attribute; 115, argument_list; 116, string_content:linear; 117, string_content:nearest; 118, pattern_list; 119, call; 120, string_content:bounds_error; 121, identifier:kwargs; 122, identifier:get; 123, string; 124, False; 125, identifier:indexes_coords; 126, list_splat; 127, list_splat; 128, identifier:d; 129, attribute; 130, comparison_operator:d not in dims; 131, identifier:list; 132, argument_list; 133, attribute; 134, identifier:x; 135, identifier:destination; 136, identifier:method; 137, identifier:kwargs; 138, identifier:new_dims; 139, identifier:interped; 140, keyword_argument; 141, identifier:d; 142, identifier:dims; 143, expression_statement; 144, block; 145, call; 146, identifier:var; 147, identifier:copy; 148, identifier:var; 149, identifier:indexes_coords; 150, identifier:_localize; 151, argument_list; 152, string_content:bounds_error; 153, list_comprehension; 154, identifier:new_x; 155, identifier:var; 156, identifier:dims; 157, identifier:d; 158, identifier:dims; 159, attribute; 160, call; 161, identifier:data; 162, identifier:attrs; 163, attribute; 164, call; 165, expression_statement; 166, identifier:tuple; 167, argument_list; 168, identifier:var; 169, identifier:indexes_coords; 170, subscript; 171, for_in_clause; 172, subscript; 173, identifier:dims; 174, attribute; 175, argument_list; 176, identifier:var; 177, identifier:attrs; 178, attribute; 179, argument_list; 180, call; 181, identifier:out_dims; 182, identifier:indexes_coords; 183, identifier:d; 184, identifier:d; 185, identifier:dims; 186, identifier:destination; 187, integer:0; 188, identifier:var; 189, identifier:transpose; 190, list_splat; 191, identifier:out_dims; 192, identifier:update; 193, attribute; 194, attribute; 195, argument_list; 196, identifier:original_dims; 197, subscript; 198, identifier:dims; 199, identifier:out_dims; 200, identifier:add; 201, identifier:d; 202, subscript; 203, integer:1; 204, identifier:indexes_coords; 205, identifier:d
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 4, 14; 4, 15; 4, 16; 4, 17; 4, 18; 4, 19; 4, 20; 4, 21; 4, 22; 4, 23; 4, 24; 4, 25; 4, 26; 4, 27; 4, 28; 8, 29; 9, 30; 10, 31; 10, 32; 12, 33; 12, 34; 14, 35; 16, 36; 17, 37; 18, 38; 20, 39; 21, 40; 22, 41; 23, 42; 24, 43; 26, 44; 27, 45; 27, 46; 27, 47; 28, 48; 31, 49; 32, 50; 33, 51; 33, 52; 34, 53; 35, 54; 35, 55; 36, 56; 36, 57; 37, 58; 37, 59; 38, 60; 38, 61; 39, 62; 39, 63; 40, 64; 40, 65; 41, 66; 41, 67; 42, 68; 42, 69; 43, 70; 43, 71; 44, 72; 44, 73; 46, 74; 46, 75; 47, 76; 48, 77; 48, 78; 50, 79; 52, 80; 52, 81; 53, 82; 54, 83; 54, 84; 55, 85; 55, 86; 57, 87; 57, 88; 58, 89; 58, 90; 59, 91; 59, 92; 61, 93; 61, 94; 63, 95; 63, 96; 63, 97; 65, 98; 65, 99; 67, 100; 67, 101; 69, 102; 69, 103; 71, 104; 71, 105; 73, 106; 73, 107; 76, 108; 76, 109; 76, 110; 77, 111; 77, 112; 78, 113; 79, 114; 79, 115; 80, 116; 81, 117; 82, 118; 82, 119; 84, 120; 85, 121; 85, 122; 86, 123; 86, 124; 88, 125; 92, 126; 94, 127; 96, 128; 96, 129; 97, 130; 101, 131; 101, 132; 103, 133; 103, 134; 103, 135; 103, 136; 103, 137; 105, 138; 105, 139; 105, 140; 108, 141; 108, 142; 109, 143; 110, 144; 113, 145; 114, 146; 114, 147; 118, 148; 118, 149; 119, 150; 119, 151; 123, 152; 126, 153; 127, 154; 129, 155; 129, 156; 130, 157; 130, 158; 132, 159; 133, 160; 133, 161; 140, 162; 140, 163; 143, 164; 144, 165; 145, 166; 145, 167; 151, 168; 151, 169; 153, 170; 153, 171; 159, 172; 159, 173; 160, 174; 160, 175; 163, 176; 163, 177; 164, 178; 164, 179; 165, 180; 167, 181; 170, 182; 170, 183; 171, 184; 171, 185; 172, 186; 172, 187; 174, 188; 174, 189; 175, 190; 178, 191; 178, 192; 179, 193; 180, 194; 180, 195; 190, 196; 193, 197; 193, 198; 194, 199; 194, 200; 195, 201; 197, 202; 197, 203; 202, 204; 202, 205
def interp(var, indexes_coords, method, **kwargs): """ Make an interpolation of Variable Parameters ---------- var: Variable index_coords: Mapping from dimension name to a pair of original and new coordinates. Original coordinates should be sorted in strictly ascending order. Note that all the coordinates should be Variable objects. method: string One of {'linear', 'nearest', 'zero', 'slinear', 'quadratic', 'cubic'}. For multidimensional interpolation, only {'linear', 'nearest'} can be used. **kwargs: keyword arguments to be passed to scipy.interpolate Returns ------- Interpolated Variable See Also -------- DataArray.interp Dataset.interp """ if not indexes_coords: return var.copy() # simple speed up for the local interpolation if method in ['linear', 'nearest']: var, indexes_coords = _localize(var, indexes_coords) # default behavior kwargs['bounds_error'] = kwargs.get('bounds_error', False) # target dimensions dims = list(indexes_coords) x, new_x = zip(*[indexes_coords[d] for d in dims]) destination = broadcast_variables(*new_x) # transpose to make the interpolated axis to the last position broadcast_dims = [d for d in var.dims if d not in dims] original_dims = broadcast_dims + dims new_dims = broadcast_dims + list(destination[0].dims) interped = interp_func(var.transpose(*original_dims).data, x, destination, method, kwargs) result = Variable(new_dims, interped, attrs=var.attrs) # dimension of the output array out_dims = OrderedSet() for d in var.dims: if d in dims: out_dims.update(indexes_coords[d][1].dims) else: out_dims.add(d) return result.transpose(*tuple(out_dims))
0, module; 1, function_definition; 2, function_name:coerce_pandas_values; 3, parameters; 4, block; 5, identifier:objects; 6, expression_statement; 7, import_from_statement; 8, import_from_statement; 9, expression_statement; 10, for_statement; 11, return_statement; 12, comment:"""Convert pandas values found in a list of labeled objects. Parameters ---------- objects : list of Dataset or mappings The mappings may contain any sort of objects coercible to xarray.Variables as keys, including pandas objects. Returns ------- List of Dataset or OrderedDict objects. Any inputs or values in the inputs that were pandas objects have been converted into native xarray objects. """; 13, relative_import; 14, dotted_name; 15, relative_import; 16, dotted_name; 17, assignment; 18, identifier:obj; 19, identifier:objects; 20, block; 21, identifier:out; 22, import_prefix; 23, dotted_name; 24, identifier:Dataset; 25, import_prefix; 26, dotted_name; 27, identifier:DataArray; 28, identifier:out; 29, list; 30, if_statement; 31, expression_statement; 32, identifier:dataset; 33, identifier:dataarray; 34, call; 35, block; 36, else_clause; 37, call; 38, identifier:isinstance; 39, argument_list; 40, expression_statement; 41, block; 42, attribute; 43, argument_list; 44, identifier:obj; 45, identifier:Dataset; 46, assignment; 47, expression_statement; 48, if_statement; 49, for_statement; 50, identifier:out; 51, identifier:append; 52, identifier:variables; 53, identifier:variables; 54, identifier:obj; 55, assignment; 56, call; 57, block; 58, pattern_list; 59, call; 60, block; 61, identifier:variables; 62, call; 63, identifier:isinstance; 64, argument_list; 65, expression_statement; 66, identifier:k; 67, identifier:v; 68, attribute; 69, argument_list; 70, if_statement; 71, expression_statement; 72, identifier:OrderedDict; 73, argument_list; 74, identifier:obj; 75, identifier:PANDAS_TYPES; 76, assignment; 77, identifier:obj; 78, identifier:items; 79, call; 80, block; 81, assignment; 82, identifier:obj; 83, call; 84, identifier:isinstance; 85, argument_list; 86, expression_statement; 87, subscript; 88, identifier:v; 89, identifier:OrderedDict; 90, argument_list; 91, identifier:v; 92, identifier:PANDAS_TYPES; 93, assignment; 94, identifier:variables; 95, identifier:k; 96, call; 97, identifier:v; 98, call; 99, attribute; 100, argument_list; 101, identifier:DataArray; 102, argument_list; 103, identifier:obj; 104, identifier:iteritems; 105, identifier:v
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 4, 6; 4, 7; 4, 8; 4, 9; 4, 10; 4, 11; 6, 12; 7, 13; 7, 14; 8, 15; 8, 16; 9, 17; 10, 18; 10, 19; 10, 20; 11, 21; 13, 22; 13, 23; 14, 24; 15, 25; 15, 26; 16, 27; 17, 28; 17, 29; 20, 30; 20, 31; 23, 32; 26, 33; 30, 34; 30, 35; 30, 36; 31, 37; 34, 38; 34, 39; 35, 40; 36, 41; 37, 42; 37, 43; 39, 44; 39, 45; 40, 46; 41, 47; 41, 48; 41, 49; 42, 50; 42, 51; 43, 52; 46, 53; 46, 54; 47, 55; 48, 56; 48, 57; 49, 58; 49, 59; 49, 60; 55, 61; 55, 62; 56, 63; 56, 64; 57, 65; 58, 66; 58, 67; 59, 68; 59, 69; 60, 70; 60, 71; 62, 72; 62, 73; 64, 74; 64, 75; 65, 76; 68, 77; 68, 78; 70, 79; 70, 80; 71, 81; 76, 82; 76, 83; 79, 84; 79, 85; 80, 86; 81, 87; 81, 88; 83, 89; 83, 90; 85, 91; 85, 92; 86, 93; 87, 94; 87, 95; 90, 96; 93, 97; 93, 98; 96, 99; 96, 100; 98, 101; 98, 102; 99, 103; 99, 104; 102, 105
def coerce_pandas_values(objects): """Convert pandas values found in a list of labeled objects. Parameters ---------- objects : list of Dataset or mappings The mappings may contain any sort of objects coercible to xarray.Variables as keys, including pandas objects. Returns ------- List of Dataset or OrderedDict objects. Any inputs or values in the inputs that were pandas objects have been converted into native xarray objects. """ from .dataset import Dataset from .dataarray import DataArray out = [] for obj in objects: if isinstance(obj, Dataset): variables = obj else: variables = OrderedDict() if isinstance(obj, PANDAS_TYPES): obj = OrderedDict(obj.iteritems()) for k, v in obj.items(): if isinstance(v, PANDAS_TYPES): v = DataArray(v) variables[k] = v out.append(variables) return out
0, module; 1, function_definition; 2, function_name:unique_value_groups; 3, parameters; 4, block; 5, identifier:ar; 6, default_parameter; 7, expression_statement; 8, expression_statement; 9, expression_statement; 10, for_statement; 11, return_statement; 12, identifier:sort; 13, True; 14, comment:"""Group an array by its unique values. Parameters ---------- ar : array-like Input array. This will be flattened if it is not already 1-D. sort : boolean, optional Whether or not to sort unique values. Returns ------- values : np.ndarray Sorted, unique values as returned by `np.unique`. indices : list of lists of int Each element provides the integer indices in `ar` with values given by the corresponding value in `unique_values`. """; 15, assignment; 16, assignment; 17, pattern_list; 18, call; 19, block; 20, expression_list; 21, pattern_list; 22, call; 23, identifier:groups; 24, list_comprehension; 25, identifier:n; 26, identifier:g; 27, identifier:enumerate; 28, argument_list; 29, if_statement; 30, identifier:values; 31, identifier:groups; 32, identifier:inverse; 33, identifier:values; 34, attribute; 35, argument_list; 36, list; 37, for_in_clause; 38, identifier:inverse; 39, comparison_operator:g >= 0; 40, comment:# pandas uses -1 to mark NaN, but doesn't include them in values; 41, block; 42, identifier:pd; 43, identifier:factorize; 44, identifier:ar; 45, keyword_argument; 46, identifier:_; 47, call; 48, identifier:g; 49, integer:0; 50, expression_statement; 51, identifier:sort; 52, identifier:sort; 53, identifier:range; 54, argument_list; 55, call; 56, call; 57, attribute; 58, argument_list; 59, identifier:len; 60, argument_list; 61, subscript; 62, identifier:append; 63, identifier:n; 64, identifier:values; 65, identifier:groups; 66, identifier:g
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 4, 9; 4, 10; 4, 11; 6, 12; 6, 13; 7, 14; 8, 15; 9, 16; 10, 17; 10, 18; 10, 19; 11, 20; 15, 21; 15, 22; 16, 23; 16, 24; 17, 25; 17, 26; 18, 27; 18, 28; 19, 29; 20, 30; 20, 31; 21, 32; 21, 33; 22, 34; 22, 35; 24, 36; 24, 37; 28, 38; 29, 39; 29, 40; 29, 41; 34, 42; 34, 43; 35, 44; 35, 45; 37, 46; 37, 47; 39, 48; 39, 49; 41, 50; 45, 51; 45, 52; 47, 53; 47, 54; 50, 55; 54, 56; 55, 57; 55, 58; 56, 59; 56, 60; 57, 61; 57, 62; 58, 63; 60, 64; 61, 65; 61, 66
def unique_value_groups(ar, sort=True): """Group an array by its unique values. Parameters ---------- ar : array-like Input array. This will be flattened if it is not already 1-D. sort : boolean, optional Whether or not to sort unique values. Returns ------- values : np.ndarray Sorted, unique values as returned by `np.unique`. indices : list of lists of int Each element provides the integer indices in `ar` with values given by the corresponding value in `unique_values`. """ inverse, values = pd.factorize(ar, sort=sort) groups = [[] for _ in range(len(values))] for n, g in enumerate(inverse): if g >= 0: # pandas uses -1 to mark NaN, but doesn't include them in values groups[g].append(n) return values, groups
0, module; 1, function_definition; 2, function_name:order; 3, parameters; 4, block; 5, identifier:self; 6, expression_statement; 7, comment:# non-GA releases should appear before GA releases; 8, comment:# Order: tp -> beta -> rc -> GA; 9, if_statement; 10, return_statement; 11, comment:"""Return a representation that allows this object to be sorted correctly with the default comparator. """; 12, attribute; 13, block; 14, else_clause; 15, binary_operator:(int(self.major), int(self.minor), int(self.patch)) + stage; 16, identifier:self; 17, identifier:stage; 18, for_statement; 19, block; 20, tuple; 21, identifier:stage; 22, identifier:st; 23, identifier:STAGES; 24, block; 25, expression_statement; 26, call; 27, call; 28, call; 29, if_statement; 30, assignment; 31, identifier:int; 32, argument_list; 33, identifier:int; 34, argument_list; 35, identifier:int; 36, argument_list; 37, comparison_operator:st in self.stage; 38, block; 39, identifier:stage; 40, tuple; 41, attribute; 42, attribute; 43, attribute; 44, identifier:st; 45, attribute; 46, expression_statement; 47, break_statement; 48, call; 49, identifier:self; 50, identifier:major; 51, identifier:self; 52, identifier:minor; 53, identifier:self; 54, identifier:patch; 55, identifier:self; 56, identifier:stage; 57, assignment; 58, identifier:len; 59, argument_list; 60, identifier:stage; 61, tuple; 62, identifier:STAGES; 63, call; 64, attribute; 65, attribute; 66, argument_list; 67, identifier:self; 68, identifier:stage; 69, identifier:STAGES; 70, identifier:index; 71, identifier:st
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 4, 6; 4, 7; 4, 8; 4, 9; 4, 10; 6, 11; 9, 12; 9, 13; 9, 14; 10, 15; 12, 16; 12, 17; 13, 18; 14, 19; 15, 20; 15, 21; 18, 22; 18, 23; 18, 24; 19, 25; 20, 26; 20, 27; 20, 28; 24, 29; 25, 30; 26, 31; 26, 32; 27, 33; 27, 34; 28, 35; 28, 36; 29, 37; 29, 38; 30, 39; 30, 40; 32, 41; 34, 42; 36, 43; 37, 44; 37, 45; 38, 46; 38, 47; 40, 48; 41, 49; 41, 50; 42, 51; 42, 52; 43, 53; 43, 54; 45, 55; 45, 56; 46, 57; 48, 58; 48, 59; 57, 60; 57, 61; 59, 62; 61, 63; 61, 64; 63, 65; 63, 66; 64, 67; 64, 68; 65, 69; 65, 70; 66, 71
def order(self): """Return a representation that allows this object to be sorted correctly with the default comparator. """ # non-GA releases should appear before GA releases # Order: tp -> beta -> rc -> GA if self.stage: for st in STAGES: if st in self.stage: stage = (STAGES.index(st), self.stage) break else: stage = (len(STAGES),) return (int(self.major), int(self.minor), int(self.patch)) + stage
0, module; 1, function_definition; 2, function_name:zadd; 3, parameters; 4, block; 5, identifier:self; 6, identifier:name; 7, identifier:mapping; 8, default_parameter; 9, default_parameter; 10, default_parameter; 11, default_parameter; 12, expression_statement; 13, if_statement; 14, if_statement; 15, if_statement; 16, expression_statement; 17, expression_statement; 18, if_statement; 19, if_statement; 20, if_statement; 21, if_statement; 22, for_statement; 23, return_statement; 24, identifier:nx; 25, False; 26, identifier:xx; 27, False; 28, identifier:ch; 29, False; 30, identifier:incr; 31, False; 32, comment:""" Set any number of element-name, score pairs to the key ``name``. Pairs are specified as a dict of element-names keys to score values. ``nx`` forces ZADD to only create new elements and not to update scores for elements that already exist. ``xx`` forces ZADD to only update scores of elements that already exist. New elements will not be added. ``ch`` modifies the return value to be the numbers of elements changed. Changed elements include new elements that were added and elements whose scores changed. ``incr`` modifies ZADD to behave like ZINCRBY. In this mode only a single element/score pair can be specified and the score is the amount the existing score will be incremented by. When using this mode the return value of ZADD will be the new score of the element. The return value of ZADD varies based on the mode specified. With no options, ZADD returns the number of new elements added to the sorted set. """; 33, not_operator; 34, block; 35, boolean_operator; 36, block; 37, boolean_operator; 38, block; 39, assignment; 40, assignment; 41, identifier:nx; 42, block; 43, identifier:xx; 44, block; 45, identifier:ch; 46, block; 47, identifier:incr; 48, block; 49, identifier:pair; 50, call; 51, block; 52, call; 53, identifier:mapping; 54, raise_statement; 55, identifier:nx; 56, identifier:xx; 57, raise_statement; 58, identifier:incr; 59, comparison_operator:len(mapping) != 1; 60, raise_statement; 61, identifier:pieces; 62, list; 63, identifier:options; 64, dictionary; 65, expression_statement; 66, expression_statement; 67, expression_statement; 68, expression_statement; 69, expression_statement; 70, identifier:iteritems; 71, argument_list; 72, expression_statement; 73, expression_statement; 74, attribute; 75, argument_list; 76, call; 77, call; 78, call; 79, integer:1; 80, call; 81, call; 82, call; 83, call; 84, call; 85, assignment; 86, identifier:mapping; 87, call; 88, call; 89, identifier:self; 90, identifier:execute_command; 91, string; 92, identifier:name; 93, list_splat; 94, dictionary_splat; 95, identifier:DataError; 96, argument_list; 97, identifier:DataError; 98, argument_list; 99, identifier:len; 100, argument_list; 101, identifier:DataError; 102, argument_list; 103, attribute; 104, argument_list; 105, attribute; 106, argument_list; 107, attribute; 108, argument_list; 109, attribute; 110, argument_list; 111, subscript; 112, True; 113, attribute; 114, argument_list; 115, attribute; 116, argument_list; 117, string_content:ZADD; 118, identifier:pieces; 119, identifier:options; 120, string:"ZADD requires at least one element/score pair"; 121, string:"ZADD allows either 'nx' or 'xx', not both"; 122, identifier:mapping; 123, concatenated_string; 124, identifier:pieces; 125, identifier:append; 126, call; 127, identifier:pieces; 128, identifier:append; 129, call; 130, identifier:pieces; 131, identifier:append; 132, call; 133, identifier:pieces; 134, identifier:append; 135, call; 136, identifier:options; 137, string; 138, identifier:pieces; 139, identifier:append; 140, subscript; 141, identifier:pieces; 142, identifier:append; 143, subscript; 144, string:"ZADD option 'incr' only works when passing a "; 145, string:"single element/score pair"; 146, attribute; 147, argument_list; 148, attribute; 149, argument_list; 150, attribute; 151, argument_list; 152, attribute; 153, argument_list; 154, string_content:as_score; 155, identifier:pair; 156, integer:1; 157, identifier:pair; 158, integer:0; 159, identifier:Token; 160, identifier:get_token; 161, string; 162, identifier:Token; 163, identifier:get_token; 164, string; 165, identifier:Token; 166, identifier:get_token; 167, string; 168, identifier:Token; 169, identifier:get_token; 170, string; 171, string_content:NX; 172, string_content:XX; 173, string_content:CH; 174, string_content:INCR
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 3, 9; 3, 10; 3, 11; 4, 12; 4, 13; 4, 14; 4, 15; 4, 16; 4, 17; 4, 18; 4, 19; 4, 20; 4, 21; 4, 22; 4, 23; 8, 24; 8, 25; 9, 26; 9, 27; 10, 28; 10, 29; 11, 30; 11, 31; 12, 32; 13, 33; 13, 34; 14, 35; 14, 36; 15, 37; 15, 38; 16, 39; 17, 40; 18, 41; 18, 42; 19, 43; 19, 44; 20, 45; 20, 46; 21, 47; 21, 48; 22, 49; 22, 50; 22, 51; 23, 52; 33, 53; 34, 54; 35, 55; 35, 56; 36, 57; 37, 58; 37, 59; 38, 60; 39, 61; 39, 62; 40, 63; 40, 64; 42, 65; 44, 66; 46, 67; 48, 68; 48, 69; 50, 70; 50, 71; 51, 72; 51, 73; 52, 74; 52, 75; 54, 76; 57, 77; 59, 78; 59, 79; 60, 80; 65, 81; 66, 82; 67, 83; 68, 84; 69, 85; 71, 86; 72, 87; 73, 88; 74, 89; 74, 90; 75, 91; 75, 92; 75, 93; 75, 94; 76, 95; 76, 96; 77, 97; 77, 98; 78, 99; 78, 100; 80, 101; 80, 102; 81, 103; 81, 104; 82, 105; 82, 106; 83, 107; 83, 108; 84, 109; 84, 110; 85, 111; 85, 112; 87, 113; 87, 114; 88, 115; 88, 116; 91, 117; 93, 118; 94, 119; 96, 120; 98, 121; 100, 122; 102, 123; 103, 124; 103, 125; 104, 126; 105, 127; 105, 128; 106, 129; 107, 130; 107, 131; 108, 132; 109, 133; 109, 134; 110, 135; 111, 136; 111, 137; 113, 138; 113, 139; 114, 140; 115, 141; 115, 142; 116, 143; 123, 144; 123, 145; 126, 146; 126, 147; 129, 148; 129, 149; 132, 150; 132, 151; 135, 152; 135, 153; 137, 154; 140, 155; 140, 156; 143, 157; 143, 158; 146, 159; 146, 160; 147, 161; 148, 162; 148, 163; 149, 164; 150, 165; 150, 166; 151, 167; 152, 168; 152, 169; 153, 170; 161, 171; 164, 172; 167, 173; 170, 174
def zadd(self, name, mapping, nx=False, xx=False, ch=False, incr=False): """ Set any number of element-name, score pairs to the key ``name``. Pairs are specified as a dict of element-names keys to score values. ``nx`` forces ZADD to only create new elements and not to update scores for elements that already exist. ``xx`` forces ZADD to only update scores of elements that already exist. New elements will not be added. ``ch`` modifies the return value to be the numbers of elements changed. Changed elements include new elements that were added and elements whose scores changed. ``incr`` modifies ZADD to behave like ZINCRBY. In this mode only a single element/score pair can be specified and the score is the amount the existing score will be incremented by. When using this mode the return value of ZADD will be the new score of the element. The return value of ZADD varies based on the mode specified. With no options, ZADD returns the number of new elements added to the sorted set. """ if not mapping: raise DataError("ZADD requires at least one element/score pair") if nx and xx: raise DataError("ZADD allows either 'nx' or 'xx', not both") if incr and len(mapping) != 1: raise DataError("ZADD option 'incr' only works when passing a " "single element/score pair") pieces = [] options = {} if nx: pieces.append(Token.get_token('NX')) if xx: pieces.append(Token.get_token('XX')) if ch: pieces.append(Token.get_token('CH')) if incr: pieces.append(Token.get_token('INCR')) options['as_score'] = True for pair in iteritems(mapping): pieces.append(pair[1]) pieces.append(pair[0]) return self.execute_command('ZADD', name, *pieces, **options)
0, module; 1, function_definition; 2, function_name:zincrby; 3, parameters; 4, block; 5, identifier:self; 6, identifier:name; 7, identifier:amount; 8, identifier:value; 9, expression_statement; 10, return_statement; 11, string:"Increment the score of ``value`` in sorted set ``name`` by ``amount``"; 12, call; 13, attribute; 14, argument_list; 15, identifier:self; 16, identifier:execute_command; 17, string; 18, identifier:name; 19, identifier:amount; 20, identifier:value; 21, string_content:ZINCRBY
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 4, 9; 4, 10; 9, 11; 10, 12; 12, 13; 12, 14; 13, 15; 13, 16; 14, 17; 14, 18; 14, 19; 14, 20; 17, 21
def zincrby(self, name, amount, value): "Increment the score of ``value`` in sorted set ``name`` by ``amount``" return self.execute_command('ZINCRBY', name, amount, value)
0, module; 1, function_definition; 2, function_name:zinterstore; 3, parameters; 4, block; 5, identifier:self; 6, identifier:dest; 7, identifier:keys; 8, default_parameter; 9, expression_statement; 10, return_statement; 11, identifier:aggregate; 12, None; 13, comment:""" Intersect multiple sorted sets specified by ``keys`` into a new sorted set, ``dest``. Scores in the destination will be aggregated based on the ``aggregate``, or SUM if none is provided. """; 14, call; 15, attribute; 16, argument_list; 17, identifier:self; 18, identifier:_zaggregate; 19, string; 20, identifier:dest; 21, identifier:keys; 22, identifier:aggregate; 23, string_content:ZINTERSTORE
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 4, 9; 4, 10; 8, 11; 8, 12; 9, 13; 10, 14; 14, 15; 14, 16; 15, 17; 15, 18; 16, 19; 16, 20; 16, 21; 16, 22; 19, 23
def zinterstore(self, dest, keys, aggregate=None): """ Intersect multiple sorted sets specified by ``keys`` into a new sorted set, ``dest``. Scores in the destination will be aggregated based on the ``aggregate``, or SUM if none is provided. """ return self._zaggregate('ZINTERSTORE', dest, keys, aggregate)
0, module; 1, function_definition; 2, function_name:zlexcount; 3, parameters; 4, block; 5, identifier:self; 6, identifier:name; 7, identifier:min; 8, identifier:max; 9, expression_statement; 10, return_statement; 11, comment:""" Return the number of items in the sorted set ``name`` between the lexicographical range ``min`` and ``max``. """; 12, call; 13, attribute; 14, argument_list; 15, identifier:self; 16, identifier:execute_command; 17, string; 18, identifier:name; 19, identifier:min; 20, identifier:max; 21, string_content:ZLEXCOUNT
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 4, 9; 4, 10; 9, 11; 10, 12; 12, 13; 12, 14; 13, 15; 13, 16; 14, 17; 14, 18; 14, 19; 14, 20; 17, 21
def zlexcount(self, name, min, max): """ Return the number of items in the sorted set ``name`` between the lexicographical range ``min`` and ``max``. """ return self.execute_command('ZLEXCOUNT', name, min, max)
0, module; 1, function_definition; 2, function_name:zpopmax; 3, parameters; 4, block; 5, identifier:self; 6, identifier:name; 7, default_parameter; 8, expression_statement; 9, expression_statement; 10, expression_statement; 11, return_statement; 12, identifier:count; 13, None; 14, comment:""" Remove and return up to ``count`` members with the highest scores from the sorted set ``name``. """; 15, assignment; 16, assignment; 17, call; 18, identifier:args; 19, boolean_operator; 20, identifier:options; 21, dictionary; 22, attribute; 23, argument_list; 24, boolean_operator; 25, list; 26, pair; 27, identifier:self; 28, identifier:execute_command; 29, string; 30, identifier:name; 31, list_splat; 32, dictionary_splat; 33, parenthesized_expression; 34, list; 35, string; 36, True; 37, string_content:ZPOPMAX; 38, identifier:args; 39, identifier:options; 40, comparison_operator:count is not None; 41, identifier:count; 42, string_content:withscores; 43, identifier:count; 44, None
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 4, 8; 4, 9; 4, 10; 4, 11; 7, 12; 7, 13; 8, 14; 9, 15; 10, 16; 11, 17; 15, 18; 15, 19; 16, 20; 16, 21; 17, 22; 17, 23; 19, 24; 19, 25; 21, 26; 22, 27; 22, 28; 23, 29; 23, 30; 23, 31; 23, 32; 24, 33; 24, 34; 26, 35; 26, 36; 29, 37; 31, 38; 32, 39; 33, 40; 34, 41; 35, 42; 40, 43; 40, 44
def zpopmax(self, name, count=None): """ Remove and return up to ``count`` members with the highest scores from the sorted set ``name``. """ args = (count is not None) and [count] or [] options = { 'withscores': True } return self.execute_command('ZPOPMAX', name, *args, **options)
0, module; 1, function_definition; 2, function_name:zpopmin; 3, parameters; 4, block; 5, identifier:self; 6, identifier:name; 7, default_parameter; 8, expression_statement; 9, expression_statement; 10, expression_statement; 11, return_statement; 12, identifier:count; 13, None; 14, comment:""" Remove and return up to ``count`` members with the lowest scores from the sorted set ``name``. """; 15, assignment; 16, assignment; 17, call; 18, identifier:args; 19, boolean_operator; 20, identifier:options; 21, dictionary; 22, attribute; 23, argument_list; 24, boolean_operator; 25, list; 26, pair; 27, identifier:self; 28, identifier:execute_command; 29, string; 30, identifier:name; 31, list_splat; 32, dictionary_splat; 33, parenthesized_expression; 34, list; 35, string; 36, True; 37, string_content:ZPOPMIN; 38, identifier:args; 39, identifier:options; 40, comparison_operator:count is not None; 41, identifier:count; 42, string_content:withscores; 43, identifier:count; 44, None
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 4, 8; 4, 9; 4, 10; 4, 11; 7, 12; 7, 13; 8, 14; 9, 15; 10, 16; 11, 17; 15, 18; 15, 19; 16, 20; 16, 21; 17, 22; 17, 23; 19, 24; 19, 25; 21, 26; 22, 27; 22, 28; 23, 29; 23, 30; 23, 31; 23, 32; 24, 33; 24, 34; 26, 35; 26, 36; 29, 37; 31, 38; 32, 39; 33, 40; 34, 41; 35, 42; 40, 43; 40, 44
def zpopmin(self, name, count=None): """ Remove and return up to ``count`` members with the lowest scores from the sorted set ``name``. """ args = (count is not None) and [count] or [] options = { 'withscores': True } return self.execute_command('ZPOPMIN', name, *args, **options)
0, module; 1, function_definition; 2, function_name:bzpopmax; 3, parameters; 4, block; 5, identifier:self; 6, identifier:keys; 7, default_parameter; 8, expression_statement; 9, if_statement; 10, expression_statement; 11, expression_statement; 12, return_statement; 13, identifier:timeout; 14, integer:0; 15, comment:""" ZPOPMAX a value off of the first non-empty sorted set named in the ``keys`` list. If none of the sorted sets in ``keys`` has a value to ZPOPMAX, then block for ``timeout`` seconds, or until a member gets added to one of the sorted sets. If timeout is 0, then block indefinitely. """; 16, comparison_operator:timeout is None; 17, block; 18, assignment; 19, call; 20, call; 21, identifier:timeout; 22, None; 23, expression_statement; 24, identifier:keys; 25, call; 26, attribute; 27, argument_list; 28, attribute; 29, argument_list; 30, assignment; 31, identifier:list_or_args; 32, argument_list; 33, identifier:keys; 34, identifier:append; 35, identifier:timeout; 36, identifier:self; 37, identifier:execute_command; 38, string; 39, list_splat; 40, identifier:timeout; 41, integer:0; 42, identifier:keys; 43, None; 44, string_content:BZPOPMAX; 45, identifier:keys
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 7, 13; 7, 14; 8, 15; 9, 16; 9, 17; 10, 18; 11, 19; 12, 20; 16, 21; 16, 22; 17, 23; 18, 24; 18, 25; 19, 26; 19, 27; 20, 28; 20, 29; 23, 30; 25, 31; 25, 32; 26, 33; 26, 34; 27, 35; 28, 36; 28, 37; 29, 38; 29, 39; 30, 40; 30, 41; 32, 42; 32, 43; 38, 44; 39, 45
def bzpopmax(self, keys, timeout=0): """ ZPOPMAX a value off of the first non-empty sorted set named in the ``keys`` list. If none of the sorted sets in ``keys`` has a value to ZPOPMAX, then block for ``timeout`` seconds, or until a member gets added to one of the sorted sets. If timeout is 0, then block indefinitely. """ if timeout is None: timeout = 0 keys = list_or_args(keys, None) keys.append(timeout) return self.execute_command('BZPOPMAX', *keys)
0, module; 1, function_definition; 2, function_name:bzpopmin; 3, parameters; 4, block; 5, identifier:self; 6, identifier:keys; 7, default_parameter; 8, expression_statement; 9, if_statement; 10, expression_statement; 11, expression_statement; 12, return_statement; 13, identifier:timeout; 14, integer:0; 15, comment:""" ZPOPMIN a value off of the first non-empty sorted set named in the ``keys`` list. If none of the sorted sets in ``keys`` has a value to ZPOPMIN, then block for ``timeout`` seconds, or until a member gets added to one of the sorted sets. If timeout is 0, then block indefinitely. """; 16, comparison_operator:timeout is None; 17, block; 18, assignment; 19, call; 20, call; 21, identifier:timeout; 22, None; 23, expression_statement; 24, identifier:keys; 25, call; 26, attribute; 27, argument_list; 28, attribute; 29, argument_list; 30, assignment; 31, identifier:list_or_args; 32, argument_list; 33, identifier:keys; 34, identifier:append; 35, identifier:timeout; 36, identifier:self; 37, identifier:execute_command; 38, string; 39, list_splat; 40, identifier:timeout; 41, integer:0; 42, identifier:keys; 43, None; 44, string_content:BZPOPMIN; 45, identifier:keys
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 7, 13; 7, 14; 8, 15; 9, 16; 9, 17; 10, 18; 11, 19; 12, 20; 16, 21; 16, 22; 17, 23; 18, 24; 18, 25; 19, 26; 19, 27; 20, 28; 20, 29; 23, 30; 25, 31; 25, 32; 26, 33; 26, 34; 27, 35; 28, 36; 28, 37; 29, 38; 29, 39; 30, 40; 30, 41; 32, 42; 32, 43; 38, 44; 39, 45
def bzpopmin(self, keys, timeout=0): """ ZPOPMIN a value off of the first non-empty sorted set named in the ``keys`` list. If none of the sorted sets in ``keys`` has a value to ZPOPMIN, then block for ``timeout`` seconds, or until a member gets added to one of the sorted sets. If timeout is 0, then block indefinitely. """ if timeout is None: timeout = 0 keys = list_or_args(keys, None) keys.append(timeout) return self.execute_command('BZPOPMIN', *keys)
0, module; 1, function_definition; 2, function_name:zremrangebylex; 3, parameters; 4, block; 5, identifier:self; 6, identifier:name; 7, identifier:min; 8, identifier:max; 9, expression_statement; 10, return_statement; 11, comment:""" Remove all elements in the sorted set ``name`` between the lexicographical range specified by ``min`` and ``max``. Returns the number of elements removed. """; 12, call; 13, attribute; 14, argument_list; 15, identifier:self; 16, identifier:execute_command; 17, string; 18, identifier:name; 19, identifier:min; 20, identifier:max; 21, string_content:ZREMRANGEBYLEX
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 4, 9; 4, 10; 9, 11; 10, 12; 12, 13; 12, 14; 13, 15; 13, 16; 14, 17; 14, 18; 14, 19; 14, 20; 17, 21
def zremrangebylex(self, name, min, max): """ Remove all elements in the sorted set ``name`` between the lexicographical range specified by ``min`` and ``max``. Returns the number of elements removed. """ return self.execute_command('ZREMRANGEBYLEX', name, min, max)
0, module; 1, function_definition; 2, function_name:zremrangebyrank; 3, parameters; 4, block; 5, identifier:self; 6, identifier:name; 7, identifier:min; 8, identifier:max; 9, expression_statement; 10, return_statement; 11, comment:""" Remove all elements in the sorted set ``name`` with ranks between ``min`` and ``max``. Values are 0-based, ordered from smallest score to largest. Values can be negative indicating the highest scores. Returns the number of elements removed """; 12, call; 13, attribute; 14, argument_list; 15, identifier:self; 16, identifier:execute_command; 17, string; 18, identifier:name; 19, identifier:min; 20, identifier:max; 21, string_content:ZREMRANGEBYRANK
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 4, 9; 4, 10; 9, 11; 10, 12; 12, 13; 12, 14; 13, 15; 13, 16; 14, 17; 14, 18; 14, 19; 14, 20; 17, 21
def zremrangebyrank(self, name, min, max): """ Remove all elements in the sorted set ``name`` with ranks between ``min`` and ``max``. Values are 0-based, ordered from smallest score to largest. Values can be negative indicating the highest scores. Returns the number of elements removed """ return self.execute_command('ZREMRANGEBYRANK', name, min, max)
0, module; 1, function_definition; 2, function_name:argsort_k_smallest; 3, parameters; 4, block; 5, identifier:x; 6, identifier:k; 7, expression_statement; 8, if_statement; 9, if_statement; 10, expression_statement; 11, expression_statement; 12, return_statement; 13, comment:""" Return no more than ``k`` indices of smallest values. """; 14, comparison_operator:k == 0; 15, block; 16, boolean_operator; 17, block; 18, assignment; 19, assignment; 20, subscript; 21, identifier:k; 22, integer:0; 23, return_statement; 24, comparison_operator:k is None; 25, comparison_operator:k >= len(x); 26, return_statement; 27, identifier:indices; 28, subscript; 29, identifier:values; 30, subscript; 31, identifier:indices; 32, call; 33, call; 34, identifier:k; 35, None; 36, identifier:k; 37, call; 38, call; 39, call; 40, slice; 41, identifier:x; 42, identifier:indices; 43, attribute; 44, argument_list; 45, attribute; 46, argument_list; 47, identifier:len; 48, argument_list; 49, attribute; 50, argument_list; 51, attribute; 52, argument_list; 53, identifier:k; 54, identifier:np; 55, identifier:argsort; 56, identifier:values; 57, identifier:np; 58, identifier:array; 59, list; 60, keyword_argument; 61, identifier:x; 62, identifier:np; 63, identifier:argsort; 64, identifier:x; 65, identifier:np; 66, identifier:argpartition; 67, identifier:x; 68, identifier:k; 69, identifier:dtype; 70, attribute; 71, identifier:np; 72, identifier:intp
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 7, 13; 8, 14; 8, 15; 9, 16; 9, 17; 10, 18; 11, 19; 12, 20; 14, 21; 14, 22; 15, 23; 16, 24; 16, 25; 17, 26; 18, 27; 18, 28; 19, 29; 19, 30; 20, 31; 20, 32; 23, 33; 24, 34; 24, 35; 25, 36; 25, 37; 26, 38; 28, 39; 28, 40; 30, 41; 30, 42; 32, 43; 32, 44; 33, 45; 33, 46; 37, 47; 37, 48; 38, 49; 38, 50; 39, 51; 39, 52; 40, 53; 43, 54; 43, 55; 44, 56; 45, 57; 45, 58; 46, 59; 46, 60; 48, 61; 49, 62; 49, 63; 50, 64; 51, 65; 51, 66; 52, 67; 52, 68; 60, 69; 60, 70; 70, 71; 70, 72
def argsort_k_smallest(x, k): """ Return no more than ``k`` indices of smallest values. """ if k == 0: return np.array([], dtype=np.intp) if k is None or k >= len(x): return np.argsort(x) indices = np.argpartition(x, k)[:k] values = x[indices] return indices[np.argsort(values)]
0, module; 1, function_definition; 2, function_name:lookup; 3, parameters; 4, block; 5, typed_parameter; 6, typed_parameter; 7, expression_statement; 8, try_statement; 9, identifier:source_id; 10, type; 11, identifier:schema_id; 12, type; 13, comment:""" Create a new schema object from an existing ledger schema :param source_id: Institution's personal identification for the schema :param schema_id: Ledger schema ID for lookup Example: source_id = 'foobar123' name = 'Address Schema' version = '1.0' attrs = ['address', 'city', 'state'] payment_handle = 0 schema1 = await Schema.create(source_id, name, version, attrs, payment_handle) id1 = await schema.get_schema_id() data = await Schema.lookup(source_id, schema_id) assert data.attrs.sort() == ['sex', 'age', 'name', 'height'].sort() assert data.name == 'test-licence' assert data.handle > 0 :return: schema object """; 14, block; 15, except_clause; 16, identifier:str; 17, identifier:str; 18, expression_statement; 19, if_statement; 20, expression_statement; 21, expression_statement; 22, expression_statement; 23, expression_statement; 24, expression_statement; 25, expression_statement; 26, expression_statement; 27, expression_statement; 28, expression_statement; 29, return_statement; 30, identifier:KeyError; 31, block; 32, assignment; 33, not_operator; 34, block; 35, assignment; 36, assignment; 37, assignment; 38, call; 39, assignment; 40, assignment; 41, assignment; 42, assignment; 43, assignment; 44, identifier:schema; 45, raise_statement; 46, identifier:schema; 47, call; 48, call; 49, expression_statement; 50, expression_statement; 51, identifier:c_source_id; 52, call; 53, identifier:c_schema_id; 54, call; 55, pattern_list; 56, await; 57, attribute; 58, argument_list; 59, identifier:schema_result; 60, call; 61, attribute; 62, subscript; 63, attribute; 64, subscript; 65, attribute; 66, subscript; 67, attribute; 68, identifier:handle; 69, call; 70, identifier:Schema; 71, argument_list; 72, identifier:hasattr; 73, argument_list; 74, call; 75, assignment; 76, identifier:c_char_p; 77, argument_list; 78, identifier:c_char_p; 79, argument_list; 80, identifier:handle; 81, identifier:data; 82, call; 83, attribute; 84, identifier:debug; 85, string:"created schema object"; 86, attribute; 87, argument_list; 88, identifier:schema; 89, identifier:attrs; 90, identifier:schema_result; 91, string; 92, identifier:schema; 93, identifier:name; 94, identifier:schema_result; 95, string; 96, identifier:schema; 97, identifier:version; 98, identifier:schema_result; 99, string; 100, identifier:schema; 101, identifier:handle; 102, identifier:VcxError; 103, argument_list; 104, identifier:source_id; 105, string; 106, string; 107, list; 108, attribute; 109, string:"cb"; 110, attribute; 111, argument_list; 112, attribute; 113, call; 114, call; 115, call; 116, identifier:do_call; 117, argument_list; 118, identifier:schema; 119, identifier:logger; 120, identifier:json; 121, identifier:loads; 122, call; 123, string_content:data; 124, string_content:name; 125, string_content:version; 126, attribute; 127, identifier:Schema; 128, identifier:lookup; 129, attribute; 130, identifier:debug; 131, string:"vcx_schema_get_attributes: Creating callback"; 132, attribute; 133, identifier:cb; 134, identifier:create_cb; 135, argument_list; 136, attribute; 137, argument_list; 138, attribute; 139, argument_list; 140, string; 141, identifier:c_source_id; 142, identifier:c_schema_id; 143, attribute; 144, attribute; 145, argument_list; 146, identifier:ErrorCode; 147, identifier:InvalidSchema; 148, identifier:schema; 149, identifier:logger; 150, identifier:Schema; 151, identifier:lookup; 152, call; 153, identifier:source_id; 154, identifier:encode; 155, string; 156, identifier:schema_id; 157, identifier:encode; 158, string; 159, string_content:vcx_schema_get_attributes; 160, attribute; 161, identifier:cb; 162, identifier:data; 163, identifier:decode; 164, identifier:CFUNCTYPE; 165, argument_list; 166, string_content:utf-8; 167, string_content:utf-8; 168, identifier:Schema; 169, identifier:lookup; 170, None; 171, identifier:c_uint32; 172, identifier:c_uint32; 173, identifier:c_uint32; 174, identifier:c_char_p
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 5, 9; 5, 10; 6, 11; 6, 12; 7, 13; 8, 14; 8, 15; 10, 16; 12, 17; 14, 18; 14, 19; 14, 20; 14, 21; 14, 22; 14, 23; 14, 24; 14, 25; 14, 26; 14, 27; 14, 28; 14, 29; 15, 30; 15, 31; 18, 32; 19, 33; 19, 34; 20, 35; 21, 36; 22, 37; 23, 38; 24, 39; 25, 40; 26, 41; 27, 42; 28, 43; 29, 44; 31, 45; 32, 46; 32, 47; 33, 48; 34, 49; 34, 50; 35, 51; 35, 52; 36, 53; 36, 54; 37, 55; 37, 56; 38, 57; 38, 58; 39, 59; 39, 60; 40, 61; 40, 62; 41, 63; 41, 64; 42, 65; 42, 66; 43, 67; 43, 68; 45, 69; 47, 70; 47, 71; 48, 72; 48, 73; 49, 74; 50, 75; 52, 76; 52, 77; 54, 78; 54, 79; 55, 80; 55, 81; 56, 82; 57, 83; 57, 84; 58, 85; 60, 86; 60, 87; 61, 88; 61, 89; 62, 90; 62, 91; 63, 92; 63, 93; 64, 94; 64, 95; 65, 96; 65, 97; 66, 98; 66, 99; 67, 100; 67, 101; 69, 102; 69, 103; 71, 104; 71, 105; 71, 106; 71, 107; 73, 108; 73, 109; 74, 110; 74, 111; 75, 112; 75, 113; 77, 114; 79, 115; 82, 116; 82, 117; 83, 118; 83, 119; 86, 120; 86, 121; 87, 122; 91, 123; 95, 124; 99, 125; 103, 126; 108, 127; 108, 128; 110, 129; 110, 130; 111, 131; 112, 132; 112, 133; 113, 134; 113, 135; 114, 136; 114, 137; 115, 138; 115, 139; 117, 140; 117, 141; 117, 142; 117, 143; 122, 144; 122, 145; 126, 146; 126, 147; 129, 148; 129, 149; 132, 150; 132, 151; 135, 152; 136, 153; 136, 154; 137, 155; 138, 156; 138, 157; 139, 158; 140, 159; 143, 160; 143, 161; 144, 162; 144, 163; 152, 164; 152, 165; 155, 166; 158, 167; 160, 168; 160, 169; 165, 170; 165, 171; 165, 172; 165, 173; 165, 174
async def lookup(source_id: str, schema_id: str): """ Create a new schema object from an existing ledger schema :param source_id: Institution's personal identification for the schema :param schema_id: Ledger schema ID for lookup Example: source_id = 'foobar123' name = 'Address Schema' version = '1.0' attrs = ['address', 'city', 'state'] payment_handle = 0 schema1 = await Schema.create(source_id, name, version, attrs, payment_handle) id1 = await schema.get_schema_id() data = await Schema.lookup(source_id, schema_id) assert data.attrs.sort() == ['sex', 'age', 'name', 'height'].sort() assert data.name == 'test-licence' assert data.handle > 0 :return: schema object """ try: schema = Schema(source_id, '', '', []) if not hasattr(Schema.lookup, "cb"): schema.logger.debug("vcx_schema_get_attributes: Creating callback") Schema.lookup.cb = create_cb(CFUNCTYPE(None, c_uint32, c_uint32, c_uint32, c_char_p)) c_source_id = c_char_p(source_id.encode('utf-8')) c_schema_id = c_char_p(schema_id.encode('utf-8')) handle, data = await do_call('vcx_schema_get_attributes', c_source_id, c_schema_id, Schema.lookup.cb) schema.logger.debug("created schema object") schema_result = json.loads(data.decode()) schema.attrs = schema_result['data'] schema.name = schema_result['name'] schema.version = schema_result['version'] schema.handle = handle return schema except KeyError: raise VcxError(ErrorCode.InvalidSchema)
0, module; 1, function_definition; 2, function_name:interleave_keys; 3, parameters; 4, block; 5, identifier:a; 6, identifier:b; 7, expression_statement; 8, function_definition; 9, return_statement; 10, comment:"""Interleave bits from two sort keys to form a joint sort key. Examples that are similar in both of the provided keys will have similar values for the key defined by this function. Useful for tasks with two text fields like machine translation or natural language inference. """; 11, function_name:interleave; 12, parameters; 13, block; 14, call; 15, identifier:args; 16, return_statement; 17, identifier:int; 18, argument_list; 19, call; 20, call; 21, keyword_argument; 22, attribute; 23, argument_list; 24, attribute; 25, argument_list; 26, identifier:base; 27, integer:2; 28, string; 29, identifier:join; 30, list_comprehension; 31, string; 32, identifier:join; 33, call; 34, identifier:x; 35, for_in_clause; 36, for_in_clause; 37, identifier:interleave; 38, generator_expression; 39, identifier:t; 40, call; 41, identifier:x; 42, identifier:t; 43, call; 44, for_in_clause; 45, identifier:zip; 46, argument_list; 47, identifier:format; 48, argument_list; 49, identifier:x; 50, tuple; 51, list_splat; 52, identifier:x; 53, string; 54, identifier:a; 55, identifier:b; 56, identifier:args; 57, string_content:016b
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 4, 9; 7, 10; 8, 11; 8, 12; 8, 13; 9, 14; 12, 15; 13, 16; 14, 17; 14, 18; 16, 19; 18, 20; 18, 21; 19, 22; 19, 23; 20, 24; 20, 25; 21, 26; 21, 27; 22, 28; 22, 29; 23, 30; 24, 31; 24, 32; 25, 33; 30, 34; 30, 35; 30, 36; 33, 37; 33, 38; 35, 39; 35, 40; 36, 41; 36, 42; 38, 43; 38, 44; 40, 45; 40, 46; 43, 47; 43, 48; 44, 49; 44, 50; 46, 51; 48, 52; 48, 53; 50, 54; 50, 55; 51, 56; 53, 57
def interleave_keys(a, b): """Interleave bits from two sort keys to form a joint sort key. Examples that are similar in both of the provided keys will have similar values for the key defined by this function. Useful for tasks with two text fields like machine translation or natural language inference. """ def interleave(args): return ''.join([x for t in zip(*args) for x in t]) return int(''.join(interleave(format(x, '016b') for x in (a, b))), base=2)
0, module; 1, function_definition; 2, function_name:pool; 3, parameters; 4, block; 5, identifier:data; 6, identifier:batch_size; 7, identifier:key; 8, default_parameter; 9, default_parameter; 10, default_parameter; 11, default_parameter; 12, expression_statement; 13, if_statement; 14, for_statement; 15, identifier:batch_size_fn; 16, lambda; 17, identifier:random_shuffler; 18, None; 19, identifier:shuffle; 20, False; 21, identifier:sort_within_batch; 22, False; 23, comment:"""Sort within buckets, then batch, then shuffle batches. Partitions data into chunks of size 100*batch_size, sorts examples within each chunk using sort_key, then batch these examples and shuffle the batches. """; 24, comparison_operator:random_shuffler is None; 25, block; 26, identifier:p; 27, call; 28, block; 29, lambda_parameters; 30, identifier:count; 31, identifier:random_shuffler; 32, None; 33, expression_statement; 34, identifier:batch; 35, argument_list; 36, expression_statement; 37, if_statement; 38, identifier:new; 39, identifier:count; 40, identifier:sofar; 41, assignment; 42, identifier:data; 43, binary_operator:batch_size * 100; 44, identifier:batch_size_fn; 45, assignment; 46, identifier:shuffle; 47, block; 48, else_clause; 49, identifier:random_shuffler; 50, attribute; 51, identifier:batch_size; 52, integer:100; 53, identifier:p_batch; 54, conditional_expression:batch(sorted(p, key=key), batch_size, batch_size_fn) \ if sort_within_batch \ else batch(p, batch_size, batch_size_fn); 55, for_statement; 56, block; 57, identifier:random; 58, identifier:shuffle; 59, call; 60, line_continuation:\; 61, identifier:sort_within_batch; 62, line_continuation:\; 63, call; 64, identifier:b; 65, call; 66, block; 67, for_statement; 68, identifier:batch; 69, argument_list; 70, identifier:batch; 71, argument_list; 72, identifier:random_shuffler; 73, argument_list; 74, expression_statement; 75, identifier:b; 76, call; 77, block; 78, call; 79, identifier:batch_size; 80, identifier:batch_size_fn; 81, identifier:p; 82, identifier:batch_size; 83, identifier:batch_size_fn; 84, call; 85, yield; 86, identifier:list; 87, argument_list; 88, expression_statement; 89, identifier:sorted; 90, argument_list; 91, identifier:list; 92, argument_list; 93, identifier:b; 94, identifier:p_batch; 95, yield; 96, identifier:p; 97, keyword_argument; 98, identifier:p_batch; 99, identifier:b; 100, identifier:key; 101, identifier:key
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 3, 9; 3, 10; 3, 11; 4, 12; 4, 13; 4, 14; 8, 15; 8, 16; 9, 17; 9, 18; 10, 19; 10, 20; 11, 21; 11, 22; 12, 23; 13, 24; 13, 25; 14, 26; 14, 27; 14, 28; 16, 29; 16, 30; 24, 31; 24, 32; 25, 33; 27, 34; 27, 35; 28, 36; 28, 37; 29, 38; 29, 39; 29, 40; 33, 41; 35, 42; 35, 43; 35, 44; 36, 45; 37, 46; 37, 47; 37, 48; 41, 49; 41, 50; 43, 51; 43, 52; 45, 53; 45, 54; 47, 55; 48, 56; 50, 57; 50, 58; 54, 59; 54, 60; 54, 61; 54, 62; 54, 63; 55, 64; 55, 65; 55, 66; 56, 67; 59, 68; 59, 69; 63, 70; 63, 71; 65, 72; 65, 73; 66, 74; 67, 75; 67, 76; 67, 77; 69, 78; 69, 79; 69, 80; 71, 81; 71, 82; 71, 83; 73, 84; 74, 85; 76, 86; 76, 87; 77, 88; 78, 89; 78, 90; 84, 91; 84, 92; 85, 93; 87, 94; 88, 95; 90, 96; 90, 97; 92, 98; 95, 99; 97, 100; 97, 101
def pool(data, batch_size, key, batch_size_fn=lambda new, count, sofar: count, random_shuffler=None, shuffle=False, sort_within_batch=False): """Sort within buckets, then batch, then shuffle batches. Partitions data into chunks of size 100*batch_size, sorts examples within each chunk using sort_key, then batch these examples and shuffle the batches. """ if random_shuffler is None: random_shuffler = random.shuffle for p in batch(data, batch_size * 100, batch_size_fn): p_batch = batch(sorted(p, key=key), batch_size, batch_size_fn) \ if sort_within_batch \ else batch(p, batch_size, batch_size_fn) if shuffle: for b in random_shuffler(list(p_batch)): yield b else: for b in list(p_batch): yield b
0, module; 1, function_definition; 2, function_name:data; 3, parameters; 4, block; 5, identifier:self; 6, expression_statement; 7, if_statement; 8, return_statement; 9, comment:"""Return the examples in the dataset in order, sorted, or shuffled."""; 10, attribute; 11, block; 12, elif_clause; 13, else_clause; 14, identifier:xs; 15, identifier:self; 16, identifier:sort; 17, expression_statement; 18, attribute; 19, block; 20, block; 21, assignment; 22, identifier:self; 23, identifier:shuffle; 24, expression_statement; 25, expression_statement; 26, identifier:xs; 27, call; 28, assignment; 29, assignment; 30, identifier:sorted; 31, argument_list; 32, identifier:xs; 33, list_comprehension; 34, identifier:xs; 35, attribute; 36, attribute; 37, keyword_argument; 38, subscript; 39, for_in_clause; 40, identifier:self; 41, identifier:dataset; 42, identifier:self; 43, identifier:dataset; 44, identifier:key; 45, attribute; 46, attribute; 47, identifier:i; 48, identifier:i; 49, call; 50, identifier:self; 51, identifier:sort_key; 52, identifier:self; 53, identifier:dataset; 54, attribute; 55, argument_list; 56, identifier:self; 57, identifier:random_shuffler; 58, call; 59, identifier:range; 60, argument_list; 61, call; 62, identifier:len; 63, argument_list; 64, attribute; 65, identifier:self; 66, identifier:dataset
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 4, 6; 4, 7; 4, 8; 6, 9; 7, 10; 7, 11; 7, 12; 7, 13; 8, 14; 10, 15; 10, 16; 11, 17; 12, 18; 12, 19; 13, 20; 17, 21; 18, 22; 18, 23; 19, 24; 20, 25; 21, 26; 21, 27; 24, 28; 25, 29; 27, 30; 27, 31; 28, 32; 28, 33; 29, 34; 29, 35; 31, 36; 31, 37; 33, 38; 33, 39; 35, 40; 35, 41; 36, 42; 36, 43; 37, 44; 37, 45; 38, 46; 38, 47; 39, 48; 39, 49; 45, 50; 45, 51; 46, 52; 46, 53; 49, 54; 49, 55; 54, 56; 54, 57; 55, 58; 58, 59; 58, 60; 60, 61; 61, 62; 61, 63; 63, 64; 64, 65; 64, 66
def data(self): """Return the examples in the dataset in order, sorted, or shuffled.""" if self.sort: xs = sorted(self.dataset, key=self.sort_key) elif self.shuffle: xs = [self.dataset[i] for i in self.random_shuffler(range(len(self.dataset)))] else: xs = self.dataset return xs
0, module; 1, function_definition; 2, function_name:color_table; 3, parameters; 4, block; 5, identifier:color; 6, default_parameter; 7, default_parameter; 8, default_parameter; 9, default_parameter; 10, default_parameter; 11, expression_statement; 12, if_statement; 13, function_definition; 14, expression_statement; 15, for_statement; 16, expression_statement; 17, if_statement; 18, return_statement; 19, identifier:N; 20, integer:1; 21, identifier:sort; 22, False; 23, identifier:sort_values; 24, False; 25, identifier:inline; 26, False; 27, identifier:as_html; 28, False; 29, comment:""" Generates a colour table Parameters: ----------- color : string | list | dict Color representation in rgba|rgb|hex If a list of colors is passed then these are displayed in a table N : int number of colours to generate When color is not a list then it generaes a range of N colors sort : bool if True then items are sorted sort_values : bool if True then items are sorted by color values. Only applies if color is a dictionary inline : bool if True it returns single line color blocks as_html : bool if True it returns the HTML code Example: color_table('#ff9933') color_table(cufflinks.cnames) color_table(['pink','salmon','yellow']) Note: This function only works in iPython Notebook """; 30, call; 31, block; 32, elif_clause; 33, else_clause; 34, function_name:_color; 35, parameters; 36, block; 37, assignment; 38, identifier:c; 39, identifier:rgb_tup; 40, block; 41, augmented_assignment; 42, identifier:as_html; 43, block; 44, call; 45, identifier:isinstance; 46, argument_list; 47, expression_statement; 48, expression_statement; 49, if_statement; 50, call; 51, block; 52, block; 53, identifier:c; 54, if_statement; 55, if_statement; 56, return_statement; 57, identifier:s; 58, conditional_expression:'<ul style="list-style-type: none;">' if not inline else ''; 59, if_statement; 60, if_statement; 61, identifier:s; 62, conditional_expression:'</ul>' if not inline else ''; 63, return_statement; 64, identifier:display; 65, argument_list; 66, identifier:color; 67, identifier:list; 68, assignment; 69, assignment; 70, identifier:sort; 71, block; 72, identifier:isinstance; 73, argument_list; 74, expression_statement; 75, expression_statement; 76, if_statement; 77, expression_statement; 78, expression_statement; 79, if_statement; 80, comparison_operator:hex_to_hsv(c)[2] < .5; 81, block; 82, else_clause; 83, comparison_operator:c == c_; 84, block; 85, else_clause; 86, expression_list; 87, string:'<ul style="list-style-type: none;">'; 88, not_operator; 89, string; 90, call; 91, block; 92, else_clause; 93, identifier:inline; 94, block; 95, else_clause; 96, string; 97, not_operator; 98, string; 99, identifier:s; 100, call; 101, identifier:c_; 102, string; 103, identifier:rgb_tup; 104, list_comprehension; 105, expression_statement; 106, identifier:color; 107, identifier:dict; 108, assignment; 109, assignment; 110, identifier:sort_values; 111, block; 112, elif_clause; 113, assignment; 114, assignment; 115, comparison_operator:N > 1; 116, block; 117, else_clause; 118, subscript; 119, float:.5; 120, expression_statement; 121, expression_statement; 122, block; 123, identifier:c; 124, identifier:c_; 125, expression_statement; 126, block; 127, identifier:color; 128, identifier:shadow; 129, identifier:border; 130, identifier:inline; 131, identifier:isinstance; 132, argument_list; 133, expression_statement; 134, expression_statement; 135, block; 136, expression_statement; 137, block; 138, string_content:</ul>; 139, identifier:inline; 140, identifier:HTML; 141, argument_list; 142, call; 143, for_in_clause; 144, call; 145, identifier:c_; 146, string; 147, identifier:items; 148, list_comprehension; 149, expression_statement; 150, identifier:sort; 151, block; 152, identifier:rgb_tup; 153, list_comprehension; 154, identifier:c_; 155, call; 156, identifier:N; 157, integer:1; 158, expression_statement; 159, block; 160, call; 161, integer:2; 162, assignment; 163, assignment; 164, expression_statement; 165, expression_statement; 166, assignment; 167, expression_statement; 168, identifier:c; 169, identifier:tuple; 170, assignment; 171, augmented_assignment; 172, expression_statement; 173, augmented_assignment; 174, expression_statement; 175, expression_statement; 176, identifier:s; 177, identifier:normalize; 178, argument_list; 179, identifier:c; 180, identifier:color; 181, attribute; 182, argument_list; 183, tuple; 184, for_in_clause; 185, assignment; 186, expression_statement; 187, tuple; 188, for_in_clause; 189, identifier:normalize; 190, argument_list; 191, assignment; 192, expression_statement; 193, identifier:hex_to_hsv; 194, argument_list; 195, identifier:color; 196, string:"#ffffff"; 197, identifier:shadow; 198, string; 199, assignment; 200, assignment; 201, identifier:border; 202, string:" border: 1px solid #ffffff;"; 203, assignment; 204, pattern_list; 205, identifier:c; 206, identifier:k; 207, string; 208, assignment; 209, identifier:s; 210, call; 211, assignment; 212, augmented_assignment; 213, identifier:c; 214, identifier:rgb_tup; 215, identifier:sort; 216, identifier:k; 217, call; 218, call; 219, pattern_list; 220, call; 221, identifier:items; 222, call; 223, assignment; 224, identifier:k; 225, identifier:v; 226, pattern_list; 227, identifier:items; 228, identifier:color; 229, identifier:rgb_tup; 230, subscript; 231, assignment; 232, identifier:c; 233, string_content:0 1px 0 #000; 234, identifier:color; 235, string:"#000000"; 236, identifier:shadow; 237, string; 238, identifier:border; 239, string; 240, identifier:k; 241, identifier:c; 242, string_content::; 243, identifier:k; 244, string; 245, attribute; 246, argument_list; 247, pattern_list; 248, call; 249, identifier:s; 250, binary_operator:"""<li style="text-align:center;""" + border + """line-height:30px;background-color:""" + c + """;"> <span style=" text-shadow:""" + shadow + """; color:""" + color + """;">""" + k + c.upper() + """</span> </li>"""; 251, identifier:normalize; 252, argument_list; 253, identifier:hex_to_hsv; 254, argument_list; 255, identifier:k; 256, identifier:v; 257, identifier:list; 258, argument_list; 259, identifier:sorted; 260, argument_list; 261, identifier:items; 262, call; 263, identifier:k; 264, identifier:v; 265, identifier:_; 266, call; 267, slice; 268, identifier:rgb_tup; 269, list; 270, string_content:0 1px 0 rgba(255,255,255,0.6); 271, string:'<div style="background-color:{0};height:20px;width:20px;display:inline-block;"></div>'; 272, identifier:format; 273, identifier:c; 274, identifier:color; 275, identifier:shadow; 276, identifier:border; 277, identifier:_color; 278, argument_list; 279, binary_operator:"""<li style="text-align:center;""" + border + """line-height:30px;background-color:""" + c + """;"> <span style=" text-shadow:""" + shadow + """; color:""" + color + """;">""" + k + c.upper(); 280, comment:"""</span> </li>"""; 281, identifier:v; 282, call; 283, call; 284, identifier:items; 285, keyword_argument; 286, identifier:sorted; 287, argument_list; 288, attribute; 289, argument_list; 290, unary_operator; 291, identifier:c_; 292, identifier:c; 293, binary_operator:"""<li style="text-align:center;""" + border + """line-height:30px;background-color:""" + c + """;"> <span style=" text-shadow:""" + shadow + """; color:""" + color + """;">""" + k; 294, call; 295, identifier:normalize; 296, argument_list; 297, attribute; 298, argument_list; 299, identifier:key; 300, call; 301, identifier:items; 302, keyword_argument; 303, identifier:np; 304, identifier:array; 305, call; 306, integer:1; 307, binary_operator:"""<li style="text-align:center;""" + border + """line-height:30px;background-color:""" + c + """;"> <span style=" text-shadow:""" + shadow + """; color:""" + color + """;">"""; 308, identifier:k; 309, attribute; 310, argument_list; 311, identifier:v; 312, identifier:color; 313, identifier:items; 314, attribute; 315, argument_list; 316, identifier:key; 317, call; 318, identifier:color_range; 319, argument_list; 320, binary_operator:"""<li style="text-align:center;""" + border + """line-height:30px;background-color:""" + c + """;"> <span style=" text-shadow:""" + shadow + """; color:""" + color; 321, comment:""";">"""; 322, identifier:c; 323, identifier:upper; 324, identifier:operator; 325, identifier:itemgetter; 326, integer:2; 327, attribute; 328, argument_list; 329, identifier:c_; 330, identifier:N; 331, binary_operator:"""<li style="text-align:center;""" + border + """line-height:30px;background-color:""" + c + """;"> <span style=" text-shadow:""" + shadow + """; color:"""; 332, identifier:color; 333, identifier:operator; 334, identifier:itemgetter; 335, integer:0; 336, binary_operator:"""<li style="text-align:center;""" + border + """line-height:30px;background-color:""" + c + """;"> <span style=" text-shadow:""" + shadow; 337, comment:"""; color:"""; 338, binary_operator:"""<li style="text-align:center;""" + border + """line-height:30px;background-color:""" + c + """;"> <span style=" text-shadow:"""; 339, identifier:shadow; 340, binary_operator:"""<li style="text-align:center;""" + border + """line-height:30px;background-color:""" + c; 341, comment:""";"> <span style=" text-shadow:"""; 342, binary_operator:"""<li style="text-align:center;""" + border + """line-height:30px;background-color:"""; 343, identifier:c; 344, binary_operator:"""<li style="text-align:center;""" + border; 345, comment:"""line-height:30px;background-color:"""; 346, comment:"""<li style="text-align:center;"""; 347, identifier:border
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 3, 9; 3, 10; 4, 11; 4, 12; 4, 13; 4, 14; 4, 15; 4, 16; 4, 17; 4, 18; 6, 19; 6, 20; 7, 21; 7, 22; 8, 23; 8, 24; 9, 25; 9, 26; 10, 27; 10, 28; 11, 29; 12, 30; 12, 31; 12, 32; 12, 33; 13, 34; 13, 35; 13, 36; 14, 37; 15, 38; 15, 39; 15, 40; 16, 41; 17, 42; 17, 43; 18, 44; 30, 45; 30, 46; 31, 47; 31, 48; 31, 49; 32, 50; 32, 51; 33, 52; 35, 53; 36, 54; 36, 55; 36, 56; 37, 57; 37, 58; 40, 59; 40, 60; 41, 61; 41, 62; 43, 63; 44, 64; 44, 65; 46, 66; 46, 67; 47, 68; 48, 69; 49, 70; 49, 71; 50, 72; 50, 73; 51, 74; 51, 75; 51, 76; 51, 77; 52, 78; 52, 79; 54, 80; 54, 81; 54, 82; 55, 83; 55, 84; 55, 85; 56, 86; 58, 87; 58, 88; 58, 89; 59, 90; 59, 91; 59, 92; 60, 93; 60, 94; 60, 95; 62, 96; 62, 97; 62, 98; 63, 99; 65, 100; 68, 101; 68, 102; 69, 103; 69, 104; 71, 105; 73, 106; 73, 107; 74, 108; 75, 109; 76, 110; 76, 111; 76, 112; 77, 113; 78, 114; 79, 115; 79, 116; 79, 117; 80, 118; 80, 119; 81, 120; 81, 121; 82, 122; 83, 123; 83, 124; 84, 125; 85, 126; 86, 127; 86, 128; 86, 129; 88, 130; 90, 131; 90, 132; 91, 133; 91, 134; 92, 135; 94, 136; 95, 137; 96, 138; 97, 139; 100, 140; 100, 141; 104, 142; 104, 143; 105, 144; 108, 145; 108, 146; 109, 147; 109, 148; 111, 149; 112, 150; 112, 151; 113, 152; 113, 153; 114, 154; 114, 155; 115, 156; 115, 157; 116, 158; 117, 159; 118, 160; 118, 161; 120, 162; 121, 163; 122, 164; 122, 165; 125, 166; 126, 167; 132, 168; 132, 169; 133, 170; 134, 171; 135, 172; 136, 173; 137, 174; 137, 175; 141, 176; 142, 177; 142, 178; 143, 179; 143, 180; 144, 181; 144, 182; 148, 183; 148, 184; 149, 185; 151, 186; 153, 187; 153, 188; 155, 189; 155, 190; 158, 191; 159, 192; 160, 193; 160, 194; 162, 195; 162, 196; 163, 197; 163, 198; 164, 199; 165, 200; 166, 201; 166, 202; 167, 203; 170, 204; 170, 205; 171, 206; 171, 207; 172, 208; 173, 209; 173, 210; 174, 211; 175, 212; 178, 213; 181, 214; 181, 215; 183, 216; 183, 217; 183, 218; 184, 219; 184, 220; 185, 221; 185, 222; 186, 223; 187, 224; 187, 225; 188, 226; 188, 227; 190, 228; 191, 229; 191, 230; 192, 231; 194, 232; 198, 233; 199, 234; 199, 235; 200, 236; 200, 237; 203, 238; 203, 239; 204, 240; 204, 241; 207, 242; 208, 243; 208, 244; 210, 245; 210, 246; 211, 247; 211, 248; 212, 249; 212, 250; 217, 251; 217, 252; 218, 253; 218, 254; 219, 255; 219, 256; 220, 257; 220, 258; 222, 259; 222, 260; 223, 261; 223, 262; 226, 263; 226, 264; 226, 265; 230, 266; 230, 267; 231, 268; 231, 269; 237, 270; 245, 271; 245, 272; 246, 273; 247, 274; 247, 275; 247, 276; 248, 277; 248, 278; 250, 279; 250, 280; 252, 281; 254, 282; 258, 283; 260, 284; 260, 285; 262, 286; 262, 287; 266, 288; 266, 289; 267, 290; 269, 291; 278, 292; 279, 293; 279, 294; 282, 295; 282, 296; 283, 297; 283, 298; 285, 299; 285, 300; 287, 301; 287, 302; 288, 303; 288, 304; 289, 305; 290, 306; 293, 307; 293, 308; 294, 309; 294, 310; 296, 311; 297, 312; 297, 313; 300, 314; 300, 315; 302, 316; 302, 317; 305, 318; 305, 319; 307, 320; 307, 321; 309, 322; 309, 323; 314, 324; 314, 325; 315, 326; 317, 327; 317, 328; 319, 329; 319, 330; 320, 331; 320, 332; 327, 333; 327, 334; 328, 335; 331, 336; 331, 337; 336, 338; 336, 339; 338, 340; 338, 341; 340, 342; 340, 343; 342, 344; 342, 345; 344, 346; 344, 347
def color_table(color, N=1, sort=False, sort_values=False, inline=False, as_html=False): """ Generates a colour table Parameters: ----------- color : string | list | dict Color representation in rgba|rgb|hex If a list of colors is passed then these are displayed in a table N : int number of colours to generate When color is not a list then it generaes a range of N colors sort : bool if True then items are sorted sort_values : bool if True then items are sorted by color values. Only applies if color is a dictionary inline : bool if True it returns single line color blocks as_html : bool if True it returns the HTML code Example: color_table('#ff9933') color_table(cufflinks.cnames) color_table(['pink','salmon','yellow']) Note: This function only works in iPython Notebook """ if isinstance(color, list): c_ = '' rgb_tup = [normalize(c) for c in color] if sort: rgb_tup.sort() elif isinstance(color, dict): c_ = '' items = [(k, normalize(v), hex_to_hsv(normalize(v))) for k, v in list(color.items())] if sort_values: items = sorted(items, key=operator.itemgetter(2)) elif sort: items = sorted(items, key=operator.itemgetter(0)) rgb_tup = [(k, v) for k, v, _ in items] else: c_ = normalize(color) if N > 1: rgb_tup = np.array(color_range(c_, N))[::-1] else: rgb_tup = [c_] def _color(c): if hex_to_hsv(c)[2] < .5: color = "#ffffff" shadow = '0 1px 0 #000' else: color = "#000000" shadow = '0 1px 0 rgba(255,255,255,0.6)' if c == c_: border = " border: 1px solid #ffffff;" else: border = '' return color, shadow, border s = '<ul style="list-style-type: none;">' if not inline else '' for c in rgb_tup: if isinstance(c, tuple): k, c = c k += ' : ' else: k = '' if inline: s += '<div style="background-color:{0};height:20px;width:20px;display:inline-block;"></div>'.format( c) else: color, shadow, border = _color(c) s += """<li style="text-align:center;""" + border + """line-height:30px;background-color:""" + c + """;"> <span style=" text-shadow:""" + shadow + """; color:""" + color + """;">""" + k + c.upper() + """</span> </li>""" s += '</ul>' if not inline else '' if as_html: return s return display(HTML(s))
0, module; 1, function_definition; 2, function_name:get_nearest_edge; 3, parameters; 4, block; 5, identifier:G; 6, identifier:point; 7, expression_statement; 8, expression_statement; 9, expression_statement; 10, expression_statement; 11, expression_statement; 12, expression_statement; 13, expression_statement; 14, expression_statement; 15, expression_statement; 16, return_statement; 17, comment:""" Return the nearest edge to a pair of coordinates. Pass in a graph and a tuple with the coordinates. We first get all the edges in the graph. Secondly we compute the euclidean distance from the coordinates to the segments determined by each edge. The last step is to sort the edge segments in ascending order based on the distance from the coordinates to the edge. In the end, the first element in the list of edges will be the closest edge that we will return as a tuple containing the shapely geometry and the u, v nodes. Parameters ---------- G : networkx multidigraph point : tuple The (lat, lng) or (y, x) point for which we will find the nearest edge in the graph Returns ------- closest_edge_to_point : tuple (shapely.geometry, u, v) A geometry object representing the segment and the coordinates of the two nodes that determine the edge section, u and v, the OSM ids of the nodes. """; 18, assignment; 19, assignment; 20, assignment; 21, assignment; 22, assignment; 23, assignment; 24, assignment; 25, call; 26, expression_list; 27, identifier:start_time; 28, call; 29, identifier:gdf; 30, call; 31, identifier:graph_edges; 32, call; 33, identifier:edges_with_distances; 34, list_comprehension; 35, identifier:edges_with_distances; 36, call; 37, identifier:closest_edge_to_point; 38, subscript; 39, pattern_list; 40, identifier:closest_edge_to_point; 41, identifier:log; 42, argument_list; 43, identifier:geometry; 44, identifier:u; 45, identifier:v; 46, attribute; 47, argument_list; 48, identifier:graph_to_gdfs; 49, argument_list; 50, attribute; 51, argument_list; 52, tuple; 53, for_in_clause; 54, identifier:sorted; 55, argument_list; 56, subscript; 57, integer:0; 58, identifier:geometry; 59, identifier:u; 60, identifier:v; 61, call; 62, identifier:time; 63, identifier:time; 64, identifier:G; 65, keyword_argument; 66, keyword_argument; 67, attribute; 68, identifier:tolist; 69, identifier:graph_edge; 70, call; 71, identifier:graph_edge; 72, identifier:graph_edges; 73, identifier:edges_with_distances; 74, keyword_argument; 75, identifier:edges_with_distances; 76, integer:0; 77, attribute; 78, argument_list; 79, identifier:nodes; 80, False; 81, identifier:fill_edge_geometry; 82, True; 83, subscript; 84, identifier:values; 85, attribute; 86, argument_list; 87, identifier:key; 88, lambda; 89, string; 90, identifier:format; 91, tuple; 92, identifier:point; 93, binary_operator:time.time() - start_time; 94, identifier:gdf; 95, list; 96, call; 97, identifier:distance; 98, subscript; 99, lambda_parameters; 100, subscript; 101, string_content:Found nearest edge ({}) to point {} in {:,.2f} seconds; 102, identifier:u; 103, identifier:v; 104, call; 105, identifier:start_time; 106, string:"geometry"; 107, string:"u"; 108, string:"v"; 109, identifier:Point; 110, argument_list; 111, identifier:graph_edge; 112, integer:0; 113, identifier:x; 114, identifier:x; 115, integer:1; 116, attribute; 117, argument_list; 118, call; 119, identifier:time; 120, identifier:time; 121, identifier:tuple; 122, argument_list; 123, call; 124, identifier:reversed; 125, argument_list; 126, identifier:point
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 4, 14; 4, 15; 4, 16; 7, 17; 8, 18; 9, 19; 10, 20; 11, 21; 12, 22; 13, 23; 14, 24; 15, 25; 16, 26; 18, 27; 18, 28; 19, 29; 19, 30; 20, 31; 20, 32; 21, 33; 21, 34; 22, 35; 22, 36; 23, 37; 23, 38; 24, 39; 24, 40; 25, 41; 25, 42; 26, 43; 26, 44; 26, 45; 28, 46; 28, 47; 30, 48; 30, 49; 32, 50; 32, 51; 34, 52; 34, 53; 36, 54; 36, 55; 38, 56; 38, 57; 39, 58; 39, 59; 39, 60; 42, 61; 46, 62; 46, 63; 49, 64; 49, 65; 49, 66; 50, 67; 50, 68; 52, 69; 52, 70; 53, 71; 53, 72; 55, 73; 55, 74; 56, 75; 56, 76; 61, 77; 61, 78; 65, 79; 65, 80; 66, 81; 66, 82; 67, 83; 67, 84; 70, 85; 70, 86; 74, 87; 74, 88; 77, 89; 77, 90; 78, 91; 78, 92; 78, 93; 83, 94; 83, 95; 85, 96; 85, 97; 86, 98; 88, 99; 88, 100; 89, 101; 91, 102; 91, 103; 93, 104; 93, 105; 95, 106; 95, 107; 95, 108; 96, 109; 96, 110; 98, 111; 98, 112; 99, 113; 100, 114; 100, 115; 104, 116; 104, 117; 110, 118; 116, 119; 116, 120; 118, 121; 118, 122; 122, 123; 123, 124; 123, 125; 125, 126
def get_nearest_edge(G, point): """ Return the nearest edge to a pair of coordinates. Pass in a graph and a tuple with the coordinates. We first get all the edges in the graph. Secondly we compute the euclidean distance from the coordinates to the segments determined by each edge. The last step is to sort the edge segments in ascending order based on the distance from the coordinates to the edge. In the end, the first element in the list of edges will be the closest edge that we will return as a tuple containing the shapely geometry and the u, v nodes. Parameters ---------- G : networkx multidigraph point : tuple The (lat, lng) or (y, x) point for which we will find the nearest edge in the graph Returns ------- closest_edge_to_point : tuple (shapely.geometry, u, v) A geometry object representing the segment and the coordinates of the two nodes that determine the edge section, u and v, the OSM ids of the nodes. """ start_time = time.time() gdf = graph_to_gdfs(G, nodes=False, fill_edge_geometry=True) graph_edges = gdf[["geometry", "u", "v"]].values.tolist() edges_with_distances = [ ( graph_edge, Point(tuple(reversed(point))).distance(graph_edge[0]) ) for graph_edge in graph_edges ] edges_with_distances = sorted(edges_with_distances, key=lambda x: x[1]) closest_edge_to_point = edges_with_distances[0][0] geometry, u, v = closest_edge_to_point log('Found nearest edge ({}) to point {} in {:,.2f} seconds'.format((u, v), point, time.time() - start_time)) return geometry, u, v
0, module; 1, function_definition; 2, function_name:get_http_headers; 3, parameters; 4, block; 5, default_parameter; 6, default_parameter; 7, default_parameter; 8, expression_statement; 9, if_statement; 10, if_statement; 11, if_statement; 12, expression_statement; 13, expression_statement; 14, return_statement; 15, identifier:user_agent; 16, None; 17, identifier:referer; 18, None; 19, identifier:accept_language; 20, None; 21, comment:""" Update the default requests HTTP headers with OSMnx info. Parameters ---------- user_agent : str the user agent string, if None will set with OSMnx default referer : str the referer string, if None will set with OSMnx default accept_language : str make accept-language explicit e.g. for consistent nominatim result sorting Returns ------- headers : dict """; 22, comparison_operator:user_agent is None; 23, block; 24, comparison_operator:referer is None; 25, block; 26, comparison_operator:accept_language is None; 27, block; 28, assignment; 29, call; 30, identifier:headers; 31, identifier:user_agent; 32, None; 33, expression_statement; 34, identifier:referer; 35, None; 36, expression_statement; 37, identifier:accept_language; 38, None; 39, expression_statement; 40, identifier:headers; 41, call; 42, attribute; 43, argument_list; 44, assignment; 45, assignment; 46, assignment; 47, attribute; 48, argument_list; 49, identifier:headers; 50, identifier:update; 51, dictionary; 52, identifier:user_agent; 53, attribute; 54, identifier:referer; 55, attribute; 56, identifier:accept_language; 57, attribute; 58, attribute; 59, identifier:default_headers; 60, pair; 61, pair; 62, pair; 63, identifier:settings; 64, identifier:default_user_agent; 65, identifier:settings; 66, identifier:default_referer; 67, identifier:settings; 68, identifier:default_accept_language; 69, identifier:requests; 70, identifier:utils; 71, string; 72, identifier:user_agent; 73, string; 74, identifier:referer; 75, string; 76, identifier:accept_language; 77, string_content:User-Agent; 78, string_content:referer; 79, string_content:Accept-Language
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 4, 14; 5, 15; 5, 16; 6, 17; 6, 18; 7, 19; 7, 20; 8, 21; 9, 22; 9, 23; 10, 24; 10, 25; 11, 26; 11, 27; 12, 28; 13, 29; 14, 30; 22, 31; 22, 32; 23, 33; 24, 34; 24, 35; 25, 36; 26, 37; 26, 38; 27, 39; 28, 40; 28, 41; 29, 42; 29, 43; 33, 44; 36, 45; 39, 46; 41, 47; 41, 48; 42, 49; 42, 50; 43, 51; 44, 52; 44, 53; 45, 54; 45, 55; 46, 56; 46, 57; 47, 58; 47, 59; 51, 60; 51, 61; 51, 62; 53, 63; 53, 64; 55, 65; 55, 66; 57, 67; 57, 68; 58, 69; 58, 70; 60, 71; 60, 72; 61, 73; 61, 74; 62, 75; 62, 76; 71, 77; 73, 78; 75, 79
def get_http_headers(user_agent=None, referer=None, accept_language=None): """ Update the default requests HTTP headers with OSMnx info. Parameters ---------- user_agent : str the user agent string, if None will set with OSMnx default referer : str the referer string, if None will set with OSMnx default accept_language : str make accept-language explicit e.g. for consistent nominatim result sorting Returns ------- headers : dict """ if user_agent is None: user_agent = settings.default_user_agent if referer is None: referer = settings.default_referer if accept_language is None: accept_language = settings.default_accept_language headers = requests.utils.default_headers() headers.update({'User-Agent': user_agent, 'referer': referer, 'Accept-Language': accept_language}) return headers
0, module; 1, function_definition; 2, function_name:_has_sorted_sa_indices; 3, parameters; 4, block; 5, identifier:s_indices; 6, identifier:a_indices; 7, expression_statement; 8, expression_statement; 9, for_statement; 10, return_statement; 11, comment:""" Check whether `s_indices` and `a_indices` are sorted in lexicographic order. Parameters ---------- s_indices, a_indices : ndarray(ndim=1) Returns ------- bool Whether `s_indices` and `a_indices` are sorted. """; 12, assignment; 13, identifier:i; 14, call; 15, block; 16, True; 17, identifier:L; 18, call; 19, identifier:range; 20, argument_list; 21, if_statement; 22, if_statement; 23, identifier:len; 24, argument_list; 25, binary_operator:L-1; 26, comparison_operator:s_indices[i] > s_indices[i+1]; 27, block; 28, comparison_operator:s_indices[i] == s_indices[i+1]; 29, block; 30, identifier:s_indices; 31, identifier:L; 32, integer:1; 33, subscript; 34, subscript; 35, return_statement; 36, subscript; 37, subscript; 38, if_statement; 39, identifier:s_indices; 40, identifier:i; 41, identifier:s_indices; 42, binary_operator:i+1; 43, False; 44, identifier:s_indices; 45, identifier:i; 46, identifier:s_indices; 47, binary_operator:i+1; 48, comparison_operator:a_indices[i] >= a_indices[i+1]; 49, block; 50, identifier:i; 51, integer:1; 52, identifier:i; 53, integer:1; 54, subscript; 55, subscript; 56, return_statement; 57, identifier:a_indices; 58, identifier:i; 59, identifier:a_indices; 60, binary_operator:i+1; 61, False; 62, identifier:i; 63, integer:1
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 4, 9; 4, 10; 7, 11; 8, 12; 9, 13; 9, 14; 9, 15; 10, 16; 12, 17; 12, 18; 14, 19; 14, 20; 15, 21; 15, 22; 18, 23; 18, 24; 20, 25; 21, 26; 21, 27; 22, 28; 22, 29; 24, 30; 25, 31; 25, 32; 26, 33; 26, 34; 27, 35; 28, 36; 28, 37; 29, 38; 33, 39; 33, 40; 34, 41; 34, 42; 35, 43; 36, 44; 36, 45; 37, 46; 37, 47; 38, 48; 38, 49; 42, 50; 42, 51; 47, 52; 47, 53; 48, 54; 48, 55; 49, 56; 54, 57; 54, 58; 55, 59; 55, 60; 56, 61; 60, 62; 60, 63
def _has_sorted_sa_indices(s_indices, a_indices): """ Check whether `s_indices` and `a_indices` are sorted in lexicographic order. Parameters ---------- s_indices, a_indices : ndarray(ndim=1) Returns ------- bool Whether `s_indices` and `a_indices` are sorted. """ L = len(s_indices) for i in range(L-1): if s_indices[i] > s_indices[i+1]: return False if s_indices[i] == s_indices[i+1]: if a_indices[i] >= a_indices[i+1]: return False return True
0, module; 1, function_definition; 2, function_name:_generate_a_indptr; 3, parameters; 4, block; 5, identifier:num_states; 6, identifier:s_indices; 7, identifier:out; 8, expression_statement; 9, expression_statement; 10, expression_statement; 11, for_statement; 12, expression_statement; 13, comment:""" Generate `a_indptr`; stored in `out`. `s_indices` is assumed to be in sorted order. Parameters ---------- num_states : scalar(int) s_indices : ndarray(int, ndim=1) out : ndarray(int, ndim=1) Length must be num_states+1. """; 14, assignment; 15, assignment; 16, identifier:s; 17, call; 18, block; 19, assignment; 20, identifier:idx; 21, integer:0; 22, subscript; 23, integer:0; 24, identifier:range; 25, argument_list; 26, while_statement; 27, expression_statement; 28, subscript; 29, call; 30, identifier:out; 31, integer:0; 32, binary_operator:num_states-1; 33, parenthesized_expression; 34, block; 35, assignment; 36, identifier:out; 37, identifier:num_states; 38, identifier:len; 39, argument_list; 40, identifier:num_states; 41, integer:1; 42, comparison_operator:s_indices[idx] == s; 43, expression_statement; 44, subscript; 45, identifier:idx; 46, identifier:s_indices; 47, subscript; 48, identifier:s; 49, augmented_assignment; 50, identifier:out; 51, binary_operator:s+1; 52, identifier:s_indices; 53, identifier:idx; 54, identifier:idx; 55, integer:1; 56, identifier:s; 57, integer:1
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 8, 13; 9, 14; 10, 15; 11, 16; 11, 17; 11, 18; 12, 19; 14, 20; 14, 21; 15, 22; 15, 23; 17, 24; 17, 25; 18, 26; 18, 27; 19, 28; 19, 29; 22, 30; 22, 31; 25, 32; 26, 33; 26, 34; 27, 35; 28, 36; 28, 37; 29, 38; 29, 39; 32, 40; 32, 41; 33, 42; 34, 43; 35, 44; 35, 45; 39, 46; 42, 47; 42, 48; 43, 49; 44, 50; 44, 51; 47, 52; 47, 53; 49, 54; 49, 55; 51, 56; 51, 57
def _generate_a_indptr(num_states, s_indices, out): """ Generate `a_indptr`; stored in `out`. `s_indices` is assumed to be in sorted order. Parameters ---------- num_states : scalar(int) s_indices : ndarray(int, ndim=1) out : ndarray(int, ndim=1) Length must be num_states+1. """ idx = 0 out[0] = 0 for s in range(num_states-1): while(s_indices[idx] == s): idx += 1 out[s+1] = idx out[num_states] = len(s_indices)
0, module; 1, function_definition; 2, function_name:sort_topologically; 3, parameters; 4, block; 5, identifier:dag; 6, expression_statement; 7, expression_statement; 8, expression_statement; 9, expression_statement; 10, while_statement; 11, if_statement; 12, return_statement; 13, comment:"""Sort the dag breath first topologically. Only the nodes inside the dag are returned, i.e. the nodes that are also keys. Returns: a topological ordering of the DAG. Raises: an error if this is not possible (graph is not valid). """; 14, assignment; 15, assignment; 16, assignment; 17, identifier:independent_nodes; 18, block; 19, comparison_operator:len(sorted_nodes) != len(dag.keys()); 20, block; 21, identifier:sorted_nodes; 22, identifier:dag; 23, call; 24, identifier:sorted_nodes; 25, list; 26, identifier:independent_nodes; 27, call; 28, expression_statement; 29, expression_statement; 30, comment:# this alters the dag so that we are sure we are visiting the nodes only once; 31, expression_statement; 32, while_statement; 33, call; 34, call; 35, raise_statement; 36, attribute; 37, argument_list; 38, identifier:deque; 39, argument_list; 40, assignment; 41, call; 42, assignment; 43, identifier:downstream_nodes; 44, block; 45, identifier:len; 46, argument_list; 47, identifier:len; 48, argument_list; 49, call; 50, identifier:copy; 51, identifier:deepcopy; 52, identifier:dag; 53, call; 54, identifier:node; 55, call; 56, attribute; 57, argument_list; 58, identifier:downstream_nodes; 59, subscript; 60, expression_statement; 61, if_statement; 62, if_statement; 63, identifier:sorted_nodes; 64, call; 65, identifier:ValueError; 66, argument_list; 67, identifier:get_independent_nodes; 68, argument_list; 69, attribute; 70, argument_list; 71, identifier:sorted_nodes; 72, identifier:append; 73, identifier:node; 74, identifier:dag; 75, identifier:node; 76, assignment; 77, comparison_operator:downstream_node not in dag; 78, block; 79, not_operator; 80, block; 81, attribute; 82, argument_list; 83, string; 84, identifier:dag; 85, identifier:independent_nodes; 86, identifier:popleft; 87, identifier:downstream_node; 88, call; 89, identifier:downstream_node; 90, identifier:dag; 91, continue_statement; 92, call; 93, expression_statement; 94, identifier:dag; 95, identifier:keys; 96, string_content:graph is not acyclic; 97, attribute; 98, argument_list; 99, identifier:has_dependencies; 100, argument_list; 101, call; 102, identifier:downstream_nodes; 103, identifier:pop; 104, integer:0; 105, identifier:downstream_node; 106, identifier:dag; 107, attribute; 108, argument_list; 109, identifier:independent_nodes; 110, identifier:append; 111, identifier:downstream_node
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 4, 6; 4, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 6, 13; 7, 14; 8, 15; 9, 16; 10, 17; 10, 18; 11, 19; 11, 20; 12, 21; 14, 22; 14, 23; 15, 24; 15, 25; 16, 26; 16, 27; 18, 28; 18, 29; 18, 30; 18, 31; 18, 32; 19, 33; 19, 34; 20, 35; 23, 36; 23, 37; 27, 38; 27, 39; 28, 40; 29, 41; 31, 42; 32, 43; 32, 44; 33, 45; 33, 46; 34, 47; 34, 48; 35, 49; 36, 50; 36, 51; 37, 52; 39, 53; 40, 54; 40, 55; 41, 56; 41, 57; 42, 58; 42, 59; 44, 60; 44, 61; 44, 62; 46, 63; 48, 64; 49, 65; 49, 66; 53, 67; 53, 68; 55, 69; 55, 70; 56, 71; 56, 72; 57, 73; 59, 74; 59, 75; 60, 76; 61, 77; 61, 78; 62, 79; 62, 80; 64, 81; 64, 82; 66, 83; 68, 84; 69, 85; 69, 86; 76, 87; 76, 88; 77, 89; 77, 90; 78, 91; 79, 92; 80, 93; 81, 94; 81, 95; 83, 96; 88, 97; 88, 98; 92, 99; 92, 100; 93, 101; 97, 102; 97, 103; 98, 104; 100, 105; 100, 106; 101, 107; 101, 108; 107, 109; 107, 110; 108, 111
def sort_topologically(dag): """Sort the dag breath first topologically. Only the nodes inside the dag are returned, i.e. the nodes that are also keys. Returns: a topological ordering of the DAG. Raises: an error if this is not possible (graph is not valid). """ dag = copy.deepcopy(dag) sorted_nodes = [] independent_nodes = deque(get_independent_nodes(dag)) while independent_nodes: node = independent_nodes.popleft() sorted_nodes.append(node) # this alters the dag so that we are sure we are visiting the nodes only once downstream_nodes = dag[node] while downstream_nodes: downstream_node = downstream_nodes.pop(0) if downstream_node not in dag: continue if not has_dependencies(downstream_node, dag): independent_nodes.append(downstream_node) if len(sorted_nodes) != len(dag.keys()): raise ValueError('graph is not acyclic') return sorted_nodes
0, module; 1, function_definition; 2, function_name:set_topological_dag_upstreams; 3, parameters; 4, block; 5, identifier:dag; 6, identifier:ops; 7, identifier:op_runs; 8, identifier:runs_by_ops; 9, expression_statement; 10, expression_statement; 11, for_statement; 12, comment:"""Set the upstream runs for the operation runs in the dag following the topological sort."""; 13, assignment; 14, identifier:op_id; 15, identifier:sorted_ops; 16, block; 17, identifier:sorted_ops; 18, call; 19, expression_statement; 20, expression_statement; 21, expression_statement; 22, attribute; 23, argument_list; 24, assignment; 25, assignment; 26, call; 27, identifier:dags; 28, identifier:sort_topologically; 29, keyword_argument; 30, identifier:op_run_id; 31, subscript; 32, identifier:op_run; 33, subscript; 34, identifier:set_op_upstreams; 35, argument_list; 36, identifier:dag; 37, identifier:dag; 38, identifier:runs_by_ops; 39, identifier:op_id; 40, identifier:op_runs; 41, identifier:op_run_id; 42, keyword_argument; 43, keyword_argument; 44, identifier:op_run; 45, identifier:op_run; 46, identifier:op; 47, subscript; 48, identifier:ops; 49, identifier:op_id
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 4, 9; 4, 10; 4, 11; 9, 12; 10, 13; 11, 14; 11, 15; 11, 16; 13, 17; 13, 18; 16, 19; 16, 20; 16, 21; 18, 22; 18, 23; 19, 24; 20, 25; 21, 26; 22, 27; 22, 28; 23, 29; 24, 30; 24, 31; 25, 32; 25, 33; 26, 34; 26, 35; 29, 36; 29, 37; 31, 38; 31, 39; 33, 40; 33, 41; 35, 42; 35, 43; 42, 44; 42, 45; 43, 46; 43, 47; 47, 48; 47, 49
def set_topological_dag_upstreams(dag, ops, op_runs, runs_by_ops): """Set the upstream runs for the operation runs in the dag following the topological sort.""" sorted_ops = dags.sort_topologically(dag=dag) for op_id in sorted_ops: op_run_id = runs_by_ops[op_id] op_run = op_runs[op_run_id] set_op_upstreams(op_run=op_run, op=ops[op_id])
0, module; 1, function_definition; 2, function_name:generate_from_text; 3, parameters; 4, block; 5, identifier:self; 6, identifier:text; 7, expression_statement; 8, expression_statement; 9, expression_statement; 10, return_statement; 11, comment:"""Generate wordcloud from text. The input "text" is expected to be a natural text. If you pass a sorted list of words, words will appear in your output twice. To remove this duplication, set ``collocations=False``. Calls process_text and generate_from_frequencies. ..versionchanged:: 1.2.2 Argument of generate_from_frequencies() is not return of process_text() any more. Returns ------- self """; 12, assignment; 13, call; 14, identifier:self; 15, identifier:words; 16, call; 17, attribute; 18, argument_list; 19, attribute; 20, argument_list; 21, identifier:self; 22, identifier:generate_from_frequencies; 23, identifier:words; 24, identifier:self; 25, identifier:process_text; 26, identifier:text
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 4, 9; 4, 10; 7, 11; 8, 12; 9, 13; 10, 14; 12, 15; 12, 16; 13, 17; 13, 18; 16, 19; 16, 20; 17, 21; 17, 22; 18, 23; 19, 24; 19, 25; 20, 26
def generate_from_text(self, text): """Generate wordcloud from text. The input "text" is expected to be a natural text. If you pass a sorted list of words, words will appear in your output twice. To remove this duplication, set ``collocations=False``. Calls process_text and generate_from_frequencies. ..versionchanged:: 1.2.2 Argument of generate_from_frequencies() is not return of process_text() any more. Returns ------- self """ words = self.process_text(text) self.generate_from_frequencies(words) return self
0, module; 1, function_definition; 2, function_name:_update_pods_metrics; 3, parameters; 4, block; 5, identifier:self; 6, identifier:instance; 7, identifier:pods; 8, expression_statement; 9, expression_statement; 10, for_statement; 11, expression_statement; 12, for_statement; 13, comment:""" Reports the number of running pods on this node, tagged by service and creator We go though all the pods, extract tags then count them by tag list, sorted and serialized in a pipe-separated string (it is an illegar character for tags) """; 14, assignment; 15, identifier:pod; 16, subscript; 17, block; 18, assignment; 19, pattern_list; 20, call; 21, block; 22, identifier:tags_map; 23, call; 24, identifier:pods; 25, string; 26, expression_statement; 27, expression_statement; 28, expression_statement; 29, if_statement; 30, if_statement; 31, expression_statement; 32, identifier:commmon_tags; 33, call; 34, identifier:pod_tags; 35, identifier:pod_count; 36, attribute; 37, argument_list; 38, expression_statement; 39, expression_statement; 40, expression_statement; 41, identifier:defaultdict; 42, argument_list; 43, string_content:items; 44, assignment; 45, assignment; 46, assignment; 47, call; 48, block; 49, comparison_operator:'namespace' in pod_meta; 50, block; 51, augmented_assignment; 52, attribute; 53, argument_list; 54, identifier:tags_map; 55, identifier:iteritems; 56, assignment; 57, call; 58, call; 59, identifier:int; 60, identifier:pod_meta; 61, call; 62, identifier:pod_tags; 63, call; 64, identifier:services; 65, call; 66, identifier:isinstance; 67, argument_list; 68, for_statement; 69, string; 70, identifier:pod_meta; 71, expression_statement; 72, subscript; 73, integer:1; 74, identifier:instance; 75, identifier:get; 76, string; 77, list; 78, identifier:tags; 79, call; 80, attribute; 81, argument_list; 82, attribute; 83, argument_list; 84, attribute; 85, argument_list; 86, attribute; 87, argument_list; 88, attribute; 89, argument_list; 90, identifier:services; 91, identifier:list; 92, identifier:service; 93, identifier:services; 94, block; 95, string_content:namespace; 96, call; 97, identifier:tags_map; 98, call; 99, string_content:tags; 100, identifier:list; 101, argument_list; 102, identifier:tags; 103, identifier:extend; 104, identifier:commmon_tags; 105, identifier:self; 106, identifier:publish_gauge; 107, identifier:self; 108, binary_operator:NAMESPACE + '.pods.running'; 109, identifier:pod_count; 110, identifier:tags; 111, identifier:pod; 112, identifier:get; 113, string; 114, dictionary; 115, attribute; 116, identifier:get_pod_creator_tags; 117, identifier:pod_meta; 118, keyword_argument; 119, attribute; 120, identifier:match_services_for_pod; 121, identifier:pod_meta; 122, expression_statement; 123, attribute; 124, argument_list; 125, identifier:frozenset; 126, argument_list; 127, identifier:pod_tags; 128, identifier:NAMESPACE; 129, string; 130, string_content:metadata; 131, identifier:self; 132, identifier:kubeutil; 133, identifier:legacy_rep_controller_tag; 134, True; 135, identifier:self; 136, identifier:kubeutil; 137, call; 138, identifier:pod_tags; 139, identifier:append; 140, binary_operator:'kube_namespace:%s' % pod_meta['namespace']; 141, identifier:pod_tags; 142, string_content:.pods.running; 143, attribute; 144, argument_list; 145, string; 146, subscript; 147, identifier:pod_tags; 148, identifier:append; 149, binary_operator:'kube_service:%s' % service; 150, string_content:kube_namespace:%s; 151, identifier:pod_meta; 152, string; 153, string; 154, identifier:service; 155, string_content:namespace; 156, string_content:kube_service:%s
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 8, 13; 9, 14; 10, 15; 10, 16; 10, 17; 11, 18; 12, 19; 12, 20; 12, 21; 14, 22; 14, 23; 16, 24; 16, 25; 17, 26; 17, 27; 17, 28; 17, 29; 17, 30; 17, 31; 18, 32; 18, 33; 19, 34; 19, 35; 20, 36; 20, 37; 21, 38; 21, 39; 21, 40; 23, 41; 23, 42; 25, 43; 26, 44; 27, 45; 28, 46; 29, 47; 29, 48; 30, 49; 30, 50; 31, 51; 33, 52; 33, 53; 36, 54; 36, 55; 38, 56; 39, 57; 40, 58; 42, 59; 44, 60; 44, 61; 45, 62; 45, 63; 46, 64; 46, 65; 47, 66; 47, 67; 48, 68; 49, 69; 49, 70; 50, 71; 51, 72; 51, 73; 52, 74; 52, 75; 53, 76; 53, 77; 56, 78; 56, 79; 57, 80; 57, 81; 58, 82; 58, 83; 61, 84; 61, 85; 63, 86; 63, 87; 65, 88; 65, 89; 67, 90; 67, 91; 68, 92; 68, 93; 68, 94; 69, 95; 71, 96; 72, 97; 72, 98; 76, 99; 79, 100; 79, 101; 80, 102; 80, 103; 81, 104; 82, 105; 82, 106; 83, 107; 83, 108; 83, 109; 83, 110; 84, 111; 84, 112; 85, 113; 85, 114; 86, 115; 86, 116; 87, 117; 87, 118; 88, 119; 88, 120; 89, 121; 94, 122; 96, 123; 96, 124; 98, 125; 98, 126; 101, 127; 108, 128; 108, 129; 113, 130; 115, 131; 115, 132; 118, 133; 118, 134; 119, 135; 119, 136; 122, 137; 123, 138; 123, 139; 124, 140; 126, 141; 129, 142; 137, 143; 137, 144; 140, 145; 140, 146; 143, 147; 143, 148; 144, 149; 145, 150; 146, 151; 146, 152; 149, 153; 149, 154; 152, 155; 153, 156
def _update_pods_metrics(self, instance, pods): """ Reports the number of running pods on this node, tagged by service and creator We go though all the pods, extract tags then count them by tag list, sorted and serialized in a pipe-separated string (it is an illegar character for tags) """ tags_map = defaultdict(int) for pod in pods['items']: pod_meta = pod.get('metadata', {}) pod_tags = self.kubeutil.get_pod_creator_tags(pod_meta, legacy_rep_controller_tag=True) services = self.kubeutil.match_services_for_pod(pod_meta) if isinstance(services, list): for service in services: pod_tags.append('kube_service:%s' % service) if 'namespace' in pod_meta: pod_tags.append('kube_namespace:%s' % pod_meta['namespace']) tags_map[frozenset(pod_tags)] += 1 commmon_tags = instance.get('tags', []) for pod_tags, pod_count in tags_map.iteritems(): tags = list(pod_tags) tags.extend(commmon_tags) self.publish_gauge(self, NAMESPACE + '.pods.running', pod_count, tags)
0, module; 1, function_definition; 2, function_name:get_agent_tags; 3, parameters; 4, block; 5, identifier:since; 6, identifier:to; 7, expression_statement; 8, expression_statement; 9, comment:# default value for `to` is the latest tag; 10, if_statement; 11, expression_statement; 12, comment:# filter out versions according to the interval [since, to]; 13, expression_statement; 14, comment:# reverse so we have descendant order; 15, return_statement; 16, comment:""" Return a list of tags from integrations-core representing an Agent release, sorted by more recent first. """; 17, assignment; 18, identifier:to; 19, block; 20, else_clause; 21, assignment; 22, assignment; 23, list_comprehension; 24, identifier:agent_tags; 25, call; 26, expression_statement; 27, block; 28, identifier:since; 29, call; 30, identifier:agent_tags; 31, list_comprehension; 32, call; 33, for_in_clause; 34, identifier:sorted; 35, generator_expression; 36, assignment; 37, expression_statement; 38, identifier:parse_version_info; 39, argument_list; 40, identifier:t; 41, for_in_clause; 42, if_clause; 43, identifier:str; 44, argument_list; 45, identifier:t; 46, call; 47, call; 48, for_in_clause; 49, identifier:to; 50, call; 51, assignment; 52, identifier:since; 53, identifier:t; 54, identifier:agent_tags; 55, comparison_operator:since <= t <= to; 56, identifier:t; 57, identifier:reversed; 58, argument_list; 59, identifier:parse_version_info; 60, argument_list; 61, identifier:t; 62, call; 63, identifier:parse_version_info; 64, argument_list; 65, identifier:to; 66, subscript; 67, identifier:since; 68, identifier:t; 69, identifier:to; 70, identifier:agent_tags; 71, identifier:t; 72, identifier:git_tag_list; 73, argument_list; 74, identifier:to; 75, identifier:agent_tags; 76, unary_operator; 77, string; 78, integer:1; 79, string_content:^\d+\.\d+\.\d+$
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 4, 14; 4, 15; 7, 16; 8, 17; 10, 18; 10, 19; 10, 20; 11, 21; 13, 22; 15, 23; 17, 24; 17, 25; 19, 26; 20, 27; 21, 28; 21, 29; 22, 30; 22, 31; 23, 32; 23, 33; 25, 34; 25, 35; 26, 36; 27, 37; 29, 38; 29, 39; 31, 40; 31, 41; 31, 42; 32, 43; 32, 44; 33, 45; 33, 46; 35, 47; 35, 48; 36, 49; 36, 50; 37, 51; 39, 52; 41, 53; 41, 54; 42, 55; 44, 56; 46, 57; 46, 58; 47, 59; 47, 60; 48, 61; 48, 62; 50, 63; 50, 64; 51, 65; 51, 66; 55, 67; 55, 68; 55, 69; 58, 70; 60, 71; 62, 72; 62, 73; 64, 74; 66, 75; 66, 76; 73, 77; 76, 78; 77, 79
def get_agent_tags(since, to): """ Return a list of tags from integrations-core representing an Agent release, sorted by more recent first. """ agent_tags = sorted(parse_version_info(t) for t in git_tag_list(r'^\d+\.\d+\.\d+$')) # default value for `to` is the latest tag if to: to = parse_version_info(to) else: to = agent_tags[-1] since = parse_version_info(since) # filter out versions according to the interval [since, to] agent_tags = [t for t in agent_tags if since <= t <= to] # reverse so we have descendant order return [str(t) for t in reversed(agent_tags)]
0, module; 1, function_definition; 2, function_name:sort; 3, parameters; 4, block; 5, identifier:self; 6, identifier:key_or_list; 7, default_parameter; 8, expression_statement; 9, expression_statement; 10, expression_statement; 11, expression_statement; 12, return_statement; 13, identifier:direction; 14, None; 15, comment:"""Sorts this cursor's results. Pass a field name and a direction, either :data:`~pymongo.ASCENDING` or :data:`~pymongo.DESCENDING`:: for doc in collection.find().sort('field', pymongo.ASCENDING): print(doc) To sort by multiple fields, pass a list of (key, direction) pairs:: for doc in collection.find().sort([ ('field1', pymongo.ASCENDING), ('field2', pymongo.DESCENDING)]): print(doc) Beginning with MongoDB version 2.6, text search results can be sorted by relevance:: cursor = db.test.find( {'$text': {'$search': 'some words'}}, {'score': {'$meta': 'textScore'}}) # Sort by 'score' field. cursor.sort([('score', {'$meta': 'textScore'})]) for doc in cursor: print(doc) Raises :class:`~pymongo.errors.InvalidOperation` if this cursor has already been used. Only the last :meth:`sort` applied to this cursor has any effect. :Parameters: - `key_or_list`: a single key or a list of (key, direction) pairs specifying the keys to sort on - `direction` (optional): only used if `key_or_list` is a single key, if not given :data:`~pymongo.ASCENDING` is assumed """; 16, call; 17, assignment; 18, assignment; 19, identifier:self; 20, attribute; 21, argument_list; 22, identifier:keys; 23, call; 24, attribute; 25, call; 26, identifier:self; 27, identifier:__check_okay_to_chain; 28, attribute; 29, argument_list; 30, identifier:self; 31, identifier:__ordering; 32, attribute; 33, argument_list; 34, identifier:helpers; 35, identifier:_index_list; 36, identifier:key_or_list; 37, identifier:direction; 38, identifier:helpers; 39, identifier:_index_document; 40, identifier:keys
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 7, 13; 7, 14; 8, 15; 9, 16; 10, 17; 11, 18; 12, 19; 16, 20; 16, 21; 17, 22; 17, 23; 18, 24; 18, 25; 20, 26; 20, 27; 23, 28; 23, 29; 24, 30; 24, 31; 25, 32; 25, 33; 28, 34; 28, 35; 29, 36; 29, 37; 32, 38; 32, 39; 33, 40
def sort(self, key_or_list, direction=None): """Sorts this cursor's results. Pass a field name and a direction, either :data:`~pymongo.ASCENDING` or :data:`~pymongo.DESCENDING`:: for doc in collection.find().sort('field', pymongo.ASCENDING): print(doc) To sort by multiple fields, pass a list of (key, direction) pairs:: for doc in collection.find().sort([ ('field1', pymongo.ASCENDING), ('field2', pymongo.DESCENDING)]): print(doc) Beginning with MongoDB version 2.6, text search results can be sorted by relevance:: cursor = db.test.find( {'$text': {'$search': 'some words'}}, {'score': {'$meta': 'textScore'}}) # Sort by 'score' field. cursor.sort([('score', {'$meta': 'textScore'})]) for doc in cursor: print(doc) Raises :class:`~pymongo.errors.InvalidOperation` if this cursor has already been used. Only the last :meth:`sort` applied to this cursor has any effect. :Parameters: - `key_or_list`: a single key or a list of (key, direction) pairs specifying the keys to sort on - `direction` (optional): only used if `key_or_list` is a single key, if not given :data:`~pymongo.ASCENDING` is assumed """ self.__check_okay_to_chain() keys = helpers._index_list(key_or_list, direction) self.__ordering = helpers._index_document(keys) return self
0, module; 1, function_definition; 2, function_name:find_one_and_replace; 3, parameters; 4, block; 5, identifier:self; 6, identifier:filter; 7, identifier:replacement; 8, default_parameter; 9, default_parameter; 10, default_parameter; 11, default_parameter; 12, dictionary_splat_pattern; 13, expression_statement; 14, expression_statement; 15, expression_statement; 16, return_statement; 17, identifier:projection; 18, None; 19, identifier:sort; 20, None; 21, identifier:upsert; 22, False; 23, identifier:return_document; 24, attribute; 25, identifier:kwargs; 26, comment:"""Finds a single document and replaces it, returning either the original or the replaced document. The :meth:`find_one_and_replace` method differs from :meth:`find_one_and_update` by replacing the document matched by *filter*, rather than modifying the existing document. >>> for doc in db.test.find({}): ... print(doc) ... {u'x': 1, u'_id': 0} {u'x': 1, u'_id': 1} {u'x': 1, u'_id': 2} >>> db.test.find_one_and_replace({'x': 1}, {'y': 1}) {u'x': 1, u'_id': 0} >>> for doc in db.test.find({}): ... print(doc) ... {u'y': 1, u'_id': 0} {u'x': 1, u'_id': 1} {u'x': 1, u'_id': 2} :Parameters: - `filter`: A query that matches the document to replace. - `replacement`: The replacement document. - `projection` (optional): A list of field names that should be returned in the result document or a mapping specifying the fields to include or exclude. If `projection` is a list "_id" will always be returned. Use a mapping to exclude fields from the result (e.g. projection={'_id': False}). - `sort` (optional): a list of (key, direction) pairs specifying the sort order for the query. If multiple documents match the query, they are sorted and the first is replaced. - `upsert` (optional): When ``True``, inserts a new document if no document matches the query. Defaults to ``False``. - `return_document`: If :attr:`ReturnDocument.BEFORE` (the default), returns the original document before it was replaced, or ``None`` if no document matches. If :attr:`ReturnDocument.AFTER`, returns the replaced or inserted document. - `**kwargs` (optional): additional command arguments can be passed as keyword arguments (for example maxTimeMS can be used with recent server versions). .. versionchanged:: 3.4 Added the `collation` option. .. versionchanged:: 3.2 Respects write concern. .. warning:: Starting in PyMongo 3.2, this command uses the :class:`~pymongo.write_concern.WriteConcern` of this :class:`~pymongo.collection.Collection` when connected to MongoDB >= 3.2. Note that using an elevated write concern with this command may be slower compared to using the default write concern. .. versionadded:: 3.0 """; 27, call; 28, assignment; 29, call; 30, identifier:ReturnDocument; 31, identifier:BEFORE; 32, attribute; 33, argument_list; 34, subscript; 35, identifier:replacement; 36, attribute; 37, argument_list; 38, identifier:common; 39, identifier:validate_ok_for_replace; 40, identifier:replacement; 41, identifier:kwargs; 42, string; 43, identifier:self; 44, identifier:__find_and_modify; 45, identifier:filter; 46, identifier:projection; 47, identifier:sort; 48, identifier:upsert; 49, identifier:return_document; 50, dictionary_splat; 51, string_content:update; 52, identifier:kwargs
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 3, 9; 3, 10; 3, 11; 3, 12; 4, 13; 4, 14; 4, 15; 4, 16; 8, 17; 8, 18; 9, 19; 9, 20; 10, 21; 10, 22; 11, 23; 11, 24; 12, 25; 13, 26; 14, 27; 15, 28; 16, 29; 24, 30; 24, 31; 27, 32; 27, 33; 28, 34; 28, 35; 29, 36; 29, 37; 32, 38; 32, 39; 33, 40; 34, 41; 34, 42; 36, 43; 36, 44; 37, 45; 37, 46; 37, 47; 37, 48; 37, 49; 37, 50; 42, 51; 50, 52
def find_one_and_replace(self, filter, replacement, projection=None, sort=None, upsert=False, return_document=ReturnDocument.BEFORE, **kwargs): """Finds a single document and replaces it, returning either the original or the replaced document. The :meth:`find_one_and_replace` method differs from :meth:`find_one_and_update` by replacing the document matched by *filter*, rather than modifying the existing document. >>> for doc in db.test.find({}): ... print(doc) ... {u'x': 1, u'_id': 0} {u'x': 1, u'_id': 1} {u'x': 1, u'_id': 2} >>> db.test.find_one_and_replace({'x': 1}, {'y': 1}) {u'x': 1, u'_id': 0} >>> for doc in db.test.find({}): ... print(doc) ... {u'y': 1, u'_id': 0} {u'x': 1, u'_id': 1} {u'x': 1, u'_id': 2} :Parameters: - `filter`: A query that matches the document to replace. - `replacement`: The replacement document. - `projection` (optional): A list of field names that should be returned in the result document or a mapping specifying the fields to include or exclude. If `projection` is a list "_id" will always be returned. Use a mapping to exclude fields from the result (e.g. projection={'_id': False}). - `sort` (optional): a list of (key, direction) pairs specifying the sort order for the query. If multiple documents match the query, they are sorted and the first is replaced. - `upsert` (optional): When ``True``, inserts a new document if no document matches the query. Defaults to ``False``. - `return_document`: If :attr:`ReturnDocument.BEFORE` (the default), returns the original document before it was replaced, or ``None`` if no document matches. If :attr:`ReturnDocument.AFTER`, returns the replaced or inserted document. - `**kwargs` (optional): additional command arguments can be passed as keyword arguments (for example maxTimeMS can be used with recent server versions). .. versionchanged:: 3.4 Added the `collation` option. .. versionchanged:: 3.2 Respects write concern. .. warning:: Starting in PyMongo 3.2, this command uses the :class:`~pymongo.write_concern.WriteConcern` of this :class:`~pymongo.collection.Collection` when connected to MongoDB >= 3.2. Note that using an elevated write concern with this command may be slower compared to using the default write concern. .. versionadded:: 3.0 """ common.validate_ok_for_replace(replacement) kwargs['update'] = replacement return self.__find_and_modify(filter, projection, sort, upsert, return_document, **kwargs)
0, module; 1, function_definition; 2, function_name:find_one_and_update; 3, parameters; 4, block; 5, identifier:self; 6, identifier:filter; 7, identifier:update; 8, default_parameter; 9, default_parameter; 10, default_parameter; 11, default_parameter; 12, dictionary_splat_pattern; 13, expression_statement; 14, expression_statement; 15, expression_statement; 16, return_statement; 17, identifier:projection; 18, None; 19, identifier:sort; 20, None; 21, identifier:upsert; 22, False; 23, identifier:return_document; 24, attribute; 25, identifier:kwargs; 26, comment:"""Finds a single document and updates it, returning either the original or the updated document. >>> db.test.find_one_and_update( ... {'_id': 665}, {'$inc': {'count': 1}, '$set': {'done': True}}) {u'_id': 665, u'done': False, u'count': 25}} By default :meth:`find_one_and_update` returns the original version of the document before the update was applied. To return the updated version of the document instead, use the *return_document* option. >>> from pymongo import ReturnDocument >>> db.example.find_one_and_update( ... {'_id': 'userid'}, ... {'$inc': {'seq': 1}}, ... return_document=ReturnDocument.AFTER) {u'_id': u'userid', u'seq': 1} You can limit the fields returned with the *projection* option. >>> db.example.find_one_and_update( ... {'_id': 'userid'}, ... {'$inc': {'seq': 1}}, ... projection={'seq': True, '_id': False}, ... return_document=ReturnDocument.AFTER) {u'seq': 2} The *upsert* option can be used to create the document if it doesn't already exist. >>> db.example.delete_many({}).deleted_count 1 >>> db.example.find_one_and_update( ... {'_id': 'userid'}, ... {'$inc': {'seq': 1}}, ... projection={'seq': True, '_id': False}, ... upsert=True, ... return_document=ReturnDocument.AFTER) {u'seq': 1} If multiple documents match *filter*, a *sort* can be applied. >>> for doc in db.test.find({'done': True}): ... print(doc) ... {u'_id': 665, u'done': True, u'result': {u'count': 26}} {u'_id': 701, u'done': True, u'result': {u'count': 17}} >>> db.test.find_one_and_update( ... {'done': True}, ... {'$set': {'final': True}}, ... sort=[('_id', pymongo.DESCENDING)]) {u'_id': 701, u'done': True, u'result': {u'count': 17}} :Parameters: - `filter`: A query that matches the document to update. - `update`: The update operations to apply. - `projection` (optional): A list of field names that should be returned in the result document or a mapping specifying the fields to include or exclude. If `projection` is a list "_id" will always be returned. Use a dict to exclude fields from the result (e.g. projection={'_id': False}). - `sort` (optional): a list of (key, direction) pairs specifying the sort order for the query. If multiple documents match the query, they are sorted and the first is updated. - `upsert` (optional): When ``True``, inserts a new document if no document matches the query. Defaults to ``False``. - `return_document`: If :attr:`ReturnDocument.BEFORE` (the default), returns the original document before it was updated, or ``None`` if no document matches. If :attr:`ReturnDocument.AFTER`, returns the updated or inserted document. - `**kwargs` (optional): additional command arguments can be passed as keyword arguments (for example maxTimeMS can be used with recent server versions). .. versionchanged:: 3.4 Added the `collation` option. .. versionchanged:: 3.2 Respects write concern. .. warning:: Starting in PyMongo 3.2, this command uses the :class:`~pymongo.write_concern.WriteConcern` of this :class:`~pymongo.collection.Collection` when connected to MongoDB >= 3.2. Note that using an elevated write concern with this command may be slower compared to using the default write concern. .. versionadded:: 3.0 """; 27, call; 28, assignment; 29, call; 30, identifier:ReturnDocument; 31, identifier:BEFORE; 32, attribute; 33, argument_list; 34, subscript; 35, identifier:update; 36, attribute; 37, argument_list; 38, identifier:common; 39, identifier:validate_ok_for_update; 40, identifier:update; 41, identifier:kwargs; 42, string; 43, identifier:self; 44, identifier:__find_and_modify; 45, identifier:filter; 46, identifier:projection; 47, identifier:sort; 48, identifier:upsert; 49, identifier:return_document; 50, dictionary_splat; 51, string_content:update; 52, identifier:kwargs
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 3, 9; 3, 10; 3, 11; 3, 12; 4, 13; 4, 14; 4, 15; 4, 16; 8, 17; 8, 18; 9, 19; 9, 20; 10, 21; 10, 22; 11, 23; 11, 24; 12, 25; 13, 26; 14, 27; 15, 28; 16, 29; 24, 30; 24, 31; 27, 32; 27, 33; 28, 34; 28, 35; 29, 36; 29, 37; 32, 38; 32, 39; 33, 40; 34, 41; 34, 42; 36, 43; 36, 44; 37, 45; 37, 46; 37, 47; 37, 48; 37, 49; 37, 50; 42, 51; 50, 52
def find_one_and_update(self, filter, update, projection=None, sort=None, upsert=False, return_document=ReturnDocument.BEFORE, **kwargs): """Finds a single document and updates it, returning either the original or the updated document. >>> db.test.find_one_and_update( ... {'_id': 665}, {'$inc': {'count': 1}, '$set': {'done': True}}) {u'_id': 665, u'done': False, u'count': 25}} By default :meth:`find_one_and_update` returns the original version of the document before the update was applied. To return the updated version of the document instead, use the *return_document* option. >>> from pymongo import ReturnDocument >>> db.example.find_one_and_update( ... {'_id': 'userid'}, ... {'$inc': {'seq': 1}}, ... return_document=ReturnDocument.AFTER) {u'_id': u'userid', u'seq': 1} You can limit the fields returned with the *projection* option. >>> db.example.find_one_and_update( ... {'_id': 'userid'}, ... {'$inc': {'seq': 1}}, ... projection={'seq': True, '_id': False}, ... return_document=ReturnDocument.AFTER) {u'seq': 2} The *upsert* option can be used to create the document if it doesn't already exist. >>> db.example.delete_many({}).deleted_count 1 >>> db.example.find_one_and_update( ... {'_id': 'userid'}, ... {'$inc': {'seq': 1}}, ... projection={'seq': True, '_id': False}, ... upsert=True, ... return_document=ReturnDocument.AFTER) {u'seq': 1} If multiple documents match *filter*, a *sort* can be applied. >>> for doc in db.test.find({'done': True}): ... print(doc) ... {u'_id': 665, u'done': True, u'result': {u'count': 26}} {u'_id': 701, u'done': True, u'result': {u'count': 17}} >>> db.test.find_one_and_update( ... {'done': True}, ... {'$set': {'final': True}}, ... sort=[('_id', pymongo.DESCENDING)]) {u'_id': 701, u'done': True, u'result': {u'count': 17}} :Parameters: - `filter`: A query that matches the document to update. - `update`: The update operations to apply. - `projection` (optional): A list of field names that should be returned in the result document or a mapping specifying the fields to include or exclude. If `projection` is a list "_id" will always be returned. Use a dict to exclude fields from the result (e.g. projection={'_id': False}). - `sort` (optional): a list of (key, direction) pairs specifying the sort order for the query. If multiple documents match the query, they are sorted and the first is updated. - `upsert` (optional): When ``True``, inserts a new document if no document matches the query. Defaults to ``False``. - `return_document`: If :attr:`ReturnDocument.BEFORE` (the default), returns the original document before it was updated, or ``None`` if no document matches. If :attr:`ReturnDocument.AFTER`, returns the updated or inserted document. - `**kwargs` (optional): additional command arguments can be passed as keyword arguments (for example maxTimeMS can be used with recent server versions). .. versionchanged:: 3.4 Added the `collation` option. .. versionchanged:: 3.2 Respects write concern. .. warning:: Starting in PyMongo 3.2, this command uses the :class:`~pymongo.write_concern.WriteConcern` of this :class:`~pymongo.collection.Collection` when connected to MongoDB >= 3.2. Note that using an elevated write concern with this command may be slower compared to using the default write concern. .. versionadded:: 3.0 """ common.validate_ok_for_update(update) kwargs['update'] = update return self.__find_and_modify(filter, projection, sort, upsert, return_document, **kwargs)
0, module; 1, function_definition; 2, function_name:feature_correlation; 3, parameters; 4, block; 5, identifier:X; 6, identifier:y; 7, default_parameter; 8, default_parameter; 9, default_parameter; 10, default_parameter; 11, default_parameter; 12, default_parameter; 13, dictionary_splat_pattern; 14, expression_statement; 15, comment:# Instantiate the visualizer; 16, expression_statement; 17, comment:# Fit and transform the visualizer (calls draw); 18, expression_statement; 19, expression_statement; 20, comment:# Return the axes object on the visualizer; 21, return_statement; 22, identifier:ax; 23, None; 24, identifier:method; 25, string; 26, identifier:labels; 27, None; 28, identifier:sort; 29, False; 30, identifier:feature_index; 31, None; 32, identifier:feature_names; 33, None; 34, identifier:kwargs; 35, comment:""" Displays the correlation between features and dependent variables. This visualizer can be used side-by-side with yellowbrick.features.JointPlotVisualizer that plots a feature against the target and shows the distribution of each via a histogram on each axis. Parameters ---------- X : ndarray or DataFrame of shape n x m A matrix of n instances with m features y : ndarray or Series of length n An array or series of target or class values ax : matplotlib Axes, default: None The axis to plot the figure on. If None is passed in the current axes will be used (or generated if required). method : str, default: 'pearson' The method to calculate correlation between features and target. Options include: - 'pearson', which uses ``scipy.stats.pearsonr`` - 'mutual_info-regression', which uses ``mutual_info-regression`` from ``sklearn.feature_selection`` - 'mutual_info-classification', which uses ``mutual_info_classif`` from ``sklearn.feature_selection`` 'mutual_info-classification'], default: 'pearson' labels : list, default: None A list of feature names to use. If a DataFrame is passed to fit and features is None, feature names are selected as the column names. sort : boolean, default: False If false, the features are are not sorted in the plot; otherwise features are sorted in ascending order of correlation. feature_index : list, A list of feature index to include in the plot. feature_names : list of feature names A list of feature names to include in the plot. Must have labels or the fitted data is a DataFrame with column names. If feature_index is provided, feature_names will be ignored. kwargs : dict Keyword arguments that are passed to the base class and may influence the visualization as defined in other Visualizers. Returns ------- ax : matplotlib axes Returns the axes that the parallel coordinates were drawn on. """; 36, assignment; 37, call; 38, call; 39, attribute; 40, string_content:pearson; 41, identifier:viz; 42, call; 43, attribute; 44, argument_list; 45, attribute; 46, argument_list; 47, identifier:viz; 48, identifier:ax; 49, identifier:FeatureCorrelation; 50, argument_list; 51, identifier:viz; 52, identifier:fit; 53, identifier:X; 54, identifier:y; 55, dictionary_splat; 56, identifier:viz; 57, identifier:finalize; 58, identifier:ax; 59, identifier:method; 60, identifier:labels; 61, identifier:sort; 62, identifier:feature_index; 63, identifier:feature_names; 64, dictionary_splat; 65, identifier:kwargs; 66, identifier:kwargs
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 3, 9; 3, 10; 3, 11; 3, 12; 3, 13; 4, 14; 4, 15; 4, 16; 4, 17; 4, 18; 4, 19; 4, 20; 4, 21; 7, 22; 7, 23; 8, 24; 8, 25; 9, 26; 9, 27; 10, 28; 10, 29; 11, 30; 11, 31; 12, 32; 12, 33; 13, 34; 14, 35; 16, 36; 18, 37; 19, 38; 21, 39; 25, 40; 36, 41; 36, 42; 37, 43; 37, 44; 38, 45; 38, 46; 39, 47; 39, 48; 42, 49; 42, 50; 43, 51; 43, 52; 44, 53; 44, 54; 44, 55; 45, 56; 45, 57; 50, 58; 50, 59; 50, 60; 50, 61; 50, 62; 50, 63; 50, 64; 55, 65; 64, 66
def feature_correlation(X, y, ax=None, method='pearson', labels=None, sort=False, feature_index=None, feature_names=None, **kwargs): """ Displays the correlation between features and dependent variables. This visualizer can be used side-by-side with yellowbrick.features.JointPlotVisualizer that plots a feature against the target and shows the distribution of each via a histogram on each axis. Parameters ---------- X : ndarray or DataFrame of shape n x m A matrix of n instances with m features y : ndarray or Series of length n An array or series of target or class values ax : matplotlib Axes, default: None The axis to plot the figure on. If None is passed in the current axes will be used (or generated if required). method : str, default: 'pearson' The method to calculate correlation between features and target. Options include: - 'pearson', which uses ``scipy.stats.pearsonr`` - 'mutual_info-regression', which uses ``mutual_info-regression`` from ``sklearn.feature_selection`` - 'mutual_info-classification', which uses ``mutual_info_classif`` from ``sklearn.feature_selection`` 'mutual_info-classification'], default: 'pearson' labels : list, default: None A list of feature names to use. If a DataFrame is passed to fit and features is None, feature names are selected as the column names. sort : boolean, default: False If false, the features are are not sorted in the plot; otherwise features are sorted in ascending order of correlation. feature_index : list, A list of feature index to include in the plot. feature_names : list of feature names A list of feature names to include in the plot. Must have labels or the fitted data is a DataFrame with column names. If feature_index is provided, feature_names will be ignored. kwargs : dict Keyword arguments that are passed to the base class and may influence the visualization as defined in other Visualizers. Returns ------- ax : matplotlib axes Returns the axes that the parallel coordinates were drawn on. """ # Instantiate the visualizer viz = FeatureCorrelation(ax, method, labels, sort, feature_index, feature_names, **kwargs) # Fit and transform the visualizer (calls draw) viz.fit(X, y, **kwargs) viz.finalize() # Return the axes object on the visualizer return viz.ax
0, module; 1, function_definition; 2, function_name:dispersion; 3, parameters; 4, block; 5, identifier:words; 6, identifier:corpus; 7, default_parameter; 8, default_parameter; 9, default_parameter; 10, default_parameter; 11, default_parameter; 12, default_parameter; 13, default_parameter; 14, dictionary_splat_pattern; 15, expression_statement; 16, comment:# Instantiate the visualizer; 17, expression_statement; 18, comment:# Fit and transform the visualizer (calls draw); 19, expression_statement; 20, comment:# Return the axes object on the visualizer; 21, return_statement; 22, identifier:y; 23, None; 24, identifier:ax; 25, None; 26, identifier:colors; 27, None; 28, identifier:colormap; 29, None; 30, identifier:labels; 31, None; 32, identifier:annotate_docs; 33, False; 34, identifier:ignore_case; 35, False; 36, identifier:kwargs; 37, comment:""" Displays lexical dispersion plot for words in a corpus This helper function is a quick wrapper to utilize the DisperstionPlot Visualizer for one-off analysis Parameters ---------- words : list A list of words whose dispersion will be examined within a corpus y : ndarray or Series of length n An optional array or series of target or class values for instances. If this is specified, then the points will be colored according to their class. corpus : list Should be provided as a list of documents that contain a list of words in the order they appear in the document. ax : matplotlib axes, default: None The axes to plot the figure on. labels : list of strings The names of the classes in the target, used to create a legend. Labels must match names of classes in sorted order. colors : list or tuple of colors Specify the colors for each individual class colormap : string or matplotlib cmap Qualitative colormap for discrete target annotate_docs : boolean, default: False Specify whether document boundaries will be displayed. Vertical lines are positioned at the end of each document. ignore_case : boolean, default: False Specify whether input will be case-sensitive. kwargs : dict Pass any additional keyword arguments to the super class. Returns ------- ax: matplotlib axes Returns the axes that the plot was drawn on """; 38, assignment; 39, call; 40, attribute; 41, identifier:visualizer; 42, call; 43, attribute; 44, argument_list; 45, identifier:visualizer; 46, identifier:ax; 47, identifier:DispersionPlot; 48, argument_list; 49, identifier:visualizer; 50, identifier:fit; 51, identifier:corpus; 52, identifier:y; 53, dictionary_splat; 54, identifier:words; 55, keyword_argument; 56, keyword_argument; 57, keyword_argument; 58, keyword_argument; 59, keyword_argument; 60, keyword_argument; 61, dictionary_splat; 62, identifier:kwargs; 63, identifier:ax; 64, identifier:ax; 65, identifier:colors; 66, identifier:colors; 67, identifier:colormap; 68, identifier:colormap; 69, identifier:ignore_case; 70, identifier:ignore_case; 71, identifier:labels; 72, identifier:labels; 73, identifier:annotate_docs; 74, identifier:annotate_docs; 75, identifier:kwargs
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 3, 9; 3, 10; 3, 11; 3, 12; 3, 13; 3, 14; 4, 15; 4, 16; 4, 17; 4, 18; 4, 19; 4, 20; 4, 21; 7, 22; 7, 23; 8, 24; 8, 25; 9, 26; 9, 27; 10, 28; 10, 29; 11, 30; 11, 31; 12, 32; 12, 33; 13, 34; 13, 35; 14, 36; 15, 37; 17, 38; 19, 39; 21, 40; 38, 41; 38, 42; 39, 43; 39, 44; 40, 45; 40, 46; 42, 47; 42, 48; 43, 49; 43, 50; 44, 51; 44, 52; 44, 53; 48, 54; 48, 55; 48, 56; 48, 57; 48, 58; 48, 59; 48, 60; 48, 61; 53, 62; 55, 63; 55, 64; 56, 65; 56, 66; 57, 67; 57, 68; 58, 69; 58, 70; 59, 71; 59, 72; 60, 73; 60, 74; 61, 75
def dispersion(words, corpus, y=None, ax=None, colors=None, colormap=None, labels=None, annotate_docs=False, ignore_case=False, **kwargs): """ Displays lexical dispersion plot for words in a corpus This helper function is a quick wrapper to utilize the DisperstionPlot Visualizer for one-off analysis Parameters ---------- words : list A list of words whose dispersion will be examined within a corpus y : ndarray or Series of length n An optional array or series of target or class values for instances. If this is specified, then the points will be colored according to their class. corpus : list Should be provided as a list of documents that contain a list of words in the order they appear in the document. ax : matplotlib axes, default: None The axes to plot the figure on. labels : list of strings The names of the classes in the target, used to create a legend. Labels must match names of classes in sorted order. colors : list or tuple of colors Specify the colors for each individual class colormap : string or matplotlib cmap Qualitative colormap for discrete target annotate_docs : boolean, default: False Specify whether document boundaries will be displayed. Vertical lines are positioned at the end of each document. ignore_case : boolean, default: False Specify whether input will be case-sensitive. kwargs : dict Pass any additional keyword arguments to the super class. Returns ------- ax: matplotlib axes Returns the axes that the plot was drawn on """ # Instantiate the visualizer visualizer = DispersionPlot( words, ax=ax, colors=colors, colormap=colormap, ignore_case=ignore_case, labels=labels, annotate_docs=annotate_docs, **kwargs ) # Fit and transform the visualizer (calls draw) visualizer.fit(corpus, y, **kwargs) # Return the axes object on the visualizer return visualizer.ax
0, module; 1, function_definition; 2, function_name:sorted_product_set; 3, parameters; 4, block; 5, identifier:array_a; 6, identifier:array_b; 7, expression_statement; 8, return_statement; 9, comment:"""Compute the product set of array_a and array_b and sort it."""; 10, subscript; 11, call; 12, slice; 13, attribute; 14, argument_list; 15, unary_operator; 16, identifier:np; 17, identifier:sort; 18, call; 19, integer:1; 20, attribute; 21, argument_list; 22, identifier:np; 23, identifier:concatenate; 24, list_comprehension; 25, keyword_argument; 26, binary_operator:array_a[i] * array_b; 27, for_in_clause; 28, identifier:axis; 29, integer:0; 30, subscript; 31, identifier:array_b; 32, identifier:i; 33, call; 34, identifier:array_a; 35, identifier:i; 36, identifier:xrange; 37, argument_list; 38, call; 39, identifier:len; 40, argument_list; 41, identifier:array_a
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 7, 9; 8, 10; 10, 11; 10, 12; 11, 13; 11, 14; 12, 15; 13, 16; 13, 17; 14, 18; 15, 19; 18, 20; 18, 21; 20, 22; 20, 23; 21, 24; 21, 25; 24, 26; 24, 27; 25, 28; 25, 29; 26, 30; 26, 31; 27, 32; 27, 33; 30, 34; 30, 35; 33, 36; 33, 37; 37, 38; 38, 39; 38, 40; 40, 41
def sorted_product_set(array_a, array_b): """Compute the product set of array_a and array_b and sort it.""" return np.sort( np.concatenate( [array_a[i] * array_b for i in xrange(len(array_a))], axis=0) )[::-1]
0, module; 1, function_definition; 2, function_name:_get_sorted_inputs; 3, parameters; 4, block; 5, identifier:filename; 6, expression_statement; 7, with_statement; 8, expression_statement; 9, expression_statement; 10, expression_statement; 11, expression_statement; 12, for_statement; 13, return_statement; 14, comment:"""Read and sort lines from the file sorted by decreasing length. Args: filename: String name of file to read inputs from. Returns: Sorted list of inputs, and dictionary mapping original index->sorted index of each element. """; 15, with_clause; 16, block; 17, assignment; 18, assignment; 19, assignment; 20, assignment; 21, pattern_list; 22, call; 23, block; 24, expression_list; 25, with_item; 26, expression_statement; 27, expression_statement; 28, if_statement; 29, identifier:input_lens; 30, list_comprehension; 31, identifier:sorted_input_lens; 32, call; 33, identifier:sorted_inputs; 34, list; 35, identifier:sorted_keys; 36, dictionary; 37, identifier:i; 38, tuple_pattern; 39, identifier:enumerate; 40, argument_list; 41, expression_statement; 42, expression_statement; 43, identifier:sorted_inputs; 44, identifier:sorted_keys; 45, as_pattern; 46, assignment; 47, assignment; 48, not_operator; 49, block; 50, tuple; 51, for_in_clause; 52, identifier:sorted; 53, argument_list; 54, identifier:index; 55, identifier:_; 56, identifier:sorted_input_lens; 57, call; 58, assignment; 59, call; 60, as_pattern_target; 61, identifier:records; 62, call; 63, identifier:inputs; 64, list_comprehension; 65, subscript; 66, expression_statement; 67, identifier:i; 68, call; 69, pattern_list; 70, call; 71, identifier:input_lens; 72, keyword_argument; 73, keyword_argument; 74, attribute; 75, argument_list; 76, subscript; 77, identifier:i; 78, attribute; 79, argument_list; 80, identifier:f; 81, attribute; 82, argument_list; 83, call; 84, for_in_clause; 85, identifier:inputs; 86, unary_operator; 87, call; 88, identifier:len; 89, argument_list; 90, identifier:i; 91, identifier:line; 92, identifier:enumerate; 93, argument_list; 94, identifier:key; 95, lambda; 96, identifier:reverse; 97, True; 98, identifier:sorted_inputs; 99, identifier:append; 100, subscript; 101, identifier:sorted_keys; 102, identifier:index; 103, attribute; 104, identifier:Open; 105, identifier:filename; 106, call; 107, identifier:split; 108, string:"\n"; 109, attribute; 110, argument_list; 111, identifier:record; 112, identifier:records; 113, integer:1; 114, attribute; 115, argument_list; 116, call; 117, identifier:inputs; 118, lambda_parameters; 119, subscript; 120, identifier:inputs; 121, identifier:index; 122, identifier:tf; 123, identifier:gfile; 124, attribute; 125, argument_list; 126, identifier:record; 127, identifier:strip; 128, identifier:inputs; 129, identifier:pop; 130, attribute; 131, argument_list; 132, identifier:x; 133, identifier:x; 134, integer:1; 135, identifier:f; 136, identifier:read; 137, identifier:line; 138, identifier:split
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 4, 6; 4, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 6, 14; 7, 15; 7, 16; 8, 17; 9, 18; 10, 19; 11, 20; 12, 21; 12, 22; 12, 23; 13, 24; 15, 25; 16, 26; 16, 27; 16, 28; 17, 29; 17, 30; 18, 31; 18, 32; 19, 33; 19, 34; 20, 35; 20, 36; 21, 37; 21, 38; 22, 39; 22, 40; 23, 41; 23, 42; 24, 43; 24, 44; 25, 45; 26, 46; 27, 47; 28, 48; 28, 49; 30, 50; 30, 51; 32, 52; 32, 53; 38, 54; 38, 55; 40, 56; 41, 57; 42, 58; 45, 59; 45, 60; 46, 61; 46, 62; 47, 63; 47, 64; 48, 65; 49, 66; 50, 67; 50, 68; 51, 69; 51, 70; 53, 71; 53, 72; 53, 73; 57, 74; 57, 75; 58, 76; 58, 77; 59, 78; 59, 79; 60, 80; 62, 81; 62, 82; 64, 83; 64, 84; 65, 85; 65, 86; 66, 87; 68, 88; 68, 89; 69, 90; 69, 91; 70, 92; 70, 93; 72, 94; 72, 95; 73, 96; 73, 97; 74, 98; 74, 99; 75, 100; 76, 101; 76, 102; 78, 103; 78, 104; 79, 105; 81, 106; 81, 107; 82, 108; 83, 109; 83, 110; 84, 111; 84, 112; 86, 113; 87, 114; 87, 115; 89, 116; 93, 117; 95, 118; 95, 119; 100, 120; 100, 121; 103, 122; 103, 123; 106, 124; 106, 125; 109, 126; 109, 127; 114, 128; 114, 129; 116, 130; 116, 131; 118, 132; 119, 133; 119, 134; 124, 135; 124, 136; 130, 137; 130, 138
def _get_sorted_inputs(filename): """Read and sort lines from the file sorted by decreasing length. Args: filename: String name of file to read inputs from. Returns: Sorted list of inputs, and dictionary mapping original index->sorted index of each element. """ with tf.gfile.Open(filename) as f: records = f.read().split("\n") inputs = [record.strip() for record in records] if not inputs[-1]: inputs.pop() input_lens = [(i, len(line.split())) for i, line in enumerate(inputs)] sorted_input_lens = sorted(input_lens, key=lambda x: x[1], reverse=True) sorted_inputs = [] sorted_keys = {} for i, (index, _) in enumerate(sorted_input_lens): sorted_inputs.append(inputs[index]) sorted_keys[index] = i return sorted_inputs, sorted_keys
0, module; 1, function_definition; 2, function_name:games_by_time; 3, parameters; 4, block; 5, identifier:self; 6, identifier:start_game; 7, identifier:end_game; 8, expression_statement; 9, expression_statement; 10, expression_statement; 11, function_definition; 12, return_statement; 13, comment:"""Given a range of games, return the games sorted by time. Returns [(time, game_number), ...] The time will be a `datetime.datetime` and the game number is the integer used as the basis of the row ID. Note that when a cluster of self-play nodes are writing concurrently, the game numbers may be out of order. """; 14, assignment; 15, assignment; 16, function_name:parse; 17, parameters; 18, block; 19, call; 20, identifier:move_count; 21, string; 22, identifier:rows; 23, call; 24, identifier:r; 25, expression_statement; 26, expression_statement; 27, return_statement; 28, identifier:sorted; 29, argument_list; 30, string_content:move_count; 31, attribute; 32, argument_list; 33, assignment; 34, assignment; 35, tuple; 36, list_comprehension; 37, keyword_argument; 38, attribute; 39, identifier:read_rows; 40, call; 41, call; 42, keyword_argument; 43, identifier:rk; 44, call; 45, identifier:game; 46, subscript; 47, attribute; 48, identifier:game; 49, call; 50, for_in_clause; 51, identifier:key; 52, call; 53, identifier:self; 54, identifier:bt_table; 55, attribute; 56, argument_list; 57, attribute; 58, argument_list; 59, identifier:filter_; 60, call; 61, identifier:str; 62, argument_list; 63, call; 64, integer:0; 65, subscript; 66, identifier:timestamp; 67, identifier:parse; 68, argument_list; 69, identifier:r; 70, identifier:rows; 71, attribute; 72, argument_list; 73, identifier:ROWCOUNT_PREFIX; 74, identifier:format; 75, identifier:start_game; 76, identifier:ROWCOUNT_PREFIX; 77, identifier:format; 78, identifier:end_game; 79, attribute; 80, argument_list; 81, attribute; 82, string; 83, attribute; 84, argument_list; 85, subscript; 86, integer:0; 87, identifier:r; 88, identifier:operator; 89, identifier:itemgetter; 90, integer:0; 91, identifier:bigtable_row_filters; 92, identifier:ColumnRangeFilter; 93, identifier:METADATA; 94, identifier:move_count; 95, identifier:move_count; 96, identifier:r; 97, identifier:row_key; 98, string_content:utf-8; 99, call; 100, identifier:groups; 101, subscript; 102, identifier:move_count; 103, attribute; 104, argument_list; 105, attribute; 106, identifier:METADATA; 107, identifier:_game_from_counter; 108, identifier:match; 109, identifier:rk; 110, identifier:r; 111, identifier:cells
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 8, 13; 9, 14; 10, 15; 11, 16; 11, 17; 11, 18; 12, 19; 14, 20; 14, 21; 15, 22; 15, 23; 17, 24; 18, 25; 18, 26; 18, 27; 19, 28; 19, 29; 21, 30; 23, 31; 23, 32; 25, 33; 26, 34; 27, 35; 29, 36; 29, 37; 31, 38; 31, 39; 32, 40; 32, 41; 32, 42; 33, 43; 33, 44; 34, 45; 34, 46; 35, 47; 35, 48; 36, 49; 36, 50; 37, 51; 37, 52; 38, 53; 38, 54; 40, 55; 40, 56; 41, 57; 41, 58; 42, 59; 42, 60; 44, 61; 44, 62; 46, 63; 46, 64; 47, 65; 47, 66; 49, 67; 49, 68; 50, 69; 50, 70; 52, 71; 52, 72; 55, 73; 55, 74; 56, 75; 57, 76; 57, 77; 58, 78; 60, 79; 60, 80; 62, 81; 62, 82; 63, 83; 63, 84; 65, 85; 65, 86; 68, 87; 71, 88; 71, 89; 72, 90; 79, 91; 79, 92; 80, 93; 80, 94; 80, 95; 81, 96; 81, 97; 82, 98; 83, 99; 83, 100; 85, 101; 85, 102; 99, 103; 99, 104; 101, 105; 101, 106; 103, 107; 103, 108; 104, 109; 105, 110; 105, 111
def games_by_time(self, start_game, end_game): """Given a range of games, return the games sorted by time. Returns [(time, game_number), ...] The time will be a `datetime.datetime` and the game number is the integer used as the basis of the row ID. Note that when a cluster of self-play nodes are writing concurrently, the game numbers may be out of order. """ move_count = b'move_count' rows = self.bt_table.read_rows( ROWCOUNT_PREFIX.format(start_game), ROWCOUNT_PREFIX.format(end_game), filter_=bigtable_row_filters.ColumnRangeFilter( METADATA, move_count, move_count)) def parse(r): rk = str(r.row_key, 'utf-8') game = _game_from_counter.match(rk).groups()[0] return (r.cells[METADATA][move_count][0].timestamp, game) return sorted([parse(r) for r in rows], key=operator.itemgetter(0))
0, module; 1, function_definition; 2, function_name:bleakest_moves; 3, parameters; 4, block; 5, identifier:self; 6, identifier:start_game; 7, identifier:end_game; 8, expression_statement; 9, expression_statement; 10, expression_statement; 11, function_definition; 12, return_statement; 13, comment:"""Given a range of games, return the bleakest moves. Returns a list of (game, move, q) sorted by q. """; 14, assignment; 15, assignment; 16, function_name:parse; 17, parameters; 18, block; 19, call; 20, identifier:bleak; 21, string; 22, identifier:rows; 23, call; 24, identifier:r; 25, expression_statement; 26, expression_statement; 27, expression_statement; 28, return_statement; 29, identifier:sorted; 30, argument_list; 31, string_content:bleakest_q; 32, attribute; 33, argument_list; 34, assignment; 35, assignment; 36, assignment; 37, expression_list; 38, list_comprehension; 39, keyword_argument; 40, attribute; 41, identifier:read_rows; 42, call; 43, call; 44, keyword_argument; 45, identifier:rk; 46, call; 47, pattern_list; 48, call; 49, identifier:q; 50, call; 51, call; 52, call; 53, call; 54, call; 55, for_in_clause; 56, identifier:key; 57, call; 58, identifier:self; 59, identifier:bt_table; 60, attribute; 61, argument_list; 62, attribute; 63, argument_list; 64, identifier:filter_; 65, call; 66, identifier:str; 67, argument_list; 68, identifier:g; 69, identifier:m; 70, attribute; 71, argument_list; 72, attribute; 73, argument_list; 74, identifier:int; 75, argument_list; 76, identifier:int; 77, argument_list; 78, identifier:float; 79, argument_list; 80, identifier:parse; 81, argument_list; 82, identifier:r; 83, identifier:rows; 84, attribute; 85, argument_list; 86, identifier:ROW_PREFIX; 87, identifier:format; 88, identifier:start_game; 89, identifier:ROW_PREFIX; 90, identifier:format; 91, identifier:end_game; 92, attribute; 93, argument_list; 94, attribute; 95, string; 96, call; 97, identifier:groups; 98, identifier:r; 99, identifier:cell_value; 100, identifier:METADATA; 101, identifier:bleak; 102, identifier:g; 103, identifier:m; 104, identifier:q; 105, identifier:r; 106, identifier:operator; 107, identifier:itemgetter; 108, integer:2; 109, identifier:bigtable_row_filters; 110, identifier:ColumnRangeFilter; 111, identifier:METADATA; 112, identifier:bleak; 113, identifier:bleak; 114, identifier:r; 115, identifier:row_key; 116, string_content:utf-8; 117, attribute; 118, argument_list; 119, identifier:_game_row_key; 120, identifier:match; 121, identifier:rk
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 8, 13; 9, 14; 10, 15; 11, 16; 11, 17; 11, 18; 12, 19; 14, 20; 14, 21; 15, 22; 15, 23; 17, 24; 18, 25; 18, 26; 18, 27; 18, 28; 19, 29; 19, 30; 21, 31; 23, 32; 23, 33; 25, 34; 26, 35; 27, 36; 28, 37; 30, 38; 30, 39; 32, 40; 32, 41; 33, 42; 33, 43; 33, 44; 34, 45; 34, 46; 35, 47; 35, 48; 36, 49; 36, 50; 37, 51; 37, 52; 37, 53; 38, 54; 38, 55; 39, 56; 39, 57; 40, 58; 40, 59; 42, 60; 42, 61; 43, 62; 43, 63; 44, 64; 44, 65; 46, 66; 46, 67; 47, 68; 47, 69; 48, 70; 48, 71; 50, 72; 50, 73; 51, 74; 51, 75; 52, 76; 52, 77; 53, 78; 53, 79; 54, 80; 54, 81; 55, 82; 55, 83; 57, 84; 57, 85; 60, 86; 60, 87; 61, 88; 62, 89; 62, 90; 63, 91; 65, 92; 65, 93; 67, 94; 67, 95; 70, 96; 70, 97; 72, 98; 72, 99; 73, 100; 73, 101; 75, 102; 77, 103; 79, 104; 81, 105; 84, 106; 84, 107; 85, 108; 92, 109; 92, 110; 93, 111; 93, 112; 93, 113; 94, 114; 94, 115; 95, 116; 96, 117; 96, 118; 117, 119; 117, 120; 118, 121
def bleakest_moves(self, start_game, end_game): """Given a range of games, return the bleakest moves. Returns a list of (game, move, q) sorted by q. """ bleak = b'bleakest_q' rows = self.bt_table.read_rows( ROW_PREFIX.format(start_game), ROW_PREFIX.format(end_game), filter_=bigtable_row_filters.ColumnRangeFilter( METADATA, bleak, bleak)) def parse(r): rk = str(r.row_key, 'utf-8') g, m = _game_row_key.match(rk).groups() q = r.cell_value(METADATA, bleak) return int(g), int(m), float(q) return sorted([parse(r) for r in rows], key=operator.itemgetter(2))
0, module; 1, function_definition; 2, function_name:_generate_subtokens; 3, parameters; 4, block; 5, identifier:token_counts; 6, identifier:alphabet; 7, identifier:min_count; 8, default_parameter; 9, default_parameter; 10, expression_statement; 11, if_statement; 12, comment:# Use alphabet set to create initial list of subtokens; 13, expression_statement; 14, expression_statement; 15, comment:# On each iteration, segment all words using the subtokens defined in; 16, comment:# subtoken_dict, count how often the resulting subtokens appear, and update; 17, comment:# the dictionary with subtokens w/ high enough counts.; 18, for_statement; 19, return_statement; 20, identifier:num_iterations; 21, integer:4; 22, identifier:reserved_tokens; 23, None; 24, comment:"""Create a list of subtokens in decreasing order of frequency. Args: token_counts: dict mapping str tokens -> int count alphabet: set of characters min_count: int minimum number of times a subtoken must appear before it is added to the vocabulary. num_iterations: int number of iterations to generate new tokens. reserved_tokens: list of tokens that will be added to the beginning to the returned subtoken list. Returns: Sorted list of subtokens (most frequent first) """; 25, comparison_operator:reserved_tokens is None; 26, block; 27, assignment; 28, assignment; 29, identifier:i; 30, call; 31, block; 32, identifier:subtoken_list; 33, identifier:reserved_tokens; 34, None; 35, expression_statement; 36, identifier:subtoken_list; 37, binary_operator:reserved_tokens + list(alphabet); 38, identifier:max_subtoken_length; 39, integer:1; 40, identifier:xrange; 41, argument_list; 42, expression_statement; 43, comment:# Generate new subtoken->id dictionary using the new subtoken list.; 44, expression_statement; 45, comment:# Create dict mapping subtoken->count, with additional subtokens created; 46, comment:# from substrings taken from the tokens.; 47, expression_statement; 48, comment:# Generate new list of subtokens sorted by subtoken count.; 49, expression_statement; 50, expression_statement; 51, assignment; 52, identifier:reserved_tokens; 53, call; 54, identifier:num_iterations; 55, call; 56, assignment; 57, assignment; 58, assignment; 59, call; 60, identifier:reserved_tokens; 61, identifier:RESERVED_TOKENS; 62, identifier:list; 63, argument_list; 64, attribute; 65, argument_list; 66, identifier:subtoken_dict; 67, call; 68, identifier:subtoken_counts; 69, call; 70, pattern_list; 71, call; 72, attribute; 73, argument_list; 74, identifier:alphabet; 75, attribute; 76, identifier:info; 77, binary_operator:"\tGenerating subtokens: iteration %d" % i; 78, identifier:_list_to_index_dict; 79, argument_list; 80, identifier:_count_and_gen_subtokens; 81, argument_list; 82, identifier:subtoken_list; 83, identifier:max_subtoken_length; 84, identifier:_gen_new_subtoken_list; 85, argument_list; 86, attribute; 87, identifier:info; 88, binary_operator:"\tVocab size: %d" % len(subtoken_list); 89, identifier:tf; 90, identifier:logging; 91, string:"\tGenerating subtokens: iteration %d"; 92, identifier:i; 93, identifier:subtoken_list; 94, identifier:token_counts; 95, identifier:alphabet; 96, identifier:subtoken_dict; 97, identifier:max_subtoken_length; 98, identifier:subtoken_counts; 99, identifier:min_count; 100, identifier:alphabet; 101, identifier:reserved_tokens; 102, identifier:tf; 103, identifier:logging; 104, string:"\tVocab size: %d"; 105, call; 106, identifier:len; 107, argument_list; 108, identifier:subtoken_list
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 3, 9; 4, 10; 4, 11; 4, 12; 4, 13; 4, 14; 4, 15; 4, 16; 4, 17; 4, 18; 4, 19; 8, 20; 8, 21; 9, 22; 9, 23; 10, 24; 11, 25; 11, 26; 13, 27; 14, 28; 18, 29; 18, 30; 18, 31; 19, 32; 25, 33; 25, 34; 26, 35; 27, 36; 27, 37; 28, 38; 28, 39; 30, 40; 30, 41; 31, 42; 31, 43; 31, 44; 31, 45; 31, 46; 31, 47; 31, 48; 31, 49; 31, 50; 35, 51; 37, 52; 37, 53; 41, 54; 42, 55; 44, 56; 47, 57; 49, 58; 50, 59; 51, 60; 51, 61; 53, 62; 53, 63; 55, 64; 55, 65; 56, 66; 56, 67; 57, 68; 57, 69; 58, 70; 58, 71; 59, 72; 59, 73; 63, 74; 64, 75; 64, 76; 65, 77; 67, 78; 67, 79; 69, 80; 69, 81; 70, 82; 70, 83; 71, 84; 71, 85; 72, 86; 72, 87; 73, 88; 75, 89; 75, 90; 77, 91; 77, 92; 79, 93; 81, 94; 81, 95; 81, 96; 81, 97; 85, 98; 85, 99; 85, 100; 85, 101; 86, 102; 86, 103; 88, 104; 88, 105; 105, 106; 105, 107; 107, 108
def _generate_subtokens( token_counts, alphabet, min_count, num_iterations=4, reserved_tokens=None): """Create a list of subtokens in decreasing order of frequency. Args: token_counts: dict mapping str tokens -> int count alphabet: set of characters min_count: int minimum number of times a subtoken must appear before it is added to the vocabulary. num_iterations: int number of iterations to generate new tokens. reserved_tokens: list of tokens that will be added to the beginning to the returned subtoken list. Returns: Sorted list of subtokens (most frequent first) """ if reserved_tokens is None: reserved_tokens = RESERVED_TOKENS # Use alphabet set to create initial list of subtokens subtoken_list = reserved_tokens + list(alphabet) max_subtoken_length = 1 # On each iteration, segment all words using the subtokens defined in # subtoken_dict, count how often the resulting subtokens appear, and update # the dictionary with subtokens w/ high enough counts. for i in xrange(num_iterations): tf.logging.info("\tGenerating subtokens: iteration %d" % i) # Generate new subtoken->id dictionary using the new subtoken list. subtoken_dict = _list_to_index_dict(subtoken_list) # Create dict mapping subtoken->count, with additional subtokens created # from substrings taken from the tokens. subtoken_counts = _count_and_gen_subtokens( token_counts, alphabet, subtoken_dict, max_subtoken_length) # Generate new list of subtokens sorted by subtoken count. subtoken_list, max_subtoken_length = _gen_new_subtoken_list( subtoken_counts, min_count, alphabet, reserved_tokens) tf.logging.info("\tVocab size: %d" % len(subtoken_list)) return subtoken_list
0, module; 1, function_definition; 2, function_name:sparse_svd; 3, parameters; 4, block; 5, identifier:sparse_matrix; 6, identifier:num_values; 7, identifier:max_iter; 8, expression_statement; 9, if_statement; 10, if_statement; 11, if_statement; 12, expression_statement; 13, return_statement; 14, comment:"""Wrapper around SciPy's Singular Value Decomposition for sparse matrices. Args: sparse_matrix: a SciPy sparse matrix (typically large). num_values: the number of largest singular values to compute. max_iter: maximum number of iterations (>= 0) in the decomposition. If max_iter is None, runs FLAGS.max_iter_sparse_svd steps. If max_iter == 0, runs until convergence. Otherwise will run max_iter steps. Returns: A (u, s, v) tuple where s is an array entailing the singular values, and (u, v) the singular vector matrices. u is column orthogonal and v is row orthogonal. s is sorted in increasing order. """; 15, comparison_operator:num_values <= 0; 16, block; 17, boolean_operator; 18, block; 19, comparison_operator:max_iter is None; 20, block; 21, elif_clause; 22, assignment; 23, tuple; 24, identifier:num_values; 25, integer:0; 26, raise_statement; 27, comparison_operator:max_iter is not None; 28, comparison_operator:max_iter < 0; 29, raise_statement; 30, identifier:max_iter; 31, None; 32, expression_statement; 33, not_operator; 34, block; 35, pattern_list; 36, call; 37, identifier:u; 38, identifier:s; 39, identifier:v; 40, call; 41, identifier:max_iter; 42, None; 43, identifier:max_iter; 44, integer:0; 45, call; 46, assignment; 47, identifier:max_iter; 48, expression_statement; 49, identifier:u; 50, identifier:s; 51, identifier:v; 52, attribute; 53, argument_list; 54, identifier:ValueError; 55, argument_list; 56, identifier:ValueError; 57, argument_list; 58, identifier:max_iter; 59, attribute; 60, assignment; 61, identifier:linalg; 62, identifier:svds; 63, identifier:sparse_matrix; 64, keyword_argument; 65, keyword_argument; 66, keyword_argument; 67, binary_operator:"num_values should be > 0 but instead is %d." % num_values; 68, binary_operator:"max_iter should be >= 0 but instead is %d." % max_iter; 69, identifier:FLAGS; 70, identifier:max_iter_sparse_svd; 71, identifier:max_iter; 72, None; 73, identifier:k; 74, identifier:num_values; 75, identifier:maxiter; 76, identifier:max_iter; 77, identifier:return_singular_vectors; 78, True; 79, string:"num_values should be > 0 but instead is %d."; 80, identifier:num_values; 81, string:"max_iter should be >= 0 but instead is %d."; 82, identifier:max_iter
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 8, 14; 9, 15; 9, 16; 10, 17; 10, 18; 11, 19; 11, 20; 11, 21; 12, 22; 13, 23; 15, 24; 15, 25; 16, 26; 17, 27; 17, 28; 18, 29; 19, 30; 19, 31; 20, 32; 21, 33; 21, 34; 22, 35; 22, 36; 23, 37; 23, 38; 23, 39; 26, 40; 27, 41; 27, 42; 28, 43; 28, 44; 29, 45; 32, 46; 33, 47; 34, 48; 35, 49; 35, 50; 35, 51; 36, 52; 36, 53; 40, 54; 40, 55; 45, 56; 45, 57; 46, 58; 46, 59; 48, 60; 52, 61; 52, 62; 53, 63; 53, 64; 53, 65; 53, 66; 55, 67; 57, 68; 59, 69; 59, 70; 60, 71; 60, 72; 64, 73; 64, 74; 65, 75; 65, 76; 66, 77; 66, 78; 67, 79; 67, 80; 68, 81; 68, 82
def sparse_svd(sparse_matrix, num_values, max_iter): """Wrapper around SciPy's Singular Value Decomposition for sparse matrices. Args: sparse_matrix: a SciPy sparse matrix (typically large). num_values: the number of largest singular values to compute. max_iter: maximum number of iterations (>= 0) in the decomposition. If max_iter is None, runs FLAGS.max_iter_sparse_svd steps. If max_iter == 0, runs until convergence. Otherwise will run max_iter steps. Returns: A (u, s, v) tuple where s is an array entailing the singular values, and (u, v) the singular vector matrices. u is column orthogonal and v is row orthogonal. s is sorted in increasing order. """ if num_values <= 0: raise ValueError("num_values should be > 0 but instead is %d." % num_values) if max_iter is not None and max_iter < 0: raise ValueError("max_iter should be >= 0 but instead is %d." % max_iter) if max_iter is None: max_iter = FLAGS.max_iter_sparse_svd elif not max_iter: max_iter = None u, s, v = linalg.svds( sparse_matrix, k=num_values, maxiter=max_iter, return_singular_vectors=True) return (u, s, v)
0, module; 1, function_definition; 2, function_name:build_collate_fn; 3, parameters; 4, block; 5, default_parameter; 6, default_parameter; 7, default_parameter; 8, expression_statement; 9, function_definition; 10, function_definition; 11, function_definition; 12, if_statement; 13, identifier:batch_first; 14, False; 15, identifier:parallel; 16, True; 17, identifier:sort; 18, False; 19, comment:""" Factory for collate_fn functions. :param batch_first: if True returns batches in (batch, seq) format, if False returns in (seq, batch) format :param parallel: if True builds batches from parallel corpus (src, tgt) :param sort: if True sorts by src sequence length within each batch """; 20, function_name:collate_seq; 21, parameters; 22, block; 23, function_name:parallel_collate; 24, parameters; 25, block; 26, function_name:single_collate; 27, parameters; 28, block; 29, identifier:parallel; 30, block; 31, else_clause; 32, identifier:seq; 33, expression_statement; 34, expression_statement; 35, expression_statement; 36, expression_statement; 37, expression_statement; 38, for_statement; 39, if_statement; 40, return_statement; 41, identifier:seqs; 42, expression_statement; 43, expression_statement; 44, if_statement; 45, return_statement; 46, identifier:src_seqs; 47, expression_statement; 48, if_statement; 49, return_statement; 50, return_statement; 51, block; 52, comment:""" Builds batches for training or inference. Batches are returned as pytorch tensors, with padding. :param seq: list of sequences """; 53, assignment; 54, assignment; 55, assignment; 56, assignment; 57, pattern_list; 58, call; 59, block; 60, identifier:batch_first; 61, block; 62, tuple; 63, comment:""" Builds batches from parallel dataset (src, tgt), optionally sorts batch by src sequence length. :param seqs: tuple of (src, tgt) sequences """; 64, assignment; 65, identifier:sort; 66, block; 67, call; 68, comment:""" Builds batches from text dataset, optionally sorts batch by src sequence length. :param src_seqs: source sequences """; 69, identifier:sort; 70, block; 71, else_clause; 72, expression_list; 73, identifier:parallel_collate; 74, return_statement; 75, identifier:lengths; 76, list_comprehension; 77, identifier:batch_length; 78, call; 79, identifier:shape; 80, tuple; 81, identifier:seq_tensor; 82, call; 83, identifier:i; 84, identifier:s; 85, identifier:enumerate; 86, argument_list; 87, expression_statement; 88, expression_statement; 89, expression_statement; 90, identifier:seq_tensor; 91, identifier:lengths; 92, pattern_list; 93, call; 94, expression_statement; 95, expression_statement; 96, identifier:tuple; 97, argument_list; 98, expression_statement; 99, block; 100, call; 101, call; 102, identifier:single_collate; 103, call; 104, for_in_clause; 105, identifier:max; 106, argument_list; 107, identifier:batch_length; 108, call; 109, attribute; 110, argument_list; 111, identifier:seq; 112, assignment; 113, call; 114, assignment; 115, identifier:src_seqs; 116, identifier:tgt_seqs; 117, identifier:zip; 118, argument_list; 119, assignment; 120, assignment; 121, list_comprehension; 122, assignment; 123, expression_statement; 124, identifier:collate_seq; 125, argument_list; 126, identifier:tuple; 127, argument_list; 128, identifier:len; 129, argument_list; 130, identifier:s; 131, identifier:seq; 132, identifier:lengths; 133, identifier:len; 134, argument_list; 135, identifier:torch; 136, identifier:full; 137, identifier:shape; 138, attribute; 139, keyword_argument; 140, identifier:end_seq; 141, subscript; 142, attribute; 143, argument_list; 144, identifier:seq_tensor; 145, call; 146, list_splat; 147, pattern_list; 148, call; 149, identifier:tgt_seqs; 150, list_comprehension; 151, call; 152, for_in_clause; 153, pattern_list; 154, call; 155, assignment; 156, identifier:src_seqs; 157, identifier:indices; 158, identifier:s; 159, identifier:seq; 160, identifier:config; 161, identifier:PAD; 162, identifier:dtype; 163, attribute; 164, identifier:lengths; 165, identifier:i; 166, subscript; 167, identifier:copy_; 168, subscript; 169, attribute; 170, argument_list; 171, identifier:seqs; 172, identifier:indices; 173, identifier:src_seqs; 174, identifier:zip; 175, argument_list; 176, subscript; 177, for_in_clause; 178, identifier:collate_seq; 179, argument_list; 180, identifier:s; 181, list; 182, identifier:indices; 183, identifier:src_seqs; 184, identifier:zip; 185, argument_list; 186, identifier:indices; 187, call; 188, identifier:torch; 189, identifier:int64; 190, identifier:seq_tensor; 191, slice; 192, identifier:i; 193, identifier:s; 194, slice; 195, identifier:seq_tensor; 196, identifier:t; 197, list_splat; 198, identifier:tgt_seqs; 199, identifier:idx; 200, identifier:idx; 201, identifier:indices; 202, identifier:s; 203, identifier:src_seqs; 204, identifier:tgt_seqs; 205, list_splat; 206, identifier:range; 207, argument_list; 208, identifier:end_seq; 209, identifier:end_seq; 210, call; 211, call; 212, call; 213, identifier:sorted; 214, argument_list; 215, identifier:sorted; 216, argument_list; 217, identifier:len; 218, argument_list; 219, call; 220, keyword_argument; 221, keyword_argument; 222, call; 223, keyword_argument; 224, keyword_argument; 225, identifier:src_seqs; 226, identifier:enumerate; 227, argument_list; 228, identifier:key; 229, lambda; 230, identifier:reverse; 231, True; 232, identifier:enumerate; 233, argument_list; 234, identifier:key; 235, lambda; 236, identifier:reverse; 237, True; 238, identifier:src_seqs; 239, lambda_parameters; 240, call; 241, identifier:src_seqs; 242, lambda_parameters; 243, call; 244, identifier:item; 245, identifier:len; 246, argument_list; 247, identifier:item; 248, identifier:len; 249, argument_list; 250, subscript; 251, subscript; 252, identifier:item; 253, integer:1; 254, identifier:item; 255, integer:1
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 5, 13; 5, 14; 6, 15; 6, 16; 7, 17; 7, 18; 8, 19; 9, 20; 9, 21; 9, 22; 10, 23; 10, 24; 10, 25; 11, 26; 11, 27; 11, 28; 12, 29; 12, 30; 12, 31; 21, 32; 22, 33; 22, 34; 22, 35; 22, 36; 22, 37; 22, 38; 22, 39; 22, 40; 24, 41; 25, 42; 25, 43; 25, 44; 25, 45; 27, 46; 28, 47; 28, 48; 28, 49; 30, 50; 31, 51; 33, 52; 34, 53; 35, 54; 36, 55; 37, 56; 38, 57; 38, 58; 38, 59; 39, 60; 39, 61; 40, 62; 42, 63; 43, 64; 44, 65; 44, 66; 45, 67; 47, 68; 48, 69; 48, 70; 48, 71; 49, 72; 50, 73; 51, 74; 53, 75; 53, 76; 54, 77; 54, 78; 55, 79; 55, 80; 56, 81; 56, 82; 57, 83; 57, 84; 58, 85; 58, 86; 59, 87; 59, 88; 61, 89; 62, 90; 62, 91; 64, 92; 64, 93; 66, 94; 66, 95; 67, 96; 67, 97; 70, 98; 71, 99; 72, 100; 72, 101; 74, 102; 76, 103; 76, 104; 78, 105; 78, 106; 80, 107; 80, 108; 82, 109; 82, 110; 86, 111; 87, 112; 88, 113; 89, 114; 92, 115; 92, 116; 93, 117; 93, 118; 94, 119; 95, 120; 97, 121; 98, 122; 99, 123; 100, 124; 100, 125; 101, 126; 101, 127; 103, 128; 103, 129; 104, 130; 104, 131; 106, 132; 108, 133; 108, 134; 109, 135; 109, 136; 110, 137; 110, 138; 110, 139; 112, 140; 112, 141; 113, 142; 113, 143; 114, 144; 114, 145; 118, 146; 119, 147; 119, 148; 120, 149; 120, 150; 121, 151; 121, 152; 122, 153; 122, 154; 123, 155; 125, 156; 127, 157; 129, 158; 134, 159; 138, 160; 138, 161; 139, 162; 139, 163; 141, 164; 141, 165; 142, 166; 142, 167; 143, 168; 145, 169; 145, 170; 146, 171; 147, 172; 147, 173; 148, 174; 148, 175; 150, 176; 150, 177; 151, 178; 151, 179; 152, 180; 152, 181; 153, 182; 153, 183; 154, 184; 154, 185; 155, 186; 155, 187; 163, 188; 163, 189; 166, 190; 166, 191; 166, 192; 168, 193; 168, 194; 169, 195; 169, 196; 175, 197; 176, 198; 176, 199; 177, 200; 177, 201; 179, 202; 181, 203; 181, 204; 185, 205; 187, 206; 187, 207; 191, 208; 194, 209; 197, 210; 205, 211; 207, 212; 210, 213; 210, 214; 211, 215; 211, 216; 212, 217; 212, 218; 214, 219; 214, 220; 214, 221; 216, 222; 216, 223; 216, 224; 218, 225; 219, 226; 219, 227; 220, 228; 220, 229; 221, 230; 221, 231; 222, 232; 222, 233; 223, 234; 223, 235; 224, 236; 224, 237; 227, 238; 229, 239; 229, 240; 233, 241; 235, 242; 235, 243; 239, 244; 240, 245; 240, 246; 242, 247; 243, 248; 243, 249; 246, 250; 249, 251; 250, 252; 250, 253; 251, 254; 251, 255
def build_collate_fn(batch_first=False, parallel=True, sort=False): """ Factory for collate_fn functions. :param batch_first: if True returns batches in (batch, seq) format, if False returns in (seq, batch) format :param parallel: if True builds batches from parallel corpus (src, tgt) :param sort: if True sorts by src sequence length within each batch """ def collate_seq(seq): """ Builds batches for training or inference. Batches are returned as pytorch tensors, with padding. :param seq: list of sequences """ lengths = [len(s) for s in seq] batch_length = max(lengths) shape = (batch_length, len(seq)) seq_tensor = torch.full(shape, config.PAD, dtype=torch.int64) for i, s in enumerate(seq): end_seq = lengths[i] seq_tensor[:end_seq, i].copy_(s[:end_seq]) if batch_first: seq_tensor = seq_tensor.t() return (seq_tensor, lengths) def parallel_collate(seqs): """ Builds batches from parallel dataset (src, tgt), optionally sorts batch by src sequence length. :param seqs: tuple of (src, tgt) sequences """ src_seqs, tgt_seqs = zip(*seqs) if sort: indices, src_seqs = zip(*sorted(enumerate(src_seqs), key=lambda item: len(item[1]), reverse=True)) tgt_seqs = [tgt_seqs[idx] for idx in indices] return tuple([collate_seq(s) for s in [src_seqs, tgt_seqs]]) def single_collate(src_seqs): """ Builds batches from text dataset, optionally sorts batch by src sequence length. :param src_seqs: source sequences """ if sort: indices, src_seqs = zip(*sorted(enumerate(src_seqs), key=lambda item: len(item[1]), reverse=True)) else: indices = range(len(src_seqs)) return collate_seq(src_seqs), tuple(indices) if parallel: return parallel_collate else: return single_collate
0, module; 1, function_definition; 2, function_name:get_golden_chunk_records; 3, parameters; 4, block; 5, expression_statement; 6, expression_statement; 7, return_statement; 8, comment:"""Return up to num_records of golden chunks to train on. Returns: A list of golden chunks up to num_records in length, sorted by path. """; 9, assignment; 10, subscript; 11, identifier:pattern; 12, call; 13, call; 14, slice; 15, attribute; 16, argument_list; 17, identifier:sorted; 18, argument_list; 19, attribute; 20, attribute; 21, identifier:join; 22, call; 23, string; 24, call; 25, keyword_argument; 26, identifier:FLAGS; 27, identifier:window_size; 28, identifier:os; 29, identifier:path; 30, attribute; 31, argument_list; 32, string_content:*.zz; 33, attribute; 34, argument_list; 35, identifier:reverse; 36, True; 37, identifier:fsdb; 38, identifier:golden_chunk_dir; 39, attribute; 40, identifier:Glob; 41, identifier:pattern; 42, identifier:tf; 43, identifier:gfile
0, 1; 1, 2; 1, 3; 1, 4; 4, 5; 4, 6; 4, 7; 5, 8; 6, 9; 7, 10; 9, 11; 9, 12; 10, 13; 10, 14; 12, 15; 12, 16; 13, 17; 13, 18; 14, 19; 15, 20; 15, 21; 16, 22; 16, 23; 18, 24; 18, 25; 19, 26; 19, 27; 20, 28; 20, 29; 22, 30; 22, 31; 23, 32; 24, 33; 24, 34; 25, 35; 25, 36; 30, 37; 30, 38; 33, 39; 33, 40; 34, 41; 39, 42; 39, 43
def get_golden_chunk_records(): """Return up to num_records of golden chunks to train on. Returns: A list of golden chunks up to num_records in length, sorted by path. """ pattern = os.path.join(fsdb.golden_chunk_dir(), '*.zz') return sorted(tf.gfile.Glob(pattern), reverse=True)[:FLAGS.window_size]
0, module; 1, function_definition; 2, function_name:_sorted_results; 3, parameters; 4, block; 5, identifier:self; 6, identifier:results_dicts; 7, expression_statement; 8, expression_statement; 9, expression_statement; 10, expression_statement; 11, for_statement; 12, return_statement; 13, comment:"""Sorts dict of results based on log start_time. Sorts the results and returns an array with only the values but sorted by oldest value first.value Args: results_dicts: List of result dicts Returns: List of only the time but sorted oldest first. """; 14, call; 15, assignment; 16, assignment; 17, identifier:entry; 18, identifier:sorted_dict; 19, block; 20, identifier:results; 21, identifier:print; 22, argument_list; 23, identifier:sorted_dict; 24, call; 25, identifier:results; 26, list; 27, expression_statement; 28, string; 29, identifier:results_dicts; 30, identifier:sorted; 31, argument_list; 32, call; 33, string_content:results dicts:; 34, identifier:results_dicts; 35, keyword_argument; 36, attribute; 37, argument_list; 38, identifier:key; 39, lambda; 40, identifier:results; 41, identifier:append; 42, subscript; 43, lambda_parameters; 44, subscript; 45, identifier:entry; 46, string; 47, identifier:k; 48, identifier:k; 49, string; 50, string_content:dt; 51, string_content:start_time
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 7, 13; 8, 14; 9, 15; 10, 16; 11, 17; 11, 18; 11, 19; 12, 20; 14, 21; 14, 22; 15, 23; 15, 24; 16, 25; 16, 26; 19, 27; 22, 28; 22, 29; 24, 30; 24, 31; 27, 32; 28, 33; 31, 34; 31, 35; 32, 36; 32, 37; 35, 38; 35, 39; 36, 40; 36, 41; 37, 42; 39, 43; 39, 44; 42, 45; 42, 46; 43, 47; 44, 48; 44, 49; 46, 50; 49, 51
def _sorted_results(self, results_dicts): """Sorts dict of results based on log start_time. Sorts the results and returns an array with only the values but sorted by oldest value first.value Args: results_dicts: List of result dicts Returns: List of only the time but sorted oldest first. """ print('results dicts:', results_dicts) sorted_dict = sorted(results_dicts, key=lambda k: k['start_time']) results = [] for entry in sorted_dict: results.append(entry['dt']) return results
0, module; 1, function_definition; 2, function_name:get_models; 3, parameters; 4, block; 5, expression_statement; 6, expression_statement; 7, expression_statement; 8, expression_statement; 9, return_statement; 10, comment:"""Finds all models, returning a list of model number and names sorted increasing. Returns: [(13, 000013-modelname), (17, 000017-modelname), ...etc] """; 11, assignment; 12, assignment; 13, assignment; 14, identifier:model_numbers_names; 15, identifier:all_models; 16, call; 17, identifier:model_filenames; 18, list_comprehension; 19, identifier:model_numbers_names; 20, call; 21, attribute; 22, argument_list; 23, call; 24, for_in_clause; 25, identifier:sorted; 26, argument_list; 27, identifier:gfile; 28, identifier:Glob; 29, call; 30, attribute; 31, argument_list; 32, identifier:m; 33, identifier:all_models; 34, list_comprehension; 35, attribute; 36, argument_list; 37, attribute; 38, identifier:basename; 39, identifier:m; 40, tuple; 41, for_in_clause; 42, attribute; 43, identifier:join; 44, call; 45, string; 46, identifier:os; 47, identifier:path; 48, call; 49, call; 50, identifier:m; 51, identifier:model_filenames; 52, identifier:os; 53, identifier:path; 54, identifier:models_dir; 55, argument_list; 56, string_content:*.meta; 57, attribute; 58, argument_list; 59, attribute; 60, argument_list; 61, identifier:shipname; 62, identifier:detect_model_num; 63, identifier:m; 64, identifier:shipname; 65, identifier:detect_model_name; 66, identifier:m
0, 1; 1, 2; 1, 3; 1, 4; 4, 5; 4, 6; 4, 7; 4, 8; 4, 9; 5, 10; 6, 11; 7, 12; 8, 13; 9, 14; 11, 15; 11, 16; 12, 17; 12, 18; 13, 19; 13, 20; 16, 21; 16, 22; 18, 23; 18, 24; 20, 25; 20, 26; 21, 27; 21, 28; 22, 29; 23, 30; 23, 31; 24, 32; 24, 33; 26, 34; 29, 35; 29, 36; 30, 37; 30, 38; 31, 39; 34, 40; 34, 41; 35, 42; 35, 43; 36, 44; 36, 45; 37, 46; 37, 47; 40, 48; 40, 49; 41, 50; 41, 51; 42, 52; 42, 53; 44, 54; 44, 55; 45, 56; 48, 57; 48, 58; 49, 59; 49, 60; 57, 61; 57, 62; 58, 63; 59, 64; 59, 65; 60, 66
def get_models(): """Finds all models, returning a list of model number and names sorted increasing. Returns: [(13, 000013-modelname), (17, 000017-modelname), ...etc] """ all_models = gfile.Glob(os.path.join(models_dir(), '*.meta')) model_filenames = [os.path.basename(m) for m in all_models] model_numbers_names = sorted([ (shipname.detect_model_num(m), shipname.detect_model_name(m)) for m in model_filenames]) return model_numbers_names
0, module; 1, function_definition; 2, function_name:sorted_by; 3, parameters; 4, type; 5, block; 6, typed_parameter; 7, string; 8, expression_statement; 9, return_statement; 10, identifier:key; 11, type; 12, string_content:QubitOrder; 13, comment:"""A basis that orders qubits ascending based on a key function. Args: key: A function that takes a qubit and returns a key value. The basis will be ordered ascending according to these key values. Returns: A basis that orders qubits ascending based on a key function. """; 14, call; 15, generic_type; 16, identifier:QubitOrder; 17, argument_list; 18, identifier:Callable; 19, type_parameter; 20, lambda; 21, type; 22, type; 23, lambda_parameters; 24, call; 25, list; 26, identifier:Any; 27, identifier:qubits; 28, identifier:tuple; 29, argument_list; 30, attribute; 31, call; 32, identifier:raw_types; 33, identifier:Qid; 34, identifier:sorted; 35, argument_list; 36, identifier:qubits; 37, keyword_argument; 38, identifier:key; 39, identifier:key
0, 1; 1, 2; 1, 3; 1, 4; 1, 5; 3, 6; 4, 7; 5, 8; 5, 9; 6, 10; 6, 11; 7, 12; 8, 13; 9, 14; 11, 15; 14, 16; 14, 17; 15, 18; 15, 19; 17, 20; 19, 21; 19, 22; 20, 23; 20, 24; 21, 25; 22, 26; 23, 27; 24, 28; 24, 29; 25, 30; 29, 31; 30, 32; 30, 33; 31, 34; 31, 35; 35, 36; 35, 37; 37, 38; 37, 39
def sorted_by(key: Callable[[raw_types.Qid], Any]) -> 'QubitOrder': """A basis that orders qubits ascending based on a key function. Args: key: A function that takes a qubit and returns a key value. The basis will be ordered ascending according to these key values. Returns: A basis that orders qubits ascending based on a key function. """ return QubitOrder(lambda qubits: tuple(sorted(qubits, key=key)))
0, module; 1, function_definition; 2, function_name:diagonalize_real_symmetric_and_sorted_diagonal_matrices; 3, parameters; 4, type; 5, block; 6, typed_parameter; 7, typed_parameter; 8, keyword_separator; 9, typed_default_parameter; 10, typed_default_parameter; 11, typed_default_parameter; 12, attribute; 13, expression_statement; 14, comment:# Verify preconditions.; 15, if_statement; 16, function_definition; 17, comment:# Because the symmetric matrix commutes with the diagonal singulars matrix,; 18, comment:# the symmetric matrix should be block-diagonal with a block boundary; 19, comment:# wherever the singular values happen change. So we can use the singular; 20, comment:# values to extract blocks that can be independently diagonalized.; 21, expression_statement; 22, comment:# Build the overall diagonalization by diagonalizing each block.; 23, expression_statement; 24, for_statement; 25, return_statement; 26, identifier:symmetric_matrix; 27, type; 28, identifier:diagonal_matrix; 29, type; 30, identifier:rtol; 31, type; 32, float:1e-5; 33, identifier:atol; 34, type; 35, float:1e-8; 36, identifier:check_preconditions; 37, type; 38, True; 39, identifier:np; 40, identifier:ndarray; 41, comment:"""Returns an orthogonal matrix that diagonalizes both given matrices. The given matrices must commute. Guarantees that the sorted diagonal matrix is not permuted by the diagonalization (except for nearly-equal values). Args: symmetric_matrix: A real symmetric matrix. diagonal_matrix: A real diagonal matrix with entries along the diagonal sorted into descending order. rtol: Relative numeric error threshold. atol: Absolute numeric error threshold. check_preconditions: If set, verifies that the input matrices commute and are respectively symmetric and diagonal descending. Returns: An orthogonal matrix P such that P.T @ symmetric_matrix @ P is diagonal and P.T @ diagonal_matrix @ P = diagonal_matrix (up to tolerance). Raises: ValueError: Matrices don't meet preconditions (e.g. not symmetric). """; 42, identifier:check_preconditions; 43, block; 44, function_name:similar_singular; 45, parameters; 46, block; 47, assignment; 48, assignment; 49, pattern_list; 50, identifier:ranges; 51, block; 52, identifier:p; 53, attribute; 54, attribute; 55, identifier:float; 56, identifier:float; 57, identifier:bool; 58, if_statement; 59, if_statement; 60, if_statement; 61, identifier:i; 62, identifier:j; 63, return_statement; 64, identifier:ranges; 65, call; 66, identifier:p; 67, call; 68, identifier:start; 69, identifier:end; 70, expression_statement; 71, expression_statement; 72, identifier:np; 73, identifier:ndarray; 74, identifier:np; 75, identifier:ndarray; 76, parenthesized_expression; 77, block; 78, parenthesized_expression; 79, block; 80, not_operator; 81, block; 82, call; 83, identifier:_contiguous_groups; 84, argument_list; 85, attribute; 86, argument_list; 87, assignment; 88, assignment; 89, boolean_operator; 90, raise_statement; 91, boolean_operator; 92, raise_statement; 93, call; 94, raise_statement; 95, attribute; 96, argument_list; 97, subscript; 98, identifier:similar_singular; 99, identifier:np; 100, identifier:zeros; 101, attribute; 102, keyword_argument; 103, identifier:block; 104, subscript; 105, subscript; 106, call; 107, call; 108, not_operator; 109, call; 110, boolean_operator; 111, call; 112, call; 113, attribute; 114, argument_list; 115, call; 116, identifier:np; 117, identifier:allclose; 118, subscript; 119, subscript; 120, keyword_argument; 121, attribute; 122, integer:0; 123, identifier:symmetric_matrix; 124, identifier:shape; 125, identifier:dtype; 126, attribute; 127, identifier:symmetric_matrix; 128, slice; 129, slice; 130, identifier:p; 131, slice; 132, slice; 133, identifier:diagonalize_real_symmetric_matrix; 134, argument_list; 135, attribute; 136, argument_list; 137, call; 138, identifier:ValueError; 139, argument_list; 140, not_operator; 141, call; 142, attribute; 143, argument_list; 144, identifier:ValueError; 145, argument_list; 146, identifier:predicates; 147, identifier:commutes; 148, identifier:diagonal_matrix; 149, identifier:symmetric_matrix; 150, keyword_argument; 151, keyword_argument; 152, identifier:ValueError; 153, argument_list; 154, identifier:diagonal_matrix; 155, identifier:i; 156, identifier:i; 157, identifier:diagonal_matrix; 158, identifier:j; 159, identifier:j; 160, identifier:rtol; 161, identifier:rtol; 162, identifier:diagonal_matrix; 163, identifier:shape; 164, identifier:np; 165, identifier:float64; 166, identifier:start; 167, identifier:end; 168, identifier:start; 169, identifier:end; 170, identifier:start; 171, identifier:end; 172, identifier:start; 173, identifier:end; 174, identifier:block; 175, keyword_argument; 176, keyword_argument; 177, identifier:np; 178, identifier:any; 179, call; 180, attribute; 181, argument_list; 182, string; 183, call; 184, attribute; 185, argument_list; 186, identifier:np; 187, identifier:any; 188, comparison_operator:diagonal_matrix[:-1, :-1] < diagonal_matrix[1:, 1:]; 189, string; 190, identifier:rtol; 191, identifier:rtol; 192, identifier:atol; 193, identifier:atol; 194, string; 195, identifier:rtol; 196, identifier:rtol; 197, identifier:atol; 198, identifier:atol; 199, attribute; 200, argument_list; 201, identifier:predicates; 202, identifier:is_hermitian; 203, identifier:symmetric_matrix; 204, keyword_argument; 205, keyword_argument; 206, string_content:symmetric_matrix must be real symmetric.; 207, attribute; 208, argument_list; 209, identifier:np; 210, identifier:any; 211, call; 212, subscript; 213, subscript; 214, string_content:diagonal_matrix must be real diagonal descending.; 215, string_content:Given matrices must commute.; 216, identifier:np; 217, identifier:imag; 218, identifier:symmetric_matrix; 219, identifier:rtol; 220, identifier:rtol; 221, identifier:atol; 222, identifier:atol; 223, identifier:predicates; 224, identifier:is_diagonal; 225, identifier:diagonal_matrix; 226, keyword_argument; 227, attribute; 228, argument_list; 229, identifier:diagonal_matrix; 230, slice; 231, slice; 232, identifier:diagonal_matrix; 233, slice; 234, slice; 235, identifier:atol; 236, identifier:atol; 237, identifier:np; 238, identifier:imag; 239, identifier:diagonal_matrix; 240, unary_operator; 241, unary_operator; 242, integer:1; 243, integer:1; 244, integer:1; 245, integer:1
0, 1; 1, 2; 1, 3; 1, 4; 1, 5; 3, 6; 3, 7; 3, 8; 3, 9; 3, 10; 3, 11; 4, 12; 5, 13; 5, 14; 5, 15; 5, 16; 5, 17; 5, 18; 5, 19; 5, 20; 5, 21; 5, 22; 5, 23; 5, 24; 5, 25; 6, 26; 6, 27; 7, 28; 7, 29; 9, 30; 9, 31; 9, 32; 10, 33; 10, 34; 10, 35; 11, 36; 11, 37; 11, 38; 12, 39; 12, 40; 13, 41; 15, 42; 15, 43; 16, 44; 16, 45; 16, 46; 21, 47; 23, 48; 24, 49; 24, 50; 24, 51; 25, 52; 27, 53; 29, 54; 31, 55; 34, 56; 37, 57; 43, 58; 43, 59; 43, 60; 45, 61; 45, 62; 46, 63; 47, 64; 47, 65; 48, 66; 48, 67; 49, 68; 49, 69; 51, 70; 51, 71; 53, 72; 53, 73; 54, 74; 54, 75; 58, 76; 58, 77; 59, 78; 59, 79; 60, 80; 60, 81; 63, 82; 65, 83; 65, 84; 67, 85; 67, 86; 70, 87; 71, 88; 76, 89; 77, 90; 78, 91; 79, 92; 80, 93; 81, 94; 82, 95; 82, 96; 84, 97; 84, 98; 85, 99; 85, 100; 86, 101; 86, 102; 87, 103; 87, 104; 88, 105; 88, 106; 89, 107; 89, 108; 90, 109; 91, 110; 91, 111; 92, 112; 93, 113; 93, 114; 94, 115; 95, 116; 95, 117; 96, 118; 96, 119; 96, 120; 97, 121; 97, 122; 101, 123; 101, 124; 102, 125; 102, 126; 104, 127; 104, 128; 104, 129; 105, 130; 105, 131; 105, 132; 106, 133; 106, 134; 107, 135; 107, 136; 108, 137; 109, 138; 109, 139; 110, 140; 110, 141; 111, 142; 111, 143; 112, 144; 112, 145; 113, 146; 113, 147; 114, 148; 114, 149; 114, 150; 114, 151; 115, 152; 115, 153; 118, 154; 118, 155; 118, 156; 119, 157; 119, 158; 119, 159; 120, 160; 120, 161; 121, 162; 121, 163; 126, 164; 126, 165; 128, 166; 128, 167; 129, 168; 129, 169; 131, 170; 131, 171; 132, 172; 132, 173; 134, 174; 134, 175; 134, 176; 135, 177; 135, 178; 136, 179; 137, 180; 137, 181; 139, 182; 140, 183; 141, 184; 141, 185; 142, 186; 142, 187; 143, 188; 145, 189; 150, 190; 150, 191; 151, 192; 151, 193; 153, 194; 175, 195; 175, 196; 176, 197; 176, 198; 179, 199; 179, 200; 180, 201; 180, 202; 181, 203; 181, 204; 181, 205; 182, 206; 183, 207; 183, 208; 184, 209; 184, 210; 185, 211; 188, 212; 188, 213; 189, 214; 194, 215; 199, 216; 199, 217; 200, 218; 204, 219; 204, 220; 205, 221; 205, 222; 207, 223; 207, 224; 208, 225; 208, 226; 211, 227; 211, 228; 212, 229; 212, 230; 212, 231; 213, 232; 213, 233; 213, 234; 226, 235; 226, 236; 227, 237; 227, 238; 228, 239; 230, 240; 231, 241; 233, 242; 234, 243; 240, 244; 241, 245
def diagonalize_real_symmetric_and_sorted_diagonal_matrices( symmetric_matrix: np.ndarray, diagonal_matrix: np.ndarray, *, rtol: float = 1e-5, atol: float = 1e-8, check_preconditions: bool = True) -> np.ndarray: """Returns an orthogonal matrix that diagonalizes both given matrices. The given matrices must commute. Guarantees that the sorted diagonal matrix is not permuted by the diagonalization (except for nearly-equal values). Args: symmetric_matrix: A real symmetric matrix. diagonal_matrix: A real diagonal matrix with entries along the diagonal sorted into descending order. rtol: Relative numeric error threshold. atol: Absolute numeric error threshold. check_preconditions: If set, verifies that the input matrices commute and are respectively symmetric and diagonal descending. Returns: An orthogonal matrix P such that P.T @ symmetric_matrix @ P is diagonal and P.T @ diagonal_matrix @ P = diagonal_matrix (up to tolerance). Raises: ValueError: Matrices don't meet preconditions (e.g. not symmetric). """ # Verify preconditions. if check_preconditions: if (np.any(np.imag(symmetric_matrix)) or not predicates.is_hermitian(symmetric_matrix, rtol=rtol, atol=atol)): raise ValueError('symmetric_matrix must be real symmetric.') if (not predicates.is_diagonal(diagonal_matrix, atol=atol) or np.any(np.imag(diagonal_matrix)) or np.any(diagonal_matrix[:-1, :-1] < diagonal_matrix[1:, 1:])): raise ValueError( 'diagonal_matrix must be real diagonal descending.') if not predicates.commutes(diagonal_matrix, symmetric_matrix, rtol=rtol, atol=atol): raise ValueError('Given matrices must commute.') def similar_singular(i, j): return np.allclose(diagonal_matrix[i, i], diagonal_matrix[j, j], rtol=rtol) # Because the symmetric matrix commutes with the diagonal singulars matrix, # the symmetric matrix should be block-diagonal with a block boundary # wherever the singular values happen change. So we can use the singular # values to extract blocks that can be independently diagonalized. ranges = _contiguous_groups(diagonal_matrix.shape[0], similar_singular) # Build the overall diagonalization by diagonalizing each block. p = np.zeros(symmetric_matrix.shape, dtype=np.float64) for start, end in ranges: block = symmetric_matrix[start:end, start:end] p[start:end, start:end] = diagonalize_real_symmetric_matrix( block, rtol=rtol, atol=atol) return p
0, module; 1, function_definition; 2, function_name:findall_operations_between; 3, parameters; 4, type; 5, block; 6, identifier:self; 7, typed_parameter; 8, typed_parameter; 9, typed_default_parameter; 10, generic_type; 11, expression_statement; 12, expression_statement; 13, expression_statement; 14, comment:# Note: only sorted to ensure a deterministic result ordering.; 15, for_statement; 16, return_statement; 17, identifier:start_frontier; 18, type; 19, identifier:end_frontier; 20, type; 21, identifier:omit_crossing_operations; 22, type; 23, False; 24, identifier:List; 25, type_parameter; 26, comment:"""Finds operations between the two given frontiers. If a qubit is in `start_frontier` but not `end_frontier`, its end index defaults to the end of the circuit. If a qubit is in `end_frontier` but not `start_frontier`, its start index defaults to the start of the circuit. Operations on qubits not mentioned in either frontier are not included in the results. Args: start_frontier: Just before where to start searching for operations, for each qubit of interest. Start frontier indices are inclusive. end_frontier: Just before where to stop searching for operations, for each qubit of interest. End frontier indices are exclusive. omit_crossing_operations: Determines whether or not operations that cross from a location between the two frontiers to a location outside the two frontiers are included or excluded. (Operations completely inside are always included, and operations completely outside are always excluded.) Returns: A list of tuples. Each tuple describes an operation found between the two frontiers. The first item of each tuple is the index of the moment containing the operation, and the second item is the operation itself. The list is sorted so that the moment index increases monotonically. """; 27, assignment; 28, assignment; 29, identifier:q; 30, call; 31, block; 32, call; 33, generic_type; 34, generic_type; 35, identifier:bool; 36, type; 37, identifier:result; 38, call; 39, identifier:involved_qubits; 40, binary_operator:set(start_frontier.keys()) | set(end_frontier.keys()); 41, identifier:sorted; 42, argument_list; 43, for_statement; 44, identifier:list; 45, argument_list; 46, identifier:Dict; 47, type_parameter; 48, identifier:Dict; 49, type_parameter; 50, generic_type; 51, subscript; 52, argument_list; 53, call; 54, call; 55, identifier:involved_qubits; 56, identifier:i; 57, call; 58, block; 59, identifier:result; 60, type; 61, type; 62, type; 63, type; 64, identifier:Tuple; 65, type_parameter; 66, identifier:BucketPriorityQueue; 67, attribute; 68, keyword_argument; 69, identifier:set; 70, argument_list; 71, identifier:set; 72, argument_list; 73, identifier:range; 74, argument_list; 75, expression_statement; 76, if_statement; 77, if_statement; 78, expression_statement; 79, attribute; 80, identifier:int; 81, attribute; 82, identifier:int; 83, type; 84, type; 85, identifier:ops; 86, identifier:Operation; 87, identifier:drop_duplicate_entries; 88, True; 89, call; 90, call; 91, call; 92, call; 93, assignment; 94, comparison_operator:op is None; 95, block; 96, parenthesized_expression; 97, block; 98, call; 99, identifier:ops; 100, identifier:Qid; 101, identifier:ops; 102, identifier:Qid; 103, identifier:int; 104, attribute; 105, attribute; 106, argument_list; 107, attribute; 108, argument_list; 109, attribute; 110, argument_list; 111, attribute; 112, argument_list; 113, identifier:op; 114, call; 115, identifier:op; 116, None; 117, continue_statement; 118, boolean_operator; 119, continue_statement; 120, attribute; 121, argument_list; 122, identifier:ops; 123, identifier:Operation; 124, identifier:start_frontier; 125, identifier:keys; 126, identifier:end_frontier; 127, identifier:keys; 128, identifier:start_frontier; 129, identifier:get; 130, identifier:q; 131, integer:0; 132, identifier:end_frontier; 133, identifier:get; 134, identifier:q; 135, call; 136, attribute; 137, argument_list; 138, identifier:omit_crossing_operations; 139, not_operator; 140, identifier:result; 141, identifier:enqueue; 142, identifier:i; 143, identifier:op; 144, identifier:len; 145, argument_list; 146, identifier:self; 147, identifier:operation_at; 148, identifier:q; 149, identifier:i; 150, call; 151, identifier:self; 152, attribute; 153, argument_list; 154, identifier:involved_qubits; 155, identifier:issuperset; 156, attribute; 157, identifier:op; 158, identifier:qubits
0, 1; 1, 2; 1, 3; 1, 4; 1, 5; 3, 6; 3, 7; 3, 8; 3, 9; 4, 10; 5, 11; 5, 12; 5, 13; 5, 14; 5, 15; 5, 16; 7, 17; 7, 18; 8, 19; 8, 20; 9, 21; 9, 22; 9, 23; 10, 24; 10, 25; 11, 26; 12, 27; 13, 28; 15, 29; 15, 30; 15, 31; 16, 32; 18, 33; 20, 34; 22, 35; 25, 36; 27, 37; 27, 38; 28, 39; 28, 40; 30, 41; 30, 42; 31, 43; 32, 44; 32, 45; 33, 46; 33, 47; 34, 48; 34, 49; 36, 50; 38, 51; 38, 52; 40, 53; 40, 54; 42, 55; 43, 56; 43, 57; 43, 58; 45, 59; 47, 60; 47, 61; 49, 62; 49, 63; 50, 64; 50, 65; 51, 66; 51, 67; 52, 68; 53, 69; 53, 70; 54, 71; 54, 72; 57, 73; 57, 74; 58, 75; 58, 76; 58, 77; 58, 78; 60, 79; 61, 80; 62, 81; 63, 82; 65, 83; 65, 84; 67, 85; 67, 86; 68, 87; 68, 88; 70, 89; 72, 90; 74, 91; 74, 92; 75, 93; 76, 94; 76, 95; 77, 96; 77, 97; 78, 98; 79, 99; 79, 100; 81, 101; 81, 102; 83, 103; 84, 104; 89, 105; 89, 106; 90, 107; 90, 108; 91, 109; 91, 110; 92, 111; 92, 112; 93, 113; 93, 114; 94, 115; 94, 116; 95, 117; 96, 118; 97, 119; 98, 120; 98, 121; 104, 122; 104, 123; 105, 124; 105, 125; 107, 126; 107, 127; 109, 128; 109, 129; 110, 130; 110, 131; 111, 132; 111, 133; 112, 134; 112, 135; 114, 136; 114, 137; 118, 138; 118, 139; 120, 140; 120, 141; 121, 142; 121, 143; 135, 144; 135, 145; 136, 146; 136, 147; 137, 148; 137, 149; 139, 150; 145, 151; 150, 152; 150, 153; 152, 154; 152, 155; 153, 156; 156, 157; 156, 158
def findall_operations_between(self, start_frontier: Dict[ops.Qid, int], end_frontier: Dict[ops.Qid, int], omit_crossing_operations: bool = False ) -> List[Tuple[int, ops.Operation]]: """Finds operations between the two given frontiers. If a qubit is in `start_frontier` but not `end_frontier`, its end index defaults to the end of the circuit. If a qubit is in `end_frontier` but not `start_frontier`, its start index defaults to the start of the circuit. Operations on qubits not mentioned in either frontier are not included in the results. Args: start_frontier: Just before where to start searching for operations, for each qubit of interest. Start frontier indices are inclusive. end_frontier: Just before where to stop searching for operations, for each qubit of interest. End frontier indices are exclusive. omit_crossing_operations: Determines whether or not operations that cross from a location between the two frontiers to a location outside the two frontiers are included or excluded. (Operations completely inside are always included, and operations completely outside are always excluded.) Returns: A list of tuples. Each tuple describes an operation found between the two frontiers. The first item of each tuple is the index of the moment containing the operation, and the second item is the operation itself. The list is sorted so that the moment index increases monotonically. """ result = BucketPriorityQueue[ops.Operation]( drop_duplicate_entries=True) involved_qubits = set(start_frontier.keys()) | set(end_frontier.keys()) # Note: only sorted to ensure a deterministic result ordering. for q in sorted(involved_qubits): for i in range(start_frontier.get(q, 0), end_frontier.get(q, len(self))): op = self.operation_at(q, i) if op is None: continue if (omit_crossing_operations and not involved_qubits.issuperset(op.qubits)): continue result.enqueue(i, op) return list(result)
0, module; 1, function_definition; 2, function_name:_GetUnsortedNotifications; 3, parameters; 4, block; 5, identifier:self; 6, identifier:queue_shard; 7, default_parameter; 8, expression_statement; 9, if_statement; 10, expression_statement; 11, for_statement; 12, return_statement; 13, identifier:notifications_by_session_id; 14, None; 15, comment:"""Returns all the available notifications for a queue_shard. Args: queue_shard: urn of queue shard notifications_by_session_id: store notifications in this dict rather than creating a new one Returns: dict of notifications. keys are session ids. """; 16, comparison_operator:notifications_by_session_id is None; 17, block; 18, assignment; 19, identifier:notification; 20, call; 21, block; 22, identifier:notifications_by_session_id; 23, identifier:notifications_by_session_id; 24, None; 25, expression_statement; 26, identifier:end_time; 27, boolean_operator; 28, attribute; 29, argument_list; 30, expression_statement; 31, if_statement; 32, assignment; 33, attribute; 34, call; 35, attribute; 36, identifier:GetNotifications; 37, identifier:queue_shard; 38, identifier:end_time; 39, assignment; 40, identifier:existing; 41, comment:# If we have a notification for this session_id already, we only store; 42, comment:# the one that was scheduled last.; 43, block; 44, else_clause; 45, identifier:notifications_by_session_id; 46, dictionary; 47, identifier:self; 48, identifier:frozen_timestamp; 49, attribute; 50, argument_list; 51, identifier:self; 52, identifier:data_store; 53, identifier:existing; 54, call; 55, if_statement; 56, block; 57, attribute; 58, identifier:Now; 59, attribute; 60, argument_list; 61, comparison_operator:notification.first_queued > existing.first_queued; 62, block; 63, elif_clause; 64, expression_statement; 65, identifier:rdfvalue; 66, identifier:RDFDatetime; 67, identifier:notifications_by_session_id; 68, identifier:get; 69, attribute; 70, attribute; 71, attribute; 72, expression_statement; 73, boolean_operator; 74, comment:# Multiple notifications with the same timestamp should not happen.; 75, comment:# We can still do the correct thing and use the latest one.; 76, block; 77, assignment; 78, identifier:notification; 79, identifier:session_id; 80, identifier:notification; 81, identifier:first_queued; 82, identifier:existing; 83, identifier:first_queued; 84, assignment; 85, comparison_operator:notification.first_queued == existing.first_queued; 86, parenthesized_expression; 87, expression_statement; 88, expression_statement; 89, subscript; 90, identifier:notification; 91, subscript; 92, identifier:notification; 93, attribute; 94, attribute; 95, comparison_operator:notification.last_status > existing.last_status; 96, call; 97, assignment; 98, identifier:notifications_by_session_id; 99, attribute; 100, identifier:notifications_by_session_id; 101, attribute; 102, identifier:notification; 103, identifier:first_queued; 104, identifier:existing; 105, identifier:first_queued; 106, attribute; 107, attribute; 108, attribute; 109, argument_list; 110, subscript; 111, identifier:notification; 112, identifier:notification; 113, identifier:session_id; 114, identifier:notification; 115, identifier:session_id; 116, identifier:notification; 117, identifier:last_status; 118, identifier:existing; 119, identifier:last_status; 120, identifier:logging; 121, identifier:warning; 122, string:"Notifications with equal first_queued fields detected: %s %s"; 123, identifier:notification; 124, identifier:existing; 125, identifier:notifications_by_session_id; 126, attribute; 127, identifier:notification; 128, identifier:session_id
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 7, 13; 7, 14; 8, 15; 9, 16; 9, 17; 10, 18; 11, 19; 11, 20; 11, 21; 12, 22; 16, 23; 16, 24; 17, 25; 18, 26; 18, 27; 20, 28; 20, 29; 21, 30; 21, 31; 25, 32; 27, 33; 27, 34; 28, 35; 28, 36; 29, 37; 29, 38; 30, 39; 31, 40; 31, 41; 31, 42; 31, 43; 31, 44; 32, 45; 32, 46; 33, 47; 33, 48; 34, 49; 34, 50; 35, 51; 35, 52; 39, 53; 39, 54; 43, 55; 44, 56; 49, 57; 49, 58; 54, 59; 54, 60; 55, 61; 55, 62; 55, 63; 56, 64; 57, 65; 57, 66; 59, 67; 59, 68; 60, 69; 61, 70; 61, 71; 62, 72; 63, 73; 63, 74; 63, 75; 63, 76; 64, 77; 69, 78; 69, 79; 70, 80; 70, 81; 71, 82; 71, 83; 72, 84; 73, 85; 73, 86; 76, 87; 76, 88; 77, 89; 77, 90; 84, 91; 84, 92; 85, 93; 85, 94; 86, 95; 87, 96; 88, 97; 89, 98; 89, 99; 91, 100; 91, 101; 93, 102; 93, 103; 94, 104; 94, 105; 95, 106; 95, 107; 96, 108; 96, 109; 97, 110; 97, 111; 99, 112; 99, 113; 101, 114; 101, 115; 106, 116; 106, 117; 107, 118; 107, 119; 108, 120; 108, 121; 109, 122; 109, 123; 109, 124; 110, 125; 110, 126; 126, 127; 126, 128
def _GetUnsortedNotifications(self, queue_shard, notifications_by_session_id=None): """Returns all the available notifications for a queue_shard. Args: queue_shard: urn of queue shard notifications_by_session_id: store notifications in this dict rather than creating a new one Returns: dict of notifications. keys are session ids. """ if notifications_by_session_id is None: notifications_by_session_id = {} end_time = self.frozen_timestamp or rdfvalue.RDFDatetime.Now() for notification in self.data_store.GetNotifications(queue_shard, end_time): existing = notifications_by_session_id.get(notification.session_id) if existing: # If we have a notification for this session_id already, we only store # the one that was scheduled last. if notification.first_queued > existing.first_queued: notifications_by_session_id[notification.session_id] = notification elif notification.first_queued == existing.first_queued and ( notification.last_status > existing.last_status): # Multiple notifications with the same timestamp should not happen. # We can still do the correct thing and use the latest one. logging.warning( "Notifications with equal first_queued fields detected: %s %s", notification, existing) notifications_by_session_id[notification.session_id] = notification else: notifications_by_session_id[notification.session_id] = notification return notifications_by_session_id
0, module; 1, function_definition; 2, function_name:Dump; 3, parameters; 4, block; 5, identifier:obj; 6, default_parameter; 7, default_parameter; 8, expression_statement; 9, comment:# Python 2 json.dumps expects separators as a tuple of bytes, while; 10, comment:# Python 3 expects them to be a tuple of unicode strings. Pytype; 11, comment:# is too dumb to infer the result of the if statement that sets; 12, comment:# _SEPARATORS and complains when running in Python 3 mode.; 13, expression_statement; 14, comment:# pytype: disable=wrong-arg-types; 15, comment:# `text` is an instance of `bytes` if the object to serialize does not contain; 16, comment:# any unicode characters, otherwise it is `unicode`. See [1] for details.; 17, comment:#; 18, comment:# [1]: https://bugs.python.org/issue13769; 19, if_statement; 20, return_statement; 21, identifier:sort_keys; 22, False; 23, identifier:encoder; 24, None; 25, comment:"""Stringifies a Python object into its JSON representation. Args: obj: A Python object to convert to JSON. sort_keys: If True, output dictionaries keys in sorted (ascending) order. encoder: An (optional) encoder class to use. Returns: A JSON representation of the given object. """; 26, assignment; 27, boolean_operator; 28, block; 29, identifier:text; 30, identifier:text; 31, call; 32, attribute; 33, call; 34, expression_statement; 35, comment:# pytype: disable=attribute-error; 36, attribute; 37, argument_list; 38, identifier:compatibility; 39, identifier:PY2; 40, identifier:isinstance; 41, argument_list; 42, assignment; 43, identifier:json; 44, identifier:dumps; 45, identifier:obj; 46, keyword_argument; 47, keyword_argument; 48, keyword_argument; 49, keyword_argument; 50, keyword_argument; 51, identifier:text; 52, identifier:bytes; 53, identifier:text; 54, call; 55, identifier:indent; 56, integer:2; 57, identifier:sort_keys; 58, identifier:sort_keys; 59, identifier:ensure_ascii; 60, False; 61, identifier:cls; 62, identifier:encoder; 63, identifier:separators; 64, identifier:_SEPARATORS; 65, attribute; 66, argument_list; 67, identifier:text; 68, identifier:decode; 69, string:"utf-8"
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 4, 14; 4, 15; 4, 16; 4, 17; 4, 18; 4, 19; 4, 20; 6, 21; 6, 22; 7, 23; 7, 24; 8, 25; 13, 26; 19, 27; 19, 28; 20, 29; 26, 30; 26, 31; 27, 32; 27, 33; 28, 34; 28, 35; 31, 36; 31, 37; 32, 38; 32, 39; 33, 40; 33, 41; 34, 42; 36, 43; 36, 44; 37, 45; 37, 46; 37, 47; 37, 48; 37, 49; 37, 50; 41, 51; 41, 52; 42, 53; 42, 54; 46, 55; 46, 56; 47, 57; 47, 58; 48, 59; 48, 60; 49, 61; 49, 62; 50, 63; 50, 64; 54, 65; 54, 66; 65, 67; 65, 68; 66, 69
def Dump(obj, sort_keys = False, encoder = None): """Stringifies a Python object into its JSON representation. Args: obj: A Python object to convert to JSON. sort_keys: If True, output dictionaries keys in sorted (ascending) order. encoder: An (optional) encoder class to use. Returns: A JSON representation of the given object. """ # Python 2 json.dumps expects separators as a tuple of bytes, while # Python 3 expects them to be a tuple of unicode strings. Pytype # is too dumb to infer the result of the if statement that sets # _SEPARATORS and complains when running in Python 3 mode. text = json.dumps( obj, indent=2, sort_keys=sort_keys, ensure_ascii=False, cls=encoder, separators=_SEPARATORS) # pytype: disable=wrong-arg-types # `text` is an instance of `bytes` if the object to serialize does not contain # any unicode characters, otherwise it is `unicode`. See [1] for details. # # [1]: https://bugs.python.org/issue13769 if compatibility.PY2 and isinstance(text, bytes): text = text.decode("utf-8") # pytype: disable=attribute-error return text
0, module; 1, function_definition; 2, function_name:NamedPlaceholders; 3, parameters; 4, block; 5, identifier:iterable; 6, expression_statement; 7, expression_statement; 8, return_statement; 9, comment:"""Returns named placeholders from all elements of the given iterable. Use this function for VALUES of MySQL INSERTs. To account for Iterables with undefined order (dicts before Python 3.6), this function sorts column names. Examples: >>> NamedPlaceholders({"password": "foo", "name": "bar"}) u'(%(name)s, %(password)s)' Args: iterable: The iterable of strings to be used as placeholder keys. Returns: A string containing a tuple of comma-separated, sorted, named, placeholders. """; 10, assignment; 11, call; 12, identifier:placeholders; 13, call; 14, attribute; 15, argument_list; 16, attribute; 17, generator_expression; 18, string:"({})"; 19, identifier:format; 20, identifier:placeholders; 21, string:", "; 22, identifier:join; 23, call; 24, for_in_clause; 25, attribute; 26, argument_list; 27, identifier:key; 28, call; 29, string:"%({})s"; 30, identifier:format; 31, identifier:key; 32, identifier:sorted; 33, argument_list; 34, identifier:iterable
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 4, 6; 4, 7; 4, 8; 6, 9; 7, 10; 8, 11; 10, 12; 10, 13; 11, 14; 11, 15; 13, 16; 13, 17; 14, 18; 14, 19; 15, 20; 16, 21; 16, 22; 17, 23; 17, 24; 23, 25; 23, 26; 24, 27; 24, 28; 25, 29; 25, 30; 26, 31; 28, 32; 28, 33; 33, 34
def NamedPlaceholders(iterable): """Returns named placeholders from all elements of the given iterable. Use this function for VALUES of MySQL INSERTs. To account for Iterables with undefined order (dicts before Python 3.6), this function sorts column names. Examples: >>> NamedPlaceholders({"password": "foo", "name": "bar"}) u'(%(name)s, %(password)s)' Args: iterable: The iterable of strings to be used as placeholder keys. Returns: A string containing a tuple of comma-separated, sorted, named, placeholders. """ placeholders = ", ".join("%({})s".format(key) for key in sorted(iterable)) return "({})".format(placeholders)
0, module; 1, function_definition; 2, function_name:Columns; 3, parameters; 4, block; 5, identifier:iterable; 6, expression_statement; 7, expression_statement; 8, return_statement; 9, comment:"""Returns a string of column names for MySQL INSERTs. To account for Iterables with undefined order (dicts before Python 3.6), this function sorts column names. Examples: >>> Columns({"password": "foo", "name": "bar"}) u'(`name`, `password`)' Args: iterable: The iterable of strings to be used as column names. Returns: A string containing a tuple of sorted comma-separated column names. """; 10, assignment; 11, call; 12, identifier:columns; 13, call; 14, attribute; 15, argument_list; 16, identifier:sorted; 17, argument_list; 18, string:"({})"; 19, identifier:format; 20, call; 21, identifier:iterable; 22, attribute; 23, generator_expression; 24, string:", "; 25, identifier:join; 26, call; 27, for_in_clause; 28, attribute; 29, argument_list; 30, identifier:col; 31, identifier:columns; 32, string:"`{}`"; 33, identifier:format; 34, identifier:col
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 4, 6; 4, 7; 4, 8; 6, 9; 7, 10; 8, 11; 10, 12; 10, 13; 11, 14; 11, 15; 13, 16; 13, 17; 14, 18; 14, 19; 15, 20; 17, 21; 20, 22; 20, 23; 22, 24; 22, 25; 23, 26; 23, 27; 26, 28; 26, 29; 27, 30; 27, 31; 28, 32; 28, 33; 29, 34
def Columns(iterable): """Returns a string of column names for MySQL INSERTs. To account for Iterables with undefined order (dicts before Python 3.6), this function sorts column names. Examples: >>> Columns({"password": "foo", "name": "bar"}) u'(`name`, `password`)' Args: iterable: The iterable of strings to be used as column names. Returns: A string containing a tuple of sorted comma-separated column names. """ columns = sorted(iterable) return "({})".format(", ".join("`{}`".format(col) for col in columns))
0, module; 1, function_definition; 2, function_name:GetArtifactsForCollection; 3, parameters; 4, block; 5, identifier:os_name; 6, identifier:artifact_list; 7, expression_statement; 8, expression_statement; 9, expression_statement; 10, return_statement; 11, comment:"""Wrapper for the ArtifactArranger. Extend the artifact list by dependencies and sort the artifacts to resolve the dependencies. Args: os_name: String specifying the OS name. artifact_list: List of requested artifact names. Returns: A list of artifacts such that if they are collected in the given order their dependencies are resolved. """; 12, assignment; 13, assignment; 14, identifier:artifact_names; 15, identifier:artifact_arranger; 16, call; 17, identifier:artifact_names; 18, call; 19, identifier:ArtifactArranger; 20, argument_list; 21, attribute; 22, argument_list; 23, identifier:os_name; 24, identifier:artifact_list; 25, identifier:artifact_arranger; 26, identifier:GetArtifactsInProperOrder
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 4, 9; 4, 10; 7, 11; 8, 12; 9, 13; 10, 14; 12, 15; 12, 16; 13, 17; 13, 18; 16, 19; 16, 20; 18, 21; 18, 22; 20, 23; 20, 24; 21, 25; 21, 26
def GetArtifactsForCollection(os_name, artifact_list): """Wrapper for the ArtifactArranger. Extend the artifact list by dependencies and sort the artifacts to resolve the dependencies. Args: os_name: String specifying the OS name. artifact_list: List of requested artifact names. Returns: A list of artifacts such that if they are collected in the given order their dependencies are resolved. """ artifact_arranger = ArtifactArranger(os_name, artifact_list) artifact_names = artifact_arranger.GetArtifactsInProperOrder() return artifact_names
0, module; 1, function_definition; 2, function_name:_FilterOutPathInfoDuplicates; 3, parameters; 4, block; 5, identifier:path_infos; 6, expression_statement; 7, expression_statement; 8, for_statement; 9, function_definition; 10, for_statement; 11, return_statement; 12, comment:"""Filters out duplicates from passed PathInfo objects. Args: path_infos: An iterable with PathInfo objects. Returns: A list of PathInfo objects with duplicates removed. Duplicates are removed following this logic: they're sorted by (ctime, mtime, atime, inode number) in the descending order and then the first one is taken and the others are dropped. """; 13, assignment; 14, identifier:pi; 15, identifier:path_infos; 16, block; 17, function_name:_SortKey; 18, parameters; 19, block; 20, identifier:pi_values; 21, call; 22, block; 23, list_comprehension; 24, identifier:pi_dict; 25, dictionary; 26, expression_statement; 27, expression_statement; 28, identifier:pi; 29, return_statement; 30, attribute; 31, argument_list; 32, if_statement; 33, subscript; 34, for_in_clause; 35, assignment; 36, call; 37, tuple; 38, identifier:pi_dict; 39, identifier:values; 40, comparison_operator:len(pi_values) > 1; 41, block; 42, identifier:v; 43, integer:0; 44, identifier:v; 45, call; 46, identifier:path_key; 47, tuple; 48, attribute; 49, argument_list; 50, attribute; 51, attribute; 52, attribute; 53, attribute; 54, call; 55, integer:1; 56, expression_statement; 57, attribute; 58, argument_list; 59, attribute; 60, call; 61, call; 62, identifier:append; 63, identifier:pi; 64, attribute; 65, identifier:st_ctime; 66, attribute; 67, identifier:st_mtime; 68, attribute; 69, identifier:st_atime; 70, attribute; 71, identifier:st_ino; 72, identifier:len; 73, argument_list; 74, call; 75, identifier:pi_dict; 76, identifier:values; 77, identifier:pi; 78, identifier:path_type; 79, attribute; 80, argument_list; 81, attribute; 82, argument_list; 83, identifier:pi; 84, identifier:stat_entry; 85, identifier:pi; 86, identifier:stat_entry; 87, identifier:pi; 88, identifier:stat_entry; 89, identifier:pi; 90, identifier:stat_entry; 91, identifier:pi_values; 92, attribute; 93, argument_list; 94, identifier:pi; 95, identifier:GetPathID; 96, identifier:pi_dict; 97, identifier:setdefault; 98, identifier:path_key; 99, list; 100, identifier:pi_values; 101, identifier:sort; 102, keyword_argument; 103, keyword_argument; 104, identifier:key; 105, identifier:_SortKey; 106, identifier:reverse; 107, True
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 4, 6; 4, 7; 4, 8; 4, 9; 4, 10; 4, 11; 6, 12; 7, 13; 8, 14; 8, 15; 8, 16; 9, 17; 9, 18; 9, 19; 10, 20; 10, 21; 10, 22; 11, 23; 13, 24; 13, 25; 16, 26; 16, 27; 18, 28; 19, 29; 21, 30; 21, 31; 22, 32; 23, 33; 23, 34; 26, 35; 27, 36; 29, 37; 30, 38; 30, 39; 32, 40; 32, 41; 33, 42; 33, 43; 34, 44; 34, 45; 35, 46; 35, 47; 36, 48; 36, 49; 37, 50; 37, 51; 37, 52; 37, 53; 40, 54; 40, 55; 41, 56; 45, 57; 45, 58; 47, 59; 47, 60; 48, 61; 48, 62; 49, 63; 50, 64; 50, 65; 51, 66; 51, 67; 52, 68; 52, 69; 53, 70; 53, 71; 54, 72; 54, 73; 56, 74; 57, 75; 57, 76; 59, 77; 59, 78; 60, 79; 60, 80; 61, 81; 61, 82; 64, 83; 64, 84; 66, 85; 66, 86; 68, 87; 68, 88; 70, 89; 70, 90; 73, 91; 74, 92; 74, 93; 79, 94; 79, 95; 81, 96; 81, 97; 82, 98; 82, 99; 92, 100; 92, 101; 93, 102; 93, 103; 102, 104; 102, 105; 103, 106; 103, 107
def _FilterOutPathInfoDuplicates(path_infos): """Filters out duplicates from passed PathInfo objects. Args: path_infos: An iterable with PathInfo objects. Returns: A list of PathInfo objects with duplicates removed. Duplicates are removed following this logic: they're sorted by (ctime, mtime, atime, inode number) in the descending order and then the first one is taken and the others are dropped. """ pi_dict = {} for pi in path_infos: path_key = (pi.path_type, pi.GetPathID()) pi_dict.setdefault(path_key, []).append(pi) def _SortKey(pi): return ( pi.stat_entry.st_ctime, pi.stat_entry.st_mtime, pi.stat_entry.st_atime, pi.stat_entry.st_ino, ) for pi_values in pi_dict.values(): if len(pi_values) > 1: pi_values.sort(key=_SortKey, reverse=True) return [v[0] for v in pi_dict.values()]
0, module; 1, function_definition; 2, function_name:DrainTaskSchedulerQueueForClient; 3, parameters; 4, block; 5, identifier:self; 6, identifier:client; 7, default_parameter; 8, expression_statement; 9, if_statement; 10, if_statement; 11, expression_statement; 12, expression_statement; 13, comment:# Drain the queue for this client; 14, if_statement; 15, expression_statement; 16, if_statement; 17, return_statement; 18, identifier:max_count; 19, None; 20, comment:"""Drains the client's Task Scheduler queue. 1) Get all messages in the client queue. 2) Sort these into a set of session_ids. 3) Use data_store.DB.ResolvePrefix() to query all requests. 4) Delete all responses for retransmitted messages (if needed). Args: client: The ClientURN object specifying this client. max_count: The maximum number of messages we will issue for the client. If not given, uses self.max_queue_size . Returns: The tasks respresenting the messages returned. If we can not send them, we can reschedule them for later. """; 21, comparison_operator:max_count is None; 22, block; 23, comparison_operator:max_count <= 0; 24, block; 25, assignment; 26, assignment; 27, call; 28, block; 29, else_clause; 30, call; 31, identifier:result; 32, block; 33, identifier:result; 34, identifier:max_count; 35, None; 36, expression_statement; 37, identifier:max_count; 38, integer:0; 39, return_statement; 40, identifier:client; 41, call; 42, identifier:start_time; 43, call; 44, attribute; 45, argument_list; 46, expression_statement; 47, expression_statement; 48, block; 49, attribute; 50, argument_list; 51, expression_statement; 52, assignment; 53, list; 54, attribute; 55, argument_list; 56, attribute; 57, argument_list; 58, identifier:data_store; 59, identifier:RelationalDBEnabled; 60, assignment; 61, assignment; 62, expression_statement; 63, expression_statement; 64, expression_statement; 65, expression_statement; 66, for_statement; 67, if_statement; 68, call; 69, identifier:IncrementCounter; 70, string:"grr_messages_sent"; 71, call; 72, call; 73, identifier:max_count; 74, attribute; 75, identifier:rdf_client; 76, identifier:ClientURN; 77, identifier:client; 78, identifier:time; 79, identifier:time; 80, identifier:action_requests; 81, call; 82, identifier:result; 83, list_comprehension; 84, assignment; 85, assignment; 86, assignment; 87, assignment; 88, identifier:task; 89, identifier:new_tasks; 90, block; 91, identifier:check_before_sending; 92, block; 93, attribute; 94, argument_list; 95, identifier:len; 96, argument_list; 97, attribute; 98, argument_list; 99, identifier:self; 100, identifier:max_queue_size; 101, attribute; 102, argument_list; 103, call; 104, for_in_clause; 105, identifier:new_tasks; 106, call; 107, identifier:initial_ttl; 108, attribute; 109, identifier:check_before_sending; 110, list; 111, identifier:result; 112, list; 113, if_statement; 114, with_statement; 115, identifier:stats_collector_instance; 116, identifier:Get; 117, identifier:result; 118, identifier:logging; 119, identifier:debug; 120, string:"Drained %d messages for %s in %s seconds."; 121, call; 122, identifier:client; 123, binary_operator:time.time() - start_time; 124, attribute; 125, identifier:LeaseClientActionRequests; 126, call; 127, keyword_argument; 128, keyword_argument; 129, attribute; 130, argument_list; 131, identifier:r; 132, identifier:action_requests; 133, attribute; 134, argument_list; 135, call; 136, identifier:task_ttl; 137, comparison_operator:task.task_ttl < initial_ttl - 1; 138, comment:# This message has been leased before.; 139, block; 140, else_clause; 141, with_clause; 142, block; 143, identifier:len; 144, argument_list; 145, call; 146, identifier:start_time; 147, identifier:data_store; 148, identifier:REL_DB; 149, attribute; 150, argument_list; 151, identifier:lease_time; 152, call; 153, identifier:limit; 154, identifier:max_count; 155, identifier:rdf_flow_objects; 156, identifier:GRRMessageFromClientActionRequest; 157, identifier:r; 158, call; 159, identifier:QueryAndOwn; 160, keyword_argument; 161, keyword_argument; 162, keyword_argument; 163, attribute; 164, argument_list; 165, attribute; 166, binary_operator:initial_ttl - 1; 167, expression_statement; 168, block; 169, with_item; 170, expression_statement; 171, comment:# All messages that don't have a status yet should be sent again.; 172, for_statement; 173, identifier:result; 174, attribute; 175, argument_list; 176, identifier:client; 177, identifier:Basename; 178, attribute; 179, argument_list; 180, attribute; 181, argument_list; 182, identifier:queue; 183, call; 184, identifier:limit; 185, identifier:max_count; 186, identifier:lease_seconds; 187, attribute; 188, identifier:rdf_flows; 189, identifier:GrrMessage; 190, identifier:task; 191, identifier:task_ttl; 192, identifier:initial_ttl; 193, integer:1; 194, call; 195, expression_statement; 196, as_pattern; 197, assignment; 198, identifier:task; 199, identifier:check_before_sending; 200, block; 201, identifier:time; 202, identifier:time; 203, attribute; 204, identifier:FromSeconds; 205, attribute; 206, identifier:queue_manager; 207, identifier:QueueManager; 208, keyword_argument; 209, attribute; 210, argument_list; 211, identifier:self; 212, identifier:message_expiry_time; 213, attribute; 214, argument_list; 215, call; 216, call; 217, as_pattern_target; 218, identifier:status_found; 219, call; 220, if_statement; 221, identifier:rdfvalue; 222, identifier:Duration; 223, identifier:self; 224, identifier:message_expiry_time; 225, identifier:token; 226, attribute; 227, identifier:client; 228, identifier:Queue; 229, identifier:check_before_sending; 230, identifier:append; 231, identifier:task; 232, attribute; 233, argument_list; 234, attribute; 235, argument_list; 236, identifier:manager; 237, attribute; 238, argument_list; 239, comparison_operator:task not in status_found; 240, block; 241, else_clause; 242, identifier:self; 243, identifier:token; 244, identifier:result; 245, identifier:append; 246, identifier:task; 247, identifier:queue_manager; 248, identifier:QueueManager; 249, keyword_argument; 250, identifier:manager; 251, identifier:MultiCheckStatus; 252, identifier:check_before_sending; 253, identifier:task; 254, identifier:status_found; 255, expression_statement; 256, block; 257, identifier:token; 258, attribute; 259, call; 260, expression_statement; 261, identifier:self; 262, identifier:token; 263, attribute; 264, argument_list; 265, call; 266, identifier:result; 267, identifier:append; 268, identifier:task; 269, attribute; 270, argument_list; 271, identifier:manager; 272, identifier:DeQueueClientRequest; 273, identifier:task
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 4, 14; 4, 15; 4, 16; 4, 17; 7, 18; 7, 19; 8, 20; 9, 21; 9, 22; 10, 23; 10, 24; 11, 25; 12, 26; 14, 27; 14, 28; 14, 29; 15, 30; 16, 31; 16, 32; 17, 33; 21, 34; 21, 35; 22, 36; 23, 37; 23, 38; 24, 39; 25, 40; 25, 41; 26, 42; 26, 43; 27, 44; 27, 45; 28, 46; 28, 47; 29, 48; 30, 49; 30, 50; 32, 51; 36, 52; 39, 53; 41, 54; 41, 55; 43, 56; 43, 57; 44, 58; 44, 59; 46, 60; 47, 61; 48, 62; 48, 63; 48, 64; 48, 65; 48, 66; 48, 67; 49, 68; 49, 69; 50, 70; 50, 71; 51, 72; 52, 73; 52, 74; 54, 75; 54, 76; 55, 77; 56, 78; 56, 79; 60, 80; 60, 81; 61, 82; 61, 83; 62, 84; 63, 85; 64, 86; 65, 87; 66, 88; 66, 89; 66, 90; 67, 91; 67, 92; 68, 93; 68, 94; 71, 95; 71, 96; 72, 97; 72, 98; 74, 99; 74, 100; 81, 101; 81, 102; 83, 103; 83, 104; 84, 105; 84, 106; 85, 107; 85, 108; 86, 109; 86, 110; 87, 111; 87, 112; 90, 113; 92, 114; 93, 115; 93, 116; 96, 117; 97, 118; 97, 119; 98, 120; 98, 121; 98, 122; 98, 123; 101, 124; 101, 125; 102, 126; 102, 127; 102, 128; 103, 129; 103, 130; 104, 131; 104, 132; 106, 133; 106, 134; 108, 135; 108, 136; 113, 137; 113, 138; 113, 139; 113, 140; 114, 141; 114, 142; 121, 143; 121, 144; 123, 145; 123, 146; 124, 147; 124, 148; 126, 149; 126, 150; 127, 151; 127, 152; 128, 153; 128, 154; 129, 155; 129, 156; 130, 157; 133, 158; 133, 159; 134, 160; 134, 161; 134, 162; 135, 163; 135, 164; 137, 165; 137, 166; 139, 167; 140, 168; 141, 169; 142, 170; 142, 171; 142, 172; 144, 173; 145, 174; 145, 175; 149, 176; 149, 177; 152, 178; 152, 179; 158, 180; 158, 181; 160, 182; 160, 183; 161, 184; 161, 185; 162, 186; 162, 187; 163, 188; 163, 189; 165, 190; 165, 191; 166, 192; 166, 193; 167, 194; 168, 195; 169, 196; 170, 197; 172, 198; 172, 199; 172, 200; 174, 201; 174, 202; 178, 203; 178, 204; 179, 205; 180, 206; 180, 207; 181, 208; 183, 209; 183, 210; 187, 211; 187, 212; 194, 213; 194, 214; 195, 215; 196, 216; 196, 217; 197, 218; 197, 219; 200, 220; 203, 221; 203, 222; 205, 223; 205, 224; 208, 225; 208, 226; 209, 227; 209, 228; 213, 229; 213, 230; 214, 231; 215, 232; 215, 233; 216, 234; 216, 235; 217, 236; 219, 237; 219, 238; 220, 239; 220, 240; 220, 241; 226, 242; 226, 243; 232, 244; 232, 245; 233, 246; 234, 247; 234, 248; 235, 249; 237, 250; 237, 251; 238, 252; 239, 253; 239, 254; 240, 255; 241, 256; 249, 257; 249, 258; 255, 259; 256, 260; 258, 261; 258, 262; 259, 263; 259, 264; 260, 265; 263, 266; 263, 267; 264, 268; 265, 269; 265, 270; 269, 271; 269, 272; 270, 273
def DrainTaskSchedulerQueueForClient(self, client, max_count=None): """Drains the client's Task Scheduler queue. 1) Get all messages in the client queue. 2) Sort these into a set of session_ids. 3) Use data_store.DB.ResolvePrefix() to query all requests. 4) Delete all responses for retransmitted messages (if needed). Args: client: The ClientURN object specifying this client. max_count: The maximum number of messages we will issue for the client. If not given, uses self.max_queue_size . Returns: The tasks respresenting the messages returned. If we can not send them, we can reschedule them for later. """ if max_count is None: max_count = self.max_queue_size if max_count <= 0: return [] client = rdf_client.ClientURN(client) start_time = time.time() # Drain the queue for this client if data_store.RelationalDBEnabled(): action_requests = data_store.REL_DB.LeaseClientActionRequests( client.Basename(), lease_time=rdfvalue.Duration.FromSeconds(self.message_expiry_time), limit=max_count) result = [ rdf_flow_objects.GRRMessageFromClientActionRequest(r) for r in action_requests ] else: new_tasks = queue_manager.QueueManager(token=self.token).QueryAndOwn( queue=client.Queue(), limit=max_count, lease_seconds=self.message_expiry_time) initial_ttl = rdf_flows.GrrMessage().task_ttl check_before_sending = [] result = [] for task in new_tasks: if task.task_ttl < initial_ttl - 1: # This message has been leased before. check_before_sending.append(task) else: result.append(task) if check_before_sending: with queue_manager.QueueManager(token=self.token) as manager: status_found = manager.MultiCheckStatus(check_before_sending) # All messages that don't have a status yet should be sent again. for task in check_before_sending: if task not in status_found: result.append(task) else: manager.DeQueueClientRequest(task) stats_collector_instance.Get().IncrementCounter("grr_messages_sent", len(result)) if result: logging.debug("Drained %d messages for %s in %s seconds.", len(result), client, time.time() - start_time) return result
0, module; 1, function_definition; 2, function_name:federated_query; 3, parameters; 4, block; 5, identifier:self; 6, identifier:environment_id; 7, default_parameter; 8, default_parameter; 9, default_parameter; 10, default_parameter; 11, default_parameter; 12, default_parameter; 13, default_parameter; 14, default_parameter; 15, default_parameter; 16, default_parameter; 17, default_parameter; 18, default_parameter; 19, default_parameter; 20, default_parameter; 21, default_parameter; 22, default_parameter; 23, default_parameter; 24, default_parameter; 25, default_parameter; 26, default_parameter; 27, default_parameter; 28, dictionary_splat_pattern; 29, expression_statement; 30, if_statement; 31, expression_statement; 32, if_statement; 33, expression_statement; 34, expression_statement; 35, expression_statement; 36, expression_statement; 37, expression_statement; 38, expression_statement; 39, return_statement; 40, identifier:filter; 41, None; 42, identifier:query; 43, None; 44, identifier:natural_language_query; 45, None; 46, identifier:passages; 47, None; 48, identifier:aggregation; 49, None; 50, identifier:count; 51, None; 52, identifier:return_fields; 53, None; 54, identifier:offset; 55, None; 56, identifier:sort; 57, None; 58, identifier:highlight; 59, None; 60, identifier:passages_fields; 61, None; 62, identifier:passages_count; 63, None; 64, identifier:passages_characters; 65, None; 66, identifier:deduplicate; 67, None; 68, identifier:deduplicate_field; 69, None; 70, identifier:collection_ids; 71, None; 72, identifier:similar; 73, None; 74, identifier:similar_document_ids; 75, None; 76, identifier:similar_fields; 77, None; 78, identifier:bias; 79, None; 80, identifier:logging_opt_out; 81, None; 82, identifier:kwargs; 83, comment:""" Long environment queries. Complex queries might be too long for a standard method query. By using this method, you can construct longer queries. However, these queries may take longer to complete than the standard method. For details, see the [Discovery service documentation](https://cloud.ibm.com/docs/services/discovery?topic=discovery-query-concepts#query-concepts). :param str environment_id: The ID of the environment. :param str filter: A cacheable query that excludes documents that don't mention the query content. Filter searches are better for metadata-type searches and for assessing the concepts in the data set. :param str query: A query search returns all documents in your data set with full enrichments and full text, but with the most relevant documents listed first. Use a query search when you want to find the most relevant search results. You cannot use **natural_language_query** and **query** at the same time. :param str natural_language_query: A natural language query that returns relevant documents by utilizing training data and natural language understanding. You cannot use **natural_language_query** and **query** at the same time. :param bool passages: A passages query that returns the most relevant passages from the results. :param str aggregation: An aggregation search that returns an exact answer by combining query search with filters. Useful for applications to build lists, tables, and time series. For a full list of possible aggregations, see the Query reference. :param int count: Number of results to return. :param str return_fields: A comma-separated list of the portion of the document hierarchy to return. :param int offset: The number of query results to skip at the beginning. For example, if the total number of results that are returned is 10 and the offset is 8, it returns the last two results. :param str sort: A comma-separated list of fields in the document to sort on. You can optionally specify a sort direction by prefixing the field with `-` for descending or `+` for ascending. Ascending is the default sort direction if no prefix is specified. This parameter cannot be used in the same query as the **bias** parameter. :param bool highlight: When true, a highlight field is returned for each result which contains the fields which match the query with `<em></em>` tags around the matching query terms. :param str passages_fields: A comma-separated list of fields that passages are drawn from. If this parameter not specified, then all top-level fields are included. :param int passages_count: The maximum number of passages to return. The search returns fewer passages if the requested total is not found. The default is `10`. The maximum is `100`. :param int passages_characters: The approximate number of characters that any one passage will have. :param bool deduplicate: When `true`, and used with a Watson Discovery News collection, duplicate results (based on the contents of the **title** field) are removed. Duplicate comparison is limited to the current query only; **offset** is not considered. This parameter is currently Beta functionality. :param str deduplicate_field: When specified, duplicate results based on the field specified are removed from the returned results. Duplicate comparison is limited to the current query only, **offset** is not considered. This parameter is currently Beta functionality. :param str collection_ids: A comma-separated list of collection IDs to be queried against. Required when querying multiple collections, invalid when performing a single collection query. :param bool similar: When `true`, results are returned based on their similarity to the document IDs specified in the **similar.document_ids** parameter. :param str similar_document_ids: A comma-separated list of document IDs to find similar documents. **Tip:** Include the **natural_language_query** parameter to expand the scope of the document similarity search with the natural language query. Other query parameters, such as **filter** and **query**, are subsequently applied and reduce the scope. :param str similar_fields: A comma-separated list of field names that are used as a basis for comparison to identify similar documents. If not specified, the entire document is used for comparison. :param str bias: Field which the returned results will be biased against. The specified field must be either a **date** or **number** format. When a **date** type field is specified returned results are biased towards field values closer to the current date. When a **number** type field is specified, returned results are biased towards higher field values. This parameter cannot be used in the same query as the **sort** parameter. :param bool logging_opt_out: If `true`, queries are not stored in the Discovery **Logs** endpoint. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse """; 84, comparison_operator:environment_id is None; 85, block; 86, assignment; 87, comparison_operator:'headers' in kwargs; 88, block; 89, assignment; 90, call; 91, assignment; 92, assignment; 93, assignment; 94, assignment; 95, identifier:response; 96, identifier:environment_id; 97, None; 98, raise_statement; 99, identifier:headers; 100, dictionary; 101, string; 102, identifier:kwargs; 103, expression_statement; 104, identifier:sdk_headers; 105, call; 106, attribute; 107, argument_list; 108, identifier:params; 109, dictionary; 110, identifier:data; 111, dictionary; 112, identifier:url; 113, call; 114, identifier:response; 115, call; 116, call; 117, pair; 118, string_content:headers; 119, call; 120, identifier:get_sdk_headers; 121, argument_list; 122, identifier:headers; 123, identifier:update; 124, identifier:sdk_headers; 125, pair; 126, pair; 127, pair; 128, pair; 129, pair; 130, pair; 131, pair; 132, pair; 133, pair; 134, pair; 135, pair; 136, pair; 137, pair; 138, pair; 139, pair; 140, pair; 141, pair; 142, pair; 143, pair; 144, pair; 145, pair; 146, attribute; 147, argument_list; 148, attribute; 149, argument_list; 150, identifier:ValueError; 151, argument_list; 152, string; 153, identifier:logging_opt_out; 154, attribute; 155, argument_list; 156, string; 157, string; 158, string; 159, string; 160, attribute; 161, string; 162, identifier:filter; 163, string; 164, identifier:query; 165, string; 166, identifier:natural_language_query; 167, string; 168, identifier:passages; 169, string; 170, identifier:aggregation; 171, string; 172, identifier:count; 173, string; 174, identifier:return_fields; 175, string; 176, identifier:offset; 177, string; 178, identifier:sort; 179, string; 180, identifier:highlight; 181, string; 182, identifier:passages_fields; 183, string; 184, identifier:passages_count; 185, string; 186, identifier:passages_characters; 187, string; 188, identifier:deduplicate; 189, string; 190, identifier:deduplicate_field; 191, string; 192, identifier:collection_ids; 193, string; 194, identifier:similar; 195, string; 196, identifier:similar_document_ids; 197, string; 198, identifier:similar_fields; 199, string; 200, identifier:bias; 201, string; 202, identifier:format; 203, list_splat; 204, identifier:self; 205, identifier:request; 206, keyword_argument; 207, keyword_argument; 208, keyword_argument; 209, keyword_argument; 210, keyword_argument; 211, keyword_argument; 212, string; 213, string_content:X-Watson-Logging-Opt-Out; 214, identifier:headers; 215, identifier:update; 216, call; 217, string_content:discovery; 218, string_content:V1; 219, string_content:federated_query; 220, string_content:version; 221, identifier:self; 222, identifier:version; 223, string_content:filter; 224, string_content:query; 225, string_content:natural_language_query; 226, string_content:passages; 227, string_content:aggregation; 228, string_content:count; 229, string_content:return; 230, string_content:offset; 231, string_content:sort; 232, string_content:highlight; 233, string_content:passages.fields; 234, string_content:passages.count; 235, string_content:passages.characters; 236, string_content:deduplicate; 237, string_content:deduplicate.field; 238, string_content:collection_ids; 239, string_content:similar; 240, string_content:similar.document_ids; 241, string_content:similar.fields; 242, string_content:bias; 243, string_content:/v1/environments/{0}/query; 244, call; 245, identifier:method; 246, string; 247, identifier:url; 248, identifier:url; 249, identifier:headers; 250, identifier:headers; 251, identifier:params; 252, identifier:params; 253, identifier:json; 254, identifier:data; 255, identifier:accept_json; 256, True; 257, string_content:environment_id must be provided; 258, attribute; 259, argument_list; 260, attribute; 261, argument_list; 262, string_content:POST; 263, identifier:kwargs; 264, identifier:get; 265, string; 266, identifier:self; 267, identifier:_encode_path_vars; 268, identifier:environment_id; 269, string_content:headers
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 3, 9; 3, 10; 3, 11; 3, 12; 3, 13; 3, 14; 3, 15; 3, 16; 3, 17; 3, 18; 3, 19; 3, 20; 3, 21; 3, 22; 3, 23; 3, 24; 3, 25; 3, 26; 3, 27; 3, 28; 4, 29; 4, 30; 4, 31; 4, 32; 4, 33; 4, 34; 4, 35; 4, 36; 4, 37; 4, 38; 4, 39; 7, 40; 7, 41; 8, 42; 8, 43; 9, 44; 9, 45; 10, 46; 10, 47; 11, 48; 11, 49; 12, 50; 12, 51; 13, 52; 13, 53; 14, 54; 14, 55; 15, 56; 15, 57; 16, 58; 16, 59; 17, 60; 17, 61; 18, 62; 18, 63; 19, 64; 19, 65; 20, 66; 20, 67; 21, 68; 21, 69; 22, 70; 22, 71; 23, 72; 23, 73; 24, 74; 24, 75; 25, 76; 25, 77; 26, 78; 26, 79; 27, 80; 27, 81; 28, 82; 29, 83; 30, 84; 30, 85; 31, 86; 32, 87; 32, 88; 33, 89; 34, 90; 35, 91; 36, 92; 37, 93; 38, 94; 39, 95; 84, 96; 84, 97; 85, 98; 86, 99; 86, 100; 87, 101; 87, 102; 88, 103; 89, 104; 89, 105; 90, 106; 90, 107; 91, 108; 91, 109; 92, 110; 92, 111; 93, 112; 93, 113; 94, 114; 94, 115; 98, 116; 100, 117; 101, 118; 103, 119; 105, 120; 105, 121; 106, 122; 106, 123; 107, 124; 109, 125; 111, 126; 111, 127; 111, 128; 111, 129; 111, 130; 111, 131; 111, 132; 111, 133; 111, 134; 111, 135; 111, 136; 111, 137; 111, 138; 111, 139; 111, 140; 111, 141; 111, 142; 111, 143; 111, 144; 111, 145; 113, 146; 113, 147; 115, 148; 115, 149; 116, 150; 116, 151; 117, 152; 117, 153; 119, 154; 119, 155; 121, 156; 121, 157; 121, 158; 125, 159; 125, 160; 126, 161; 126, 162; 127, 163; 127, 164; 128, 165; 128, 166; 129, 167; 129, 168; 130, 169; 130, 170; 131, 171; 131, 172; 132, 173; 132, 174; 133, 175; 133, 176; 134, 177; 134, 178; 135, 179; 135, 180; 136, 181; 136, 182; 137, 183; 137, 184; 138, 185; 138, 186; 139, 187; 139, 188; 140, 189; 140, 190; 141, 191; 141, 192; 142, 193; 142, 194; 143, 195; 143, 196; 144, 197; 144, 198; 145, 199; 145, 200; 146, 201; 146, 202; 147, 203; 148, 204; 148, 205; 149, 206; 149, 207; 149, 208; 149, 209; 149, 210; 149, 211; 151, 212; 152, 213; 154, 214; 154, 215; 155, 216; 156, 217; 157, 218; 158, 219; 159, 220; 160, 221; 160, 222; 161, 223; 163, 224; 165, 225; 167, 226; 169, 227; 171, 228; 173, 229; 175, 230; 177, 231; 179, 232; 181, 233; 183, 234; 185, 235; 187, 236; 189, 237; 191, 238; 193, 239; 195, 240; 197, 241; 199, 242; 201, 243; 203, 244; 206, 245; 206, 246; 207, 247; 207, 248; 208, 249; 208, 250; 209, 251; 209, 252; 210, 253; 210, 254; 211, 255; 211, 256; 212, 257; 216, 258; 216, 259; 244, 260; 244, 261; 246, 262; 258, 263; 258, 264; 259, 265; 260, 266; 260, 267; 261, 268; 265, 269
def federated_query(self, environment_id, filter=None, query=None, natural_language_query=None, passages=None, aggregation=None, count=None, return_fields=None, offset=None, sort=None, highlight=None, passages_fields=None, passages_count=None, passages_characters=None, deduplicate=None, deduplicate_field=None, collection_ids=None, similar=None, similar_document_ids=None, similar_fields=None, bias=None, logging_opt_out=None, **kwargs): """ Long environment queries. Complex queries might be too long for a standard method query. By using this method, you can construct longer queries. However, these queries may take longer to complete than the standard method. For details, see the [Discovery service documentation](https://cloud.ibm.com/docs/services/discovery?topic=discovery-query-concepts#query-concepts). :param str environment_id: The ID of the environment. :param str filter: A cacheable query that excludes documents that don't mention the query content. Filter searches are better for metadata-type searches and for assessing the concepts in the data set. :param str query: A query search returns all documents in your data set with full enrichments and full text, but with the most relevant documents listed first. Use a query search when you want to find the most relevant search results. You cannot use **natural_language_query** and **query** at the same time. :param str natural_language_query: A natural language query that returns relevant documents by utilizing training data and natural language understanding. You cannot use **natural_language_query** and **query** at the same time. :param bool passages: A passages query that returns the most relevant passages from the results. :param str aggregation: An aggregation search that returns an exact answer by combining query search with filters. Useful for applications to build lists, tables, and time series. For a full list of possible aggregations, see the Query reference. :param int count: Number of results to return. :param str return_fields: A comma-separated list of the portion of the document hierarchy to return. :param int offset: The number of query results to skip at the beginning. For example, if the total number of results that are returned is 10 and the offset is 8, it returns the last two results. :param str sort: A comma-separated list of fields in the document to sort on. You can optionally specify a sort direction by prefixing the field with `-` for descending or `+` for ascending. Ascending is the default sort direction if no prefix is specified. This parameter cannot be used in the same query as the **bias** parameter. :param bool highlight: When true, a highlight field is returned for each result which contains the fields which match the query with `<em></em>` tags around the matching query terms. :param str passages_fields: A comma-separated list of fields that passages are drawn from. If this parameter not specified, then all top-level fields are included. :param int passages_count: The maximum number of passages to return. The search returns fewer passages if the requested total is not found. The default is `10`. The maximum is `100`. :param int passages_characters: The approximate number of characters that any one passage will have. :param bool deduplicate: When `true`, and used with a Watson Discovery News collection, duplicate results (based on the contents of the **title** field) are removed. Duplicate comparison is limited to the current query only; **offset** is not considered. This parameter is currently Beta functionality. :param str deduplicate_field: When specified, duplicate results based on the field specified are removed from the returned results. Duplicate comparison is limited to the current query only, **offset** is not considered. This parameter is currently Beta functionality. :param str collection_ids: A comma-separated list of collection IDs to be queried against. Required when querying multiple collections, invalid when performing a single collection query. :param bool similar: When `true`, results are returned based on their similarity to the document IDs specified in the **similar.document_ids** parameter. :param str similar_document_ids: A comma-separated list of document IDs to find similar documents. **Tip:** Include the **natural_language_query** parameter to expand the scope of the document similarity search with the natural language query. Other query parameters, such as **filter** and **query**, are subsequently applied and reduce the scope. :param str similar_fields: A comma-separated list of field names that are used as a basis for comparison to identify similar documents. If not specified, the entire document is used for comparison. :param str bias: Field which the returned results will be biased against. The specified field must be either a **date** or **number** format. When a **date** type field is specified returned results are biased towards field values closer to the current date. When a **number** type field is specified, returned results are biased towards higher field values. This parameter cannot be used in the same query as the **sort** parameter. :param bool logging_opt_out: If `true`, queries are not stored in the Discovery **Logs** endpoint. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse """ if environment_id is None: raise ValueError('environment_id must be provided') headers = {'X-Watson-Logging-Opt-Out': logging_opt_out} if 'headers' in kwargs: headers.update(kwargs.get('headers')) sdk_headers = get_sdk_headers('discovery', 'V1', 'federated_query') headers.update(sdk_headers) params = {'version': self.version} data = { 'filter': filter, 'query': query, 'natural_language_query': natural_language_query, 'passages': passages, 'aggregation': aggregation, 'count': count, 'return': return_fields, 'offset': offset, 'sort': sort, 'highlight': highlight, 'passages.fields': passages_fields, 'passages.count': passages_count, 'passages.characters': passages_characters, 'deduplicate': deduplicate, 'deduplicate.field': deduplicate_field, 'collection_ids': collection_ids, 'similar': similar, 'similar.document_ids': similar_document_ids, 'similar.fields': similar_fields, 'bias': bias } url = '/v1/environments/{0}/query'.format( *self._encode_path_vars(environment_id)) response = self.request( method='POST', url=url, headers=headers, params=params, json=data, accept_json=True) return response
0, module; 1, function_definition; 2, function_name:query_relations; 3, parameters; 4, block; 5, identifier:self; 6, identifier:environment_id; 7, identifier:collection_id; 8, default_parameter; 9, default_parameter; 10, default_parameter; 11, default_parameter; 12, default_parameter; 13, default_parameter; 14, dictionary_splat_pattern; 15, expression_statement; 16, if_statement; 17, if_statement; 18, if_statement; 19, if_statement; 20, if_statement; 21, expression_statement; 22, if_statement; 23, expression_statement; 24, expression_statement; 25, expression_statement; 26, expression_statement; 27, expression_statement; 28, expression_statement; 29, return_statement; 30, identifier:entities; 31, None; 32, identifier:context; 33, None; 34, identifier:sort; 35, None; 36, identifier:filter; 37, None; 38, identifier:count; 39, None; 40, identifier:evidence_count; 41, None; 42, identifier:kwargs; 43, comment:""" Knowledge Graph relationship query. See the [Knowledge Graph documentation](https://cloud.ibm.com/docs/services/discovery?topic=discovery-kg#kg) for more details. :param str environment_id: The ID of the environment. :param str collection_id: The ID of the collection. :param list[QueryRelationsEntity] entities: An array of entities to find relationships for. :param QueryEntitiesContext context: Entity text to provide context for the queried entity and rank based on that association. For example, if you wanted to query the city of London in England your query would look for `London` with the context of `England`. :param str sort: The sorting method for the relationships, can be `score` or `frequency`. `frequency` is the number of unique times each entity is identified. The default is `score`. This parameter cannot be used in the same query as the **bias** parameter. :param QueryRelationsFilter filter: :param int count: The number of results to return. The default is `10`. The maximum is `1000`. :param int evidence_count: The number of evidence items to return for each result. The default is `0`. The maximum number of evidence items per query is 10,000. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse """; 44, comparison_operator:environment_id is None; 45, block; 46, comparison_operator:collection_id is None; 47, block; 48, comparison_operator:entities is not None; 49, block; 50, comparison_operator:context is not None; 51, block; 52, comparison_operator:filter is not None; 53, block; 54, assignment; 55, comparison_operator:'headers' in kwargs; 56, block; 57, assignment; 58, call; 59, assignment; 60, assignment; 61, assignment; 62, assignment; 63, identifier:response; 64, identifier:environment_id; 65, None; 66, raise_statement; 67, identifier:collection_id; 68, None; 69, raise_statement; 70, identifier:entities; 71, None; 72, expression_statement; 73, identifier:context; 74, None; 75, expression_statement; 76, identifier:filter; 77, None; 78, expression_statement; 79, identifier:headers; 80, dictionary; 81, string; 82, identifier:kwargs; 83, expression_statement; 84, identifier:sdk_headers; 85, call; 86, attribute; 87, argument_list; 88, identifier:params; 89, dictionary; 90, identifier:data; 91, dictionary; 92, identifier:url; 93, call; 94, identifier:response; 95, call; 96, call; 97, call; 98, assignment; 99, assignment; 100, assignment; 101, string_content:headers; 102, call; 103, identifier:get_sdk_headers; 104, argument_list; 105, identifier:headers; 106, identifier:update; 107, identifier:sdk_headers; 108, pair; 109, pair; 110, pair; 111, pair; 112, pair; 113, pair; 114, pair; 115, attribute; 116, argument_list; 117, attribute; 118, argument_list; 119, identifier:ValueError; 120, argument_list; 121, identifier:ValueError; 122, argument_list; 123, identifier:entities; 124, list_comprehension; 125, identifier:context; 126, call; 127, identifier:filter; 128, call; 129, attribute; 130, argument_list; 131, string; 132, string; 133, string; 134, string; 135, attribute; 136, string; 137, identifier:entities; 138, string; 139, identifier:context; 140, string; 141, identifier:sort; 142, string; 143, identifier:filter; 144, string; 145, identifier:count; 146, string; 147, identifier:evidence_count; 148, string; 149, identifier:format; 150, list_splat; 151, identifier:self; 152, identifier:request; 153, keyword_argument; 154, keyword_argument; 155, keyword_argument; 156, keyword_argument; 157, keyword_argument; 158, keyword_argument; 159, string; 160, string; 161, call; 162, for_in_clause; 163, attribute; 164, argument_list; 165, attribute; 166, argument_list; 167, identifier:headers; 168, identifier:update; 169, call; 170, string_content:discovery; 171, string_content:V1; 172, string_content:query_relations; 173, string_content:version; 174, identifier:self; 175, identifier:version; 176, string_content:entities; 177, string_content:context; 178, string_content:sort; 179, string_content:filter; 180, string_content:count; 181, string_content:evidence_count; 182, string_content:/v1/environments/{0}/collections/{1}/query_relations; 183, call; 184, identifier:method; 185, string; 186, identifier:url; 187, identifier:url; 188, identifier:headers; 189, identifier:headers; 190, identifier:params; 191, identifier:params; 192, identifier:json; 193, identifier:data; 194, identifier:accept_json; 195, True; 196, string_content:environment_id must be provided; 197, string_content:collection_id must be provided; 198, attribute; 199, argument_list; 200, identifier:x; 201, identifier:entities; 202, identifier:self; 203, identifier:_convert_model; 204, identifier:context; 205, identifier:QueryEntitiesContext; 206, identifier:self; 207, identifier:_convert_model; 208, identifier:filter; 209, identifier:QueryRelationsFilter; 210, attribute; 211, argument_list; 212, attribute; 213, argument_list; 214, string_content:POST; 215, identifier:self; 216, identifier:_convert_model; 217, identifier:x; 218, identifier:QueryRelationsEntity; 219, identifier:kwargs; 220, identifier:get; 221, string; 222, identifier:self; 223, identifier:_encode_path_vars; 224, identifier:environment_id; 225, identifier:collection_id; 226, string_content:headers
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 3, 9; 3, 10; 3, 11; 3, 12; 3, 13; 3, 14; 4, 15; 4, 16; 4, 17; 4, 18; 4, 19; 4, 20; 4, 21; 4, 22; 4, 23; 4, 24; 4, 25; 4, 26; 4, 27; 4, 28; 4, 29; 8, 30; 8, 31; 9, 32; 9, 33; 10, 34; 10, 35; 11, 36; 11, 37; 12, 38; 12, 39; 13, 40; 13, 41; 14, 42; 15, 43; 16, 44; 16, 45; 17, 46; 17, 47; 18, 48; 18, 49; 19, 50; 19, 51; 20, 52; 20, 53; 21, 54; 22, 55; 22, 56; 23, 57; 24, 58; 25, 59; 26, 60; 27, 61; 28, 62; 29, 63; 44, 64; 44, 65; 45, 66; 46, 67; 46, 68; 47, 69; 48, 70; 48, 71; 49, 72; 50, 73; 50, 74; 51, 75; 52, 76; 52, 77; 53, 78; 54, 79; 54, 80; 55, 81; 55, 82; 56, 83; 57, 84; 57, 85; 58, 86; 58, 87; 59, 88; 59, 89; 60, 90; 60, 91; 61, 92; 61, 93; 62, 94; 62, 95; 66, 96; 69, 97; 72, 98; 75, 99; 78, 100; 81, 101; 83, 102; 85, 103; 85, 104; 86, 105; 86, 106; 87, 107; 89, 108; 91, 109; 91, 110; 91, 111; 91, 112; 91, 113; 91, 114; 93, 115; 93, 116; 95, 117; 95, 118; 96, 119; 96, 120; 97, 121; 97, 122; 98, 123; 98, 124; 99, 125; 99, 126; 100, 127; 100, 128; 102, 129; 102, 130; 104, 131; 104, 132; 104, 133; 108, 134; 108, 135; 109, 136; 109, 137; 110, 138; 110, 139; 111, 140; 111, 141; 112, 142; 112, 143; 113, 144; 113, 145; 114, 146; 114, 147; 115, 148; 115, 149; 116, 150; 117, 151; 117, 152; 118, 153; 118, 154; 118, 155; 118, 156; 118, 157; 118, 158; 120, 159; 122, 160; 124, 161; 124, 162; 126, 163; 126, 164; 128, 165; 128, 166; 129, 167; 129, 168; 130, 169; 131, 170; 132, 171; 133, 172; 134, 173; 135, 174; 135, 175; 136, 176; 138, 177; 140, 178; 142, 179; 144, 180; 146, 181; 148, 182; 150, 183; 153, 184; 153, 185; 154, 186; 154, 187; 155, 188; 155, 189; 156, 190; 156, 191; 157, 192; 157, 193; 158, 194; 158, 195; 159, 196; 160, 197; 161, 198; 161, 199; 162, 200; 162, 201; 163, 202; 163, 203; 164, 204; 164, 205; 165, 206; 165, 207; 166, 208; 166, 209; 169, 210; 169, 211; 183, 212; 183, 213; 185, 214; 198, 215; 198, 216; 199, 217; 199, 218; 210, 219; 210, 220; 211, 221; 212, 222; 212, 223; 213, 224; 213, 225; 221, 226
def query_relations(self, environment_id, collection_id, entities=None, context=None, sort=None, filter=None, count=None, evidence_count=None, **kwargs): """ Knowledge Graph relationship query. See the [Knowledge Graph documentation](https://cloud.ibm.com/docs/services/discovery?topic=discovery-kg#kg) for more details. :param str environment_id: The ID of the environment. :param str collection_id: The ID of the collection. :param list[QueryRelationsEntity] entities: An array of entities to find relationships for. :param QueryEntitiesContext context: Entity text to provide context for the queried entity and rank based on that association. For example, if you wanted to query the city of London in England your query would look for `London` with the context of `England`. :param str sort: The sorting method for the relationships, can be `score` or `frequency`. `frequency` is the number of unique times each entity is identified. The default is `score`. This parameter cannot be used in the same query as the **bias** parameter. :param QueryRelationsFilter filter: :param int count: The number of results to return. The default is `10`. The maximum is `1000`. :param int evidence_count: The number of evidence items to return for each result. The default is `0`. The maximum number of evidence items per query is 10,000. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse """ if environment_id is None: raise ValueError('environment_id must be provided') if collection_id is None: raise ValueError('collection_id must be provided') if entities is not None: entities = [ self._convert_model(x, QueryRelationsEntity) for x in entities ] if context is not None: context = self._convert_model(context, QueryEntitiesContext) if filter is not None: filter = self._convert_model(filter, QueryRelationsFilter) headers = {} if 'headers' in kwargs: headers.update(kwargs.get('headers')) sdk_headers = get_sdk_headers('discovery', 'V1', 'query_relations') headers.update(sdk_headers) params = {'version': self.version} data = { 'entities': entities, 'context': context, 'sort': sort, 'filter': filter, 'count': count, 'evidence_count': evidence_count } url = '/v1/environments/{0}/collections/{1}/query_relations'.format( *self._encode_path_vars(environment_id, collection_id)) response = self.request( method='POST', url=url, headers=headers, params=params, json=data, accept_json=True) return response
0, module; 1, function_definition; 2, function_name:query_log; 3, parameters; 4, block; 5, identifier:self; 6, default_parameter; 7, default_parameter; 8, default_parameter; 9, default_parameter; 10, default_parameter; 11, dictionary_splat_pattern; 12, expression_statement; 13, expression_statement; 14, if_statement; 15, expression_statement; 16, expression_statement; 17, expression_statement; 18, expression_statement; 19, expression_statement; 20, return_statement; 21, identifier:filter; 22, None; 23, identifier:query; 24, None; 25, identifier:count; 26, None; 27, identifier:offset; 28, None; 29, identifier:sort; 30, None; 31, identifier:kwargs; 32, comment:""" Search the query and event log. Searches the query and event log to find query sessions that match the specified criteria. Searching the **logs** endpoint uses the standard Discovery query syntax for the parameters that are supported. :param str filter: A cacheable query that excludes documents that don't mention the query content. Filter searches are better for metadata-type searches and for assessing the concepts in the data set. :param str query: A query search returns all documents in your data set with full enrichments and full text, but with the most relevant documents listed first. Use a query search when you want to find the most relevant search results. You cannot use **natural_language_query** and **query** at the same time. :param int count: Number of results to return. The maximum for the **count** and **offset** values together in any one query is **10000**. :param int offset: The number of query results to skip at the beginning. For example, if the total number of results that are returned is 10 and the offset is 8, it returns the last two results. The maximum for the **count** and **offset** values together in any one query is **10000**. :param list[str] sort: A comma-separated list of fields in the document to sort on. You can optionally specify a sort direction by prefixing the field with `-` for descending or `+` for ascending. Ascending is the default sort direction if no prefix is specified. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse """; 33, assignment; 34, comparison_operator:'headers' in kwargs; 35, block; 36, assignment; 37, call; 38, assignment; 39, assignment; 40, assignment; 41, identifier:response; 42, identifier:headers; 43, dictionary; 44, string; 45, identifier:kwargs; 46, expression_statement; 47, identifier:sdk_headers; 48, call; 49, attribute; 50, argument_list; 51, identifier:params; 52, dictionary; 53, identifier:url; 54, string; 55, identifier:response; 56, call; 57, string_content:headers; 58, call; 59, identifier:get_sdk_headers; 60, argument_list; 61, identifier:headers; 62, identifier:update; 63, identifier:sdk_headers; 64, pair; 65, pair; 66, pair; 67, pair; 68, pair; 69, pair; 70, string_content:/v1/logs; 71, attribute; 72, argument_list; 73, attribute; 74, argument_list; 75, string; 76, string; 77, string; 78, string; 79, attribute; 80, string; 81, identifier:filter; 82, string; 83, identifier:query; 84, string; 85, identifier:count; 86, string; 87, identifier:offset; 88, string; 89, call; 90, identifier:self; 91, identifier:request; 92, keyword_argument; 93, keyword_argument; 94, keyword_argument; 95, keyword_argument; 96, keyword_argument; 97, identifier:headers; 98, identifier:update; 99, call; 100, string_content:discovery; 101, string_content:V1; 102, string_content:query_log; 103, string_content:version; 104, identifier:self; 105, identifier:version; 106, string_content:filter; 107, string_content:query; 108, string_content:count; 109, string_content:offset; 110, string_content:sort; 111, attribute; 112, argument_list; 113, identifier:method; 114, string; 115, identifier:url; 116, identifier:url; 117, identifier:headers; 118, identifier:headers; 119, identifier:params; 120, identifier:params; 121, identifier:accept_json; 122, True; 123, attribute; 124, argument_list; 125, identifier:self; 126, identifier:_convert_list; 127, identifier:sort; 128, string_content:GET; 129, identifier:kwargs; 130, identifier:get; 131, string; 132, string_content:headers
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 3, 9; 3, 10; 3, 11; 4, 12; 4, 13; 4, 14; 4, 15; 4, 16; 4, 17; 4, 18; 4, 19; 4, 20; 6, 21; 6, 22; 7, 23; 7, 24; 8, 25; 8, 26; 9, 27; 9, 28; 10, 29; 10, 30; 11, 31; 12, 32; 13, 33; 14, 34; 14, 35; 15, 36; 16, 37; 17, 38; 18, 39; 19, 40; 20, 41; 33, 42; 33, 43; 34, 44; 34, 45; 35, 46; 36, 47; 36, 48; 37, 49; 37, 50; 38, 51; 38, 52; 39, 53; 39, 54; 40, 55; 40, 56; 44, 57; 46, 58; 48, 59; 48, 60; 49, 61; 49, 62; 50, 63; 52, 64; 52, 65; 52, 66; 52, 67; 52, 68; 52, 69; 54, 70; 56, 71; 56, 72; 58, 73; 58, 74; 60, 75; 60, 76; 60, 77; 64, 78; 64, 79; 65, 80; 65, 81; 66, 82; 66, 83; 67, 84; 67, 85; 68, 86; 68, 87; 69, 88; 69, 89; 71, 90; 71, 91; 72, 92; 72, 93; 72, 94; 72, 95; 72, 96; 73, 97; 73, 98; 74, 99; 75, 100; 76, 101; 77, 102; 78, 103; 79, 104; 79, 105; 80, 106; 82, 107; 84, 108; 86, 109; 88, 110; 89, 111; 89, 112; 92, 113; 92, 114; 93, 115; 93, 116; 94, 117; 94, 118; 95, 119; 95, 120; 96, 121; 96, 122; 99, 123; 99, 124; 111, 125; 111, 126; 112, 127; 114, 128; 123, 129; 123, 130; 124, 131; 131, 132
def query_log(self, filter=None, query=None, count=None, offset=None, sort=None, **kwargs): """ Search the query and event log. Searches the query and event log to find query sessions that match the specified criteria. Searching the **logs** endpoint uses the standard Discovery query syntax for the parameters that are supported. :param str filter: A cacheable query that excludes documents that don't mention the query content. Filter searches are better for metadata-type searches and for assessing the concepts in the data set. :param str query: A query search returns all documents in your data set with full enrichments and full text, but with the most relevant documents listed first. Use a query search when you want to find the most relevant search results. You cannot use **natural_language_query** and **query** at the same time. :param int count: Number of results to return. The maximum for the **count** and **offset** values together in any one query is **10000**. :param int offset: The number of query results to skip at the beginning. For example, if the total number of results that are returned is 10 and the offset is 8, it returns the last two results. The maximum for the **count** and **offset** values together in any one query is **10000**. :param list[str] sort: A comma-separated list of fields in the document to sort on. You can optionally specify a sort direction by prefixing the field with `-` for descending or `+` for ascending. Ascending is the default sort direction if no prefix is specified. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse """ headers = {} if 'headers' in kwargs: headers.update(kwargs.get('headers')) sdk_headers = get_sdk_headers('discovery', 'V1', 'query_log') headers.update(sdk_headers) params = { 'version': self.version, 'filter': filter, 'query': query, 'count': count, 'offset': offset, 'sort': self._convert_list(sort) } url = '/v1/logs' response = self.request( method='GET', url=url, headers=headers, params=params, accept_json=True) return response
0, module; 1, function_definition; 2, function_name:list_workspaces; 3, parameters; 4, block; 5, identifier:self; 6, default_parameter; 7, default_parameter; 8, default_parameter; 9, default_parameter; 10, default_parameter; 11, dictionary_splat_pattern; 12, expression_statement; 13, expression_statement; 14, if_statement; 15, expression_statement; 16, expression_statement; 17, expression_statement; 18, expression_statement; 19, expression_statement; 20, return_statement; 21, identifier:page_limit; 22, None; 23, identifier:include_count; 24, None; 25, identifier:sort; 26, None; 27, identifier:cursor; 28, None; 29, identifier:include_audit; 30, None; 31, identifier:kwargs; 32, comment:""" List workspaces. List the workspaces associated with a Watson Assistant service instance. This operation is limited to 500 requests per 30 minutes. For more information, see **Rate limiting**. :param int page_limit: The number of records to return in each page of results. :param bool include_count: Whether to include information about the number of records returned. :param str sort: The attribute by which returned workspaces will be sorted. To reverse the sort order, prefix the value with a minus sign (`-`). :param str cursor: A token identifying the page of results to retrieve. :param bool include_audit: Whether to include the audit properties (`created` and `updated` timestamps) in the response. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse """; 33, assignment; 34, comparison_operator:'headers' in kwargs; 35, block; 36, assignment; 37, call; 38, assignment; 39, assignment; 40, assignment; 41, identifier:response; 42, identifier:headers; 43, dictionary; 44, string; 45, identifier:kwargs; 46, expression_statement; 47, identifier:sdk_headers; 48, call; 49, attribute; 50, argument_list; 51, identifier:params; 52, dictionary; 53, identifier:url; 54, string; 55, identifier:response; 56, call; 57, string_content:headers; 58, call; 59, identifier:get_sdk_headers; 60, argument_list; 61, identifier:headers; 62, identifier:update; 63, identifier:sdk_headers; 64, pair; 65, pair; 66, pair; 67, pair; 68, pair; 69, pair; 70, string_content:/v1/workspaces; 71, attribute; 72, argument_list; 73, attribute; 74, argument_list; 75, string; 76, string; 77, string; 78, string; 79, attribute; 80, string; 81, identifier:page_limit; 82, string; 83, identifier:include_count; 84, string; 85, identifier:sort; 86, string; 87, identifier:cursor; 88, string; 89, identifier:include_audit; 90, identifier:self; 91, identifier:request; 92, keyword_argument; 93, keyword_argument; 94, keyword_argument; 95, keyword_argument; 96, keyword_argument; 97, identifier:headers; 98, identifier:update; 99, call; 100, string_content:conversation; 101, string_content:V1; 102, string_content:list_workspaces; 103, string_content:version; 104, identifier:self; 105, identifier:version; 106, string_content:page_limit; 107, string_content:include_count; 108, string_content:sort; 109, string_content:cursor; 110, string_content:include_audit; 111, identifier:method; 112, string; 113, identifier:url; 114, identifier:url; 115, identifier:headers; 116, identifier:headers; 117, identifier:params; 118, identifier:params; 119, identifier:accept_json; 120, True; 121, attribute; 122, argument_list; 123, string_content:GET; 124, identifier:kwargs; 125, identifier:get; 126, string; 127, string_content:headers
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 3, 9; 3, 10; 3, 11; 4, 12; 4, 13; 4, 14; 4, 15; 4, 16; 4, 17; 4, 18; 4, 19; 4, 20; 6, 21; 6, 22; 7, 23; 7, 24; 8, 25; 8, 26; 9, 27; 9, 28; 10, 29; 10, 30; 11, 31; 12, 32; 13, 33; 14, 34; 14, 35; 15, 36; 16, 37; 17, 38; 18, 39; 19, 40; 20, 41; 33, 42; 33, 43; 34, 44; 34, 45; 35, 46; 36, 47; 36, 48; 37, 49; 37, 50; 38, 51; 38, 52; 39, 53; 39, 54; 40, 55; 40, 56; 44, 57; 46, 58; 48, 59; 48, 60; 49, 61; 49, 62; 50, 63; 52, 64; 52, 65; 52, 66; 52, 67; 52, 68; 52, 69; 54, 70; 56, 71; 56, 72; 58, 73; 58, 74; 60, 75; 60, 76; 60, 77; 64, 78; 64, 79; 65, 80; 65, 81; 66, 82; 66, 83; 67, 84; 67, 85; 68, 86; 68, 87; 69, 88; 69, 89; 71, 90; 71, 91; 72, 92; 72, 93; 72, 94; 72, 95; 72, 96; 73, 97; 73, 98; 74, 99; 75, 100; 76, 101; 77, 102; 78, 103; 79, 104; 79, 105; 80, 106; 82, 107; 84, 108; 86, 109; 88, 110; 92, 111; 92, 112; 93, 113; 93, 114; 94, 115; 94, 116; 95, 117; 95, 118; 96, 119; 96, 120; 99, 121; 99, 122; 112, 123; 121, 124; 121, 125; 122, 126; 126, 127
def list_workspaces(self, page_limit=None, include_count=None, sort=None, cursor=None, include_audit=None, **kwargs): """ List workspaces. List the workspaces associated with a Watson Assistant service instance. This operation is limited to 500 requests per 30 minutes. For more information, see **Rate limiting**. :param int page_limit: The number of records to return in each page of results. :param bool include_count: Whether to include information about the number of records returned. :param str sort: The attribute by which returned workspaces will be sorted. To reverse the sort order, prefix the value with a minus sign (`-`). :param str cursor: A token identifying the page of results to retrieve. :param bool include_audit: Whether to include the audit properties (`created` and `updated` timestamps) in the response. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse """ headers = {} if 'headers' in kwargs: headers.update(kwargs.get('headers')) sdk_headers = get_sdk_headers('conversation', 'V1', 'list_workspaces') headers.update(sdk_headers) params = { 'version': self.version, 'page_limit': page_limit, 'include_count': include_count, 'sort': sort, 'cursor': cursor, 'include_audit': include_audit } url = '/v1/workspaces' response = self.request( method='GET', url=url, headers=headers, params=params, accept_json=True) return response
0, module; 1, function_definition; 2, function_name:list_feedback; 3, parameters; 4, block; 5, identifier:self; 6, default_parameter; 7, default_parameter; 8, default_parameter; 9, default_parameter; 10, default_parameter; 11, default_parameter; 12, default_parameter; 13, default_parameter; 14, default_parameter; 15, default_parameter; 16, default_parameter; 17, default_parameter; 18, default_parameter; 19, default_parameter; 20, default_parameter; 21, default_parameter; 22, dictionary_splat_pattern; 23, expression_statement; 24, expression_statement; 25, if_statement; 26, expression_statement; 27, expression_statement; 28, expression_statement; 29, expression_statement; 30, expression_statement; 31, return_statement; 32, identifier:feedback_type; 33, None; 34, identifier:before; 35, None; 36, identifier:after; 37, None; 38, identifier:document_title; 39, None; 40, identifier:model_id; 41, None; 42, identifier:model_version; 43, None; 44, identifier:category_removed; 45, None; 46, identifier:category_added; 47, None; 48, identifier:category_not_changed; 49, None; 50, identifier:type_removed; 51, None; 52, identifier:type_added; 53, None; 54, identifier:type_not_changed; 55, None; 56, identifier:page_limit; 57, None; 58, identifier:cursor; 59, None; 60, identifier:sort; 61, None; 62, identifier:include_total; 63, None; 64, identifier:kwargs; 65, comment:""" List the feedback in a document. Lists the feedback in a document. :param str feedback_type: An optional string that filters the output to include only feedback with the specified feedback type. The only permitted value is `element_classification`. :param date before: An optional string in the format `YYYY-MM-DD` that filters the output to include only feedback that was added before the specified date. :param date after: An optional string in the format `YYYY-MM-DD` that filters the output to include only feedback that was added after the specified date. :param str document_title: An optional string that filters the output to include only feedback from the document with the specified `document_title`. :param str model_id: An optional string that filters the output to include only feedback with the specified `model_id`. The only permitted value is `contracts`. :param str model_version: An optional string that filters the output to include only feedback with the specified `model_version`. :param str category_removed: An optional string in the form of a comma-separated list of categories. If this is specified, the service filters the output to include only feedback that has at least one category from the list removed. :param str category_added: An optional string in the form of a comma-separated list of categories. If this is specified, the service filters the output to include only feedback that has at least one category from the list added. :param str category_not_changed: An optional string in the form of a comma-separated list of categories. If this is specified, the service filters the output to include only feedback that has at least one category from the list unchanged. :param str type_removed: An optional string of comma-separated `nature`:`party` pairs. If this is specified, the service filters the output to include only feedback that has at least one `nature`:`party` pair from the list removed. :param str type_added: An optional string of comma-separated `nature`:`party` pairs. If this is specified, the service filters the output to include only feedback that has at least one `nature`:`party` pair from the list removed. :param str type_not_changed: An optional string of comma-separated `nature`:`party` pairs. If this is specified, the service filters the output to include only feedback that has at least one `nature`:`party` pair from the list unchanged. :param int page_limit: An optional integer specifying the number of documents that you want the service to return. :param str cursor: An optional string that returns the set of documents after the previous set. Use this parameter with the `page_limit` parameter. :param str sort: An optional comma-separated list of fields in the document to sort on. You can optionally specify the sort direction by prefixing the value of the field with `-` for descending order or `+` for ascending order (the default). Currently permitted sorting fields are `created`, `user_id`, and `document_title`. :param bool include_total: An optional boolean value. If specified as `true`, the `pagination` object in the output includes a value called `total` that gives the total count of feedback created. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse """; 66, assignment; 67, comparison_operator:'headers' in kwargs; 68, block; 69, assignment; 70, call; 71, assignment; 72, assignment; 73, assignment; 74, identifier:response; 75, identifier:headers; 76, dictionary; 77, string; 78, identifier:kwargs; 79, expression_statement; 80, identifier:sdk_headers; 81, call; 82, attribute; 83, argument_list; 84, identifier:params; 85, dictionary; 86, identifier:url; 87, string; 88, identifier:response; 89, call; 90, string_content:headers; 91, call; 92, identifier:get_sdk_headers; 93, argument_list; 94, identifier:headers; 95, identifier:update; 96, identifier:sdk_headers; 97, pair; 98, pair; 99, pair; 100, pair; 101, pair; 102, pair; 103, pair; 104, pair; 105, pair; 106, pair; 107, pair; 108, pair; 109, pair; 110, pair; 111, pair; 112, pair; 113, pair; 114, string_content:/v1/feedback; 115, attribute; 116, argument_list; 117, attribute; 118, argument_list; 119, string; 120, string; 121, string; 122, string; 123, attribute; 124, string; 125, identifier:feedback_type; 126, string; 127, identifier:before; 128, string; 129, identifier:after; 130, string; 131, identifier:document_title; 132, string; 133, identifier:model_id; 134, string; 135, identifier:model_version; 136, string; 137, identifier:category_removed; 138, string; 139, identifier:category_added; 140, string; 141, identifier:category_not_changed; 142, string; 143, identifier:type_removed; 144, string; 145, identifier:type_added; 146, string; 147, identifier:type_not_changed; 148, string; 149, identifier:page_limit; 150, string; 151, identifier:cursor; 152, string; 153, identifier:sort; 154, string; 155, identifier:include_total; 156, identifier:self; 157, identifier:request; 158, keyword_argument; 159, keyword_argument; 160, keyword_argument; 161, keyword_argument; 162, keyword_argument; 163, identifier:headers; 164, identifier:update; 165, call; 166, string_content:compare-comply; 167, string_content:V1; 168, string_content:list_feedback; 169, string_content:version; 170, identifier:self; 171, identifier:version; 172, string_content:feedback_type; 173, string_content:before; 174, string_content:after; 175, string_content:document_title; 176, string_content:model_id; 177, string_content:model_version; 178, string_content:category_removed; 179, string_content:category_added; 180, string_content:category_not_changed; 181, string_content:type_removed; 182, string_content:type_added; 183, string_content:type_not_changed; 184, string_content:page_limit; 185, string_content:cursor; 186, string_content:sort; 187, string_content:include_total; 188, identifier:method; 189, string; 190, identifier:url; 191, identifier:url; 192, identifier:headers; 193, identifier:headers; 194, identifier:params; 195, identifier:params; 196, identifier:accept_json; 197, True; 198, attribute; 199, argument_list; 200, string_content:GET; 201, identifier:kwargs; 202, identifier:get; 203, string; 204, string_content:headers
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 3, 9; 3, 10; 3, 11; 3, 12; 3, 13; 3, 14; 3, 15; 3, 16; 3, 17; 3, 18; 3, 19; 3, 20; 3, 21; 3, 22; 4, 23; 4, 24; 4, 25; 4, 26; 4, 27; 4, 28; 4, 29; 4, 30; 4, 31; 6, 32; 6, 33; 7, 34; 7, 35; 8, 36; 8, 37; 9, 38; 9, 39; 10, 40; 10, 41; 11, 42; 11, 43; 12, 44; 12, 45; 13, 46; 13, 47; 14, 48; 14, 49; 15, 50; 15, 51; 16, 52; 16, 53; 17, 54; 17, 55; 18, 56; 18, 57; 19, 58; 19, 59; 20, 60; 20, 61; 21, 62; 21, 63; 22, 64; 23, 65; 24, 66; 25, 67; 25, 68; 26, 69; 27, 70; 28, 71; 29, 72; 30, 73; 31, 74; 66, 75; 66, 76; 67, 77; 67, 78; 68, 79; 69, 80; 69, 81; 70, 82; 70, 83; 71, 84; 71, 85; 72, 86; 72, 87; 73, 88; 73, 89; 77, 90; 79, 91; 81, 92; 81, 93; 82, 94; 82, 95; 83, 96; 85, 97; 85, 98; 85, 99; 85, 100; 85, 101; 85, 102; 85, 103; 85, 104; 85, 105; 85, 106; 85, 107; 85, 108; 85, 109; 85, 110; 85, 111; 85, 112; 85, 113; 87, 114; 89, 115; 89, 116; 91, 117; 91, 118; 93, 119; 93, 120; 93, 121; 97, 122; 97, 123; 98, 124; 98, 125; 99, 126; 99, 127; 100, 128; 100, 129; 101, 130; 101, 131; 102, 132; 102, 133; 103, 134; 103, 135; 104, 136; 104, 137; 105, 138; 105, 139; 106, 140; 106, 141; 107, 142; 107, 143; 108, 144; 108, 145; 109, 146; 109, 147; 110, 148; 110, 149; 111, 150; 111, 151; 112, 152; 112, 153; 113, 154; 113, 155; 115, 156; 115, 157; 116, 158; 116, 159; 116, 160; 116, 161; 116, 162; 117, 163; 117, 164; 118, 165; 119, 166; 120, 167; 121, 168; 122, 169; 123, 170; 123, 171; 124, 172; 126, 173; 128, 174; 130, 175; 132, 176; 134, 177; 136, 178; 138, 179; 140, 180; 142, 181; 144, 182; 146, 183; 148, 184; 150, 185; 152, 186; 154, 187; 158, 188; 158, 189; 159, 190; 159, 191; 160, 192; 160, 193; 161, 194; 161, 195; 162, 196; 162, 197; 165, 198; 165, 199; 189, 200; 198, 201; 198, 202; 199, 203; 203, 204
def list_feedback(self, feedback_type=None, before=None, after=None, document_title=None, model_id=None, model_version=None, category_removed=None, category_added=None, category_not_changed=None, type_removed=None, type_added=None, type_not_changed=None, page_limit=None, cursor=None, sort=None, include_total=None, **kwargs): """ List the feedback in a document. Lists the feedback in a document. :param str feedback_type: An optional string that filters the output to include only feedback with the specified feedback type. The only permitted value is `element_classification`. :param date before: An optional string in the format `YYYY-MM-DD` that filters the output to include only feedback that was added before the specified date. :param date after: An optional string in the format `YYYY-MM-DD` that filters the output to include only feedback that was added after the specified date. :param str document_title: An optional string that filters the output to include only feedback from the document with the specified `document_title`. :param str model_id: An optional string that filters the output to include only feedback with the specified `model_id`. The only permitted value is `contracts`. :param str model_version: An optional string that filters the output to include only feedback with the specified `model_version`. :param str category_removed: An optional string in the form of a comma-separated list of categories. If this is specified, the service filters the output to include only feedback that has at least one category from the list removed. :param str category_added: An optional string in the form of a comma-separated list of categories. If this is specified, the service filters the output to include only feedback that has at least one category from the list added. :param str category_not_changed: An optional string in the form of a comma-separated list of categories. If this is specified, the service filters the output to include only feedback that has at least one category from the list unchanged. :param str type_removed: An optional string of comma-separated `nature`:`party` pairs. If this is specified, the service filters the output to include only feedback that has at least one `nature`:`party` pair from the list removed. :param str type_added: An optional string of comma-separated `nature`:`party` pairs. If this is specified, the service filters the output to include only feedback that has at least one `nature`:`party` pair from the list removed. :param str type_not_changed: An optional string of comma-separated `nature`:`party` pairs. If this is specified, the service filters the output to include only feedback that has at least one `nature`:`party` pair from the list unchanged. :param int page_limit: An optional integer specifying the number of documents that you want the service to return. :param str cursor: An optional string that returns the set of documents after the previous set. Use this parameter with the `page_limit` parameter. :param str sort: An optional comma-separated list of fields in the document to sort on. You can optionally specify the sort direction by prefixing the value of the field with `-` for descending order or `+` for ascending order (the default). Currently permitted sorting fields are `created`, `user_id`, and `document_title`. :param bool include_total: An optional boolean value. If specified as `true`, the `pagination` object in the output includes a value called `total` that gives the total count of feedback created. :param dict headers: A `dict` containing the request headers :return: A `DetailedResponse` containing the result, headers and HTTP status code. :rtype: DetailedResponse """ headers = {} if 'headers' in kwargs: headers.update(kwargs.get('headers')) sdk_headers = get_sdk_headers('compare-comply', 'V1', 'list_feedback') headers.update(sdk_headers) params = { 'version': self.version, 'feedback_type': feedback_type, 'before': before, 'after': after, 'document_title': document_title, 'model_id': model_id, 'model_version': model_version, 'category_removed': category_removed, 'category_added': category_added, 'category_not_changed': category_not_changed, 'type_removed': type_removed, 'type_added': type_added, 'type_not_changed': type_not_changed, 'page_limit': page_limit, 'cursor': cursor, 'sort': sort, 'include_total': include_total } url = '/v1/feedback' response = self.request( method='GET', url=url, headers=headers, params=params, accept_json=True) return response
0, module; 1, function_definition; 2, function_name:multi_index_insert_row; 3, parameters; 4, block; 5, identifier:df; 6, identifier:index_row; 7, identifier:values_row; 8, expression_statement; 9, expression_statement; 10, expression_statement; 11, expression_statement; 12, if_statement; 13, comment:# The df wasn't sorted or the row has to be put in the middle somewhere; 14, return_statement; 15, comment:""" Return a new dataframe with a row inserted for a multi-index dataframe. This will sort the rows according to the ordered multi-index levels. """; 16, assignment; 17, assignment; 18, assignment; 19, boolean_operator; 20, comment:# We've just appended a row to an already-sorted dataframe; 21, block; 22, call; 23, identifier:row_index; 24, call; 25, identifier:row; 26, call; 27, identifier:df; 28, call; 29, comparison_operator:df.index.lexsort_depth == len(index_row); 30, comparison_operator:df.index[-2] < df.index[-1]; 31, return_statement; 32, attribute; 33, argument_list; 34, attribute; 35, argument_list; 36, attribute; 37, argument_list; 38, attribute; 39, argument_list; 40, attribute; 41, call; 42, subscript; 43, subscript; 44, identifier:df; 45, identifier:df; 46, identifier:sort_index; 47, identifier:pd; 48, identifier:MultiIndex; 49, keyword_argument; 50, keyword_argument; 51, identifier:pd; 52, identifier:DataFrame; 53, identifier:values_row; 54, keyword_argument; 55, keyword_argument; 56, identifier:pd; 57, identifier:concat; 58, tuple; 59, attribute; 60, identifier:lexsort_depth; 61, identifier:len; 62, argument_list; 63, attribute; 64, unary_operator; 65, attribute; 66, unary_operator; 67, identifier:levels; 68, list_comprehension; 69, identifier:labels; 70, list_comprehension; 71, identifier:index; 72, identifier:row_index; 73, identifier:columns; 74, attribute; 75, identifier:df; 76, identifier:row; 77, identifier:df; 78, identifier:index; 79, identifier:index_row; 80, identifier:df; 81, identifier:index; 82, integer:2; 83, identifier:df; 84, identifier:index; 85, integer:1; 86, list; 87, for_in_clause; 88, list; 89, for_in_clause; 90, identifier:df; 91, identifier:columns; 92, identifier:i; 93, identifier:i; 94, identifier:index_row; 95, integer:0; 96, identifier:i; 97, identifier:index_row
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 4, 14; 8, 15; 9, 16; 10, 17; 11, 18; 12, 19; 12, 20; 12, 21; 14, 22; 16, 23; 16, 24; 17, 25; 17, 26; 18, 27; 18, 28; 19, 29; 19, 30; 21, 31; 22, 32; 22, 33; 24, 34; 24, 35; 26, 36; 26, 37; 28, 38; 28, 39; 29, 40; 29, 41; 30, 42; 30, 43; 31, 44; 32, 45; 32, 46; 34, 47; 34, 48; 35, 49; 35, 50; 36, 51; 36, 52; 37, 53; 37, 54; 37, 55; 38, 56; 38, 57; 39, 58; 40, 59; 40, 60; 41, 61; 41, 62; 42, 63; 42, 64; 43, 65; 43, 66; 49, 67; 49, 68; 50, 69; 50, 70; 54, 71; 54, 72; 55, 73; 55, 74; 58, 75; 58, 76; 59, 77; 59, 78; 62, 79; 63, 80; 63, 81; 64, 82; 65, 83; 65, 84; 66, 85; 68, 86; 68, 87; 70, 88; 70, 89; 74, 90; 74, 91; 86, 92; 87, 93; 87, 94; 88, 95; 89, 96; 89, 97
def multi_index_insert_row(df, index_row, values_row): """ Return a new dataframe with a row inserted for a multi-index dataframe. This will sort the rows according to the ordered multi-index levels. """ row_index = pd.MultiIndex(levels=[[i] for i in index_row], labels=[[0] for i in index_row]) row = pd.DataFrame(values_row, index=row_index, columns=df.columns) df = pd.concat((df, row)) if df.index.lexsort_depth == len(index_row) and df.index[-2] < df.index[-1]: # We've just appended a row to an already-sorted dataframe return df # The df wasn't sorted or the row has to be put in the middle somewhere return df.sort_index()
0, module; 1, function_definition; 2, function_name:substring_search; 3, parameters; 4, block; 5, identifier:word; 6, identifier:collection; 7, expression_statement; 8, return_statement; 9, comment:"""Find all matches in the `collection` for the specified `word`. If `word` is empty, returns all items in `collection`. :type word: str :param word: The substring to search for. :type collection: collection, usually a list :param collection: A collection of words to match. :rtype: list of strings :return: A sorted list of matching words from collection. """; 10, list_comprehension; 11, identifier:item; 12, for_in_clause; 13, if_clause; 14, identifier:item; 15, call; 16, call; 17, identifier:sorted; 18, argument_list; 19, attribute; 20, argument_list; 21, identifier:collection; 22, identifier:item; 23, identifier:startswith; 24, identifier:word
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 7, 9; 8, 10; 10, 11; 10, 12; 10, 13; 12, 14; 12, 15; 13, 16; 15, 17; 15, 18; 16, 19; 16, 20; 18, 21; 19, 22; 19, 23; 20, 24
def substring_search(word, collection): """Find all matches in the `collection` for the specified `word`. If `word` is empty, returns all items in `collection`. :type word: str :param word: The substring to search for. :type collection: collection, usually a list :param collection: A collection of words to match. :rtype: list of strings :return: A sorted list of matching words from collection. """ return [item for item in sorted(collection) if item.startswith(word)]
0, module; 1, function_definition; 2, function_name:_nodes; 3, parameters; 4, block; 5, identifier:self; 6, expression_statement; 7, return_statement; 8, comment:""" Returns the list of nodes present in the network Examples -------- >>> from pgmpy.models import DynamicBayesianNetwork as DBN >>> dbn = DBN() >>> dbn.add_nodes_from(['A', 'B', 'C']) >>> sorted(dbn._nodes()) ['B', 'A', 'C'] """; 9, call; 10, identifier:list; 11, argument_list; 12, call; 13, identifier:set; 14, argument_list; 15, list_comprehension; 16, identifier:node; 17, for_in_clause; 18, pattern_list; 19, call; 20, identifier:node; 21, identifier:timeslice; 22, attribute; 23, argument_list; 24, call; 25, identifier:nodes; 26, identifier:super; 27, argument_list; 28, identifier:DynamicBayesianNetwork; 29, identifier:self
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 4, 6; 4, 7; 6, 8; 7, 9; 9, 10; 9, 11; 11, 12; 12, 13; 12, 14; 14, 15; 15, 16; 15, 17; 17, 18; 17, 19; 18, 20; 18, 21; 19, 22; 19, 23; 22, 24; 22, 25; 24, 26; 24, 27; 27, 28; 27, 29
def _nodes(self): """ Returns the list of nodes present in the network Examples -------- >>> from pgmpy.models import DynamicBayesianNetwork as DBN >>> dbn = DBN() >>> dbn.add_nodes_from(['A', 'B', 'C']) >>> sorted(dbn._nodes()) ['B', 'A', 'C'] """ return list(set([node for node, timeslice in super(DynamicBayesianNetwork, self).nodes()]))
0, module; 1, function_definition; 2, function_name:add_edge; 3, parameters; 4, block; 5, identifier:self; 6, identifier:start; 7, identifier:end; 8, dictionary_splat_pattern; 9, expression_statement; 10, try_statement; 11, if_statement; 12, expression_statement; 13, if_statement; 14, identifier:kwargs; 15, comment:""" Add an edge between two nodes. The nodes will be automatically added if they are not present in the network. Parameters ---------- start: tuple Both the start and end nodes should specify the time slice as (node_name, time_slice). Here, node_name can be any hashable python object while the time_slice is an integer value, which denotes the time slice that the node belongs to. end: tuple Both the start and end nodes should specify the time slice as (node_name, time_slice). Here, node_name can be any hashable python object while the time_slice is an integer value, which denotes the time slice that the node belongs to. Examples -------- >>> from pgmpy.models import DynamicBayesianNetwork as DBN >>> model = DBN() >>> model.add_nodes_from(['D', 'I']) >>> model.add_edge(('D',0), ('I',0)) >>> sorted(model.edges()) [(('D', 0), ('I', 0)), (('D', 1), ('I', 1))] """; 16, block; 17, except_clause; 18, comparison_operator:start == end; 19, block; 20, elif_clause; 21, call; 22, comparison_operator:start[1] == end[1]; 23, block; 24, else_clause; 25, if_statement; 26, identifier:TypeError; 27, block; 28, identifier:start; 29, identifier:end; 30, raise_statement; 31, boolean_operator; 32, block; 33, attribute; 34, argument_list; 35, subscript; 36, subscript; 37, expression_statement; 38, block; 39, boolean_operator; 40, block; 41, elif_clause; 42, elif_clause; 43, elif_clause; 44, elif_clause; 45, elif_clause; 46, raise_statement; 47, call; 48, boolean_operator; 49, line_continuation:\; 50, call; 51, raise_statement; 52, call; 53, identifier:add_edge; 54, identifier:start; 55, identifier:end; 56, dictionary_splat; 57, identifier:start; 58, integer:1; 59, identifier:end; 60, integer:1; 61, call; 62, expression_statement; 63, comparison_operator:len(start) != 2; 64, comparison_operator:len(end) != 2; 65, raise_statement; 66, boolean_operator; 67, block; 68, comparison_operator:start[1] == end[1]; 69, block; 70, comparison_operator:start[1] == end[1] - 1; 71, block; 72, comparison_operator:start[1] > end[1]; 73, block; 74, comparison_operator:start[1] != end[1]; 75, block; 76, call; 77, identifier:ValueError; 78, argument_list; 79, comparison_operator:start in super(DynamicBayesianNetwork, self).nodes(); 80, comparison_operator:end \ in super(DynamicBayesianNetwork, self).nodes(); 81, attribute; 82, argument_list; 83, call; 84, identifier:super; 85, argument_list; 86, identifier:kwargs; 87, attribute; 88, argument_list; 89, call; 90, call; 91, integer:2; 92, call; 93, integer:2; 94, call; 95, not_operator; 96, not_operator; 97, raise_statement; 98, subscript; 99, subscript; 100, expression_statement; 101, expression_statement; 102, subscript; 103, binary_operator:end[1] - 1; 104, expression_statement; 105, expression_statement; 106, subscript; 107, subscript; 108, raise_statement; 109, subscript; 110, subscript; 111, raise_statement; 112, identifier:ValueError; 113, argument_list; 114, string; 115, identifier:start; 116, call; 117, identifier:end; 118, line_continuation:\; 119, call; 120, identifier:nx; 121, identifier:has_path; 122, identifier:self; 123, identifier:end; 124, identifier:start; 125, identifier:ValueError; 126, argument_list; 127, identifier:DynamicBayesianNetwork; 128, identifier:self; 129, call; 130, identifier:add_edge; 131, tuple; 132, tuple; 133, attribute; 134, argument_list; 135, identifier:len; 136, argument_list; 137, identifier:len; 138, argument_list; 139, identifier:ValueError; 140, argument_list; 141, call; 142, call; 143, call; 144, identifier:start; 145, integer:1; 146, identifier:end; 147, integer:1; 148, assignment; 149, assignment; 150, identifier:start; 151, integer:1; 152, subscript; 153, integer:1; 154, assignment; 155, assignment; 156, identifier:start; 157, integer:1; 158, identifier:end; 159, integer:1; 160, call; 161, identifier:start; 162, integer:1; 163, identifier:end; 164, integer:1; 165, call; 166, string; 167, string_content:Self Loops are not allowed; 168, attribute; 169, argument_list; 170, attribute; 171, argument_list; 172, call; 173, identifier:super; 174, argument_list; 175, subscript; 176, binary_operator:1 - start[1]; 177, subscript; 178, binary_operator:1 - end[1]; 179, call; 180, identifier:add_node; 181, tuple; 182, identifier:start; 183, identifier:end; 184, string; 185, identifier:isinstance; 186, argument_list; 187, identifier:isinstance; 188, argument_list; 189, identifier:ValueError; 190, argument_list; 191, identifier:start; 192, tuple; 193, identifier:end; 194, tuple; 195, identifier:end; 196, integer:1; 197, identifier:start; 198, tuple; 199, identifier:end; 200, tuple; 201, identifier:NotImplementedError; 202, argument_list; 203, identifier:ValueError; 204, argument_list; 205, string_content:Nodes must be of type (node, time_slice).; 206, call; 207, identifier:nodes; 208, call; 209, identifier:nodes; 210, attribute; 211, argument_list; 212, identifier:DynamicBayesianNetwork; 213, identifier:self; 214, identifier:start; 215, integer:0; 216, integer:1; 217, subscript; 218, identifier:end; 219, integer:0; 220, integer:1; 221, subscript; 222, identifier:super; 223, argument_list; 224, subscript; 225, binary_operator:1 - end[1]; 226, string_content:Nodes must be of type (node, time_slice).; 227, subscript; 228, identifier:int; 229, subscript; 230, identifier:int; 231, string; 232, subscript; 233, integer:0; 234, subscript; 235, integer:0; 236, subscript; 237, integer:0; 238, subscript; 239, integer:1; 240, string; 241, string:"Edges over multiple time slices is not currently supported"; 242, identifier:super; 243, argument_list; 244, identifier:super; 245, argument_list; 246, string; 247, identifier:format; 248, keyword_argument; 249, keyword_argument; 250, identifier:start; 251, integer:1; 252, identifier:end; 253, integer:1; 254, identifier:DynamicBayesianNetwork; 255, identifier:self; 256, identifier:end; 257, integer:0; 258, integer:1; 259, subscript; 260, identifier:start; 261, integer:1; 262, identifier:end; 263, integer:1; 264, string_content:Nodes must be of type (node, time_slice).; 265, identifier:start; 266, integer:0; 267, identifier:end; 268, integer:0; 269, identifier:start; 270, integer:0; 271, identifier:end; 272, integer:0; 273, string_content:Edges in backward direction are not allowed.; 274, identifier:DynamicBayesianNetwork; 275, identifier:self; 276, identifier:DynamicBayesianNetwork; 277, identifier:self; 278, string_content:Loops are not allowed. Adding the edge from ({start} --> {end}) forms a loop.; 279, identifier:start; 280, call; 281, identifier:end; 282, call; 283, identifier:end; 284, integer:1; 285, identifier:str; 286, argument_list; 287, identifier:str; 288, argument_list; 289, identifier:start; 290, identifier:end
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 8, 14; 9, 15; 10, 16; 10, 17; 11, 18; 11, 19; 11, 20; 12, 21; 13, 22; 13, 23; 13, 24; 16, 25; 17, 26; 17, 27; 18, 28; 18, 29; 19, 30; 20, 31; 20, 32; 21, 33; 21, 34; 22, 35; 22, 36; 23, 37; 24, 38; 25, 39; 25, 40; 25, 41; 25, 42; 25, 43; 25, 44; 25, 45; 27, 46; 30, 47; 31, 48; 31, 49; 31, 50; 32, 51; 33, 52; 33, 53; 34, 54; 34, 55; 34, 56; 35, 57; 35, 58; 36, 59; 36, 60; 37, 61; 38, 62; 39, 63; 39, 64; 40, 65; 41, 66; 41, 67; 42, 68; 42, 69; 43, 70; 43, 71; 44, 72; 44, 73; 45, 74; 45, 75; 46, 76; 47, 77; 47, 78; 48, 79; 48, 80; 50, 81; 50, 82; 51, 83; 52, 84; 52, 85; 56, 86; 61, 87; 61, 88; 62, 89; 63, 90; 63, 91; 64, 92; 64, 93; 65, 94; 66, 95; 66, 96; 67, 97; 68, 98; 68, 99; 69, 100; 69, 101; 70, 102; 70, 103; 71, 104; 71, 105; 72, 106; 72, 107; 73, 108; 74, 109; 74, 110; 75, 111; 76, 112; 76, 113; 78, 114; 79, 115; 79, 116; 80, 117; 80, 118; 80, 119; 81, 120; 81, 121; 82, 122; 82, 123; 82, 124; 83, 125; 83, 126; 85, 127; 85, 128; 87, 129; 87, 130; 88, 131; 88, 132; 89, 133; 89, 134; 90, 135; 90, 136; 92, 137; 92, 138; 94, 139; 94, 140; 95, 141; 96, 142; 97, 143; 98, 144; 98, 145; 99, 146; 99, 147; 100, 148; 101, 149; 102, 150; 102, 151; 103, 152; 103, 153; 104, 154; 105, 155; 106, 156; 106, 157; 107, 158; 107, 159; 108, 160; 109, 161; 109, 162; 110, 163; 110, 164; 111, 165; 113, 166; 114, 167; 116, 168; 116, 169; 119, 170; 119, 171; 126, 172; 129, 173; 129, 174; 131, 175; 131, 176; 132, 177; 132, 178; 133, 179; 133, 180; 134, 181; 136, 182; 138, 183; 140, 184; 141, 185; 141, 186; 142, 187; 142, 188; 143, 189; 143, 190; 148, 191; 148, 192; 149, 193; 149, 194; 152, 195; 152, 196; 154, 197; 154, 198; 155, 199; 155, 200; 160, 201; 160, 202; 165, 203; 165, 204; 166, 205; 168, 206; 168, 207; 170, 208; 170, 209; 172, 210; 172, 211; 174, 212; 174, 213; 175, 214; 175, 215; 176, 216; 176, 217; 177, 218; 177, 219; 178, 220; 178, 221; 179, 222; 179, 223; 181, 224; 181, 225; 184, 226; 186, 227; 186, 228; 188, 229; 188, 230; 190, 231; 192, 232; 192, 233; 194, 234; 194, 235; 198, 236; 198, 237; 200, 238; 200, 239; 202, 240; 204, 241; 206, 242; 206, 243; 208, 244; 208, 245; 210, 246; 210, 247; 211, 248; 211, 249; 217, 250; 217, 251; 221, 252; 221, 253; 223, 254; 223, 255; 224, 256; 224, 257; 225, 258; 225, 259; 227, 260; 227, 261; 229, 262; 229, 263; 231, 264; 232, 265; 232, 266; 234, 267; 234, 268; 236, 269; 236, 270; 238, 271; 238, 272; 240, 273; 243, 274; 243, 275; 245, 276; 245, 277; 246, 278; 248, 279; 248, 280; 249, 281; 249, 282; 259, 283; 259, 284; 280, 285; 280, 286; 282, 287; 282, 288; 286, 289; 288, 290
def add_edge(self, start, end, **kwargs): """ Add an edge between two nodes. The nodes will be automatically added if they are not present in the network. Parameters ---------- start: tuple Both the start and end nodes should specify the time slice as (node_name, time_slice). Here, node_name can be any hashable python object while the time_slice is an integer value, which denotes the time slice that the node belongs to. end: tuple Both the start and end nodes should specify the time slice as (node_name, time_slice). Here, node_name can be any hashable python object while the time_slice is an integer value, which denotes the time slice that the node belongs to. Examples -------- >>> from pgmpy.models import DynamicBayesianNetwork as DBN >>> model = DBN() >>> model.add_nodes_from(['D', 'I']) >>> model.add_edge(('D',0), ('I',0)) >>> sorted(model.edges()) [(('D', 0), ('I', 0)), (('D', 1), ('I', 1))] """ try: if len(start) != 2 or len(end) != 2: raise ValueError('Nodes must be of type (node, time_slice).') elif not isinstance(start[1], int) or not isinstance(end[1], int): raise ValueError('Nodes must be of type (node, time_slice).') elif start[1] == end[1]: start = (start[0], 0) end = (end[0], 0) elif start[1] == end[1] - 1: start = (start[0], 0) end = (end[0], 1) elif start[1] > end[1]: raise NotImplementedError('Edges in backward direction are not allowed.') elif start[1] != end[1]: raise ValueError("Edges over multiple time slices is not currently supported") except TypeError: raise ValueError('Nodes must be of type (node, time_slice).') if start == end: raise ValueError('Self Loops are not allowed') elif start in super(DynamicBayesianNetwork, self).nodes() and end \ in super(DynamicBayesianNetwork, self).nodes() and \ nx.has_path(self, end, start): raise ValueError('Loops are not allowed. Adding the edge from ({start} --> {end}) forms a loop.'.format( start=str(start), end=str(end))) super(DynamicBayesianNetwork, self).add_edge(start, end, **kwargs) if start[1] == end[1]: super(DynamicBayesianNetwork, self).add_edge((start[0], 1 - start[1]), (end[0], 1 - end[1])) else: super(DynamicBayesianNetwork, self).add_node((end[0], 1 - end[1]))
0, module; 1, function_definition; 2, function_name:estimate; 3, parameters; 4, block; 5, identifier:self; 6, default_parameter; 7, default_parameter; 8, default_parameter; 9, expression_statement; 10, expression_statement; 11, expression_statement; 12, if_statement; 13, expression_statement; 14, expression_statement; 15, while_statement; 16, return_statement; 17, identifier:start; 18, None; 19, identifier:tabu_length; 20, integer:0; 21, identifier:max_indegree; 22, None; 23, comment:""" Performs local hill climb search to estimates the `DAG` structure that has optimal score, according to the scoring method supplied in the constructor. Starts at model `start` and proceeds by step-by-step network modifications until a local maximum is reached. Only estimates network structure, no parametrization. Parameters ---------- start: DAG instance The starting point for the local search. By default a completely disconnected network is used. tabu_length: int If provided, the last `tabu_length` graph modifications cannot be reversed during the search procedure. This serves to enforce a wider exploration of the search space. Default value: 100. max_indegree: int or None If provided and unequal None, the procedure only searches among models where all nodes have at most `max_indegree` parents. Defaults to None. Returns ------- model: `DAG` instance A `DAG` at a (local) score maximum. Examples -------- >>> import pandas as pd >>> import numpy as np >>> from pgmpy.estimators import HillClimbSearch, BicScore >>> # create data sample with 9 random variables: ... data = pd.DataFrame(np.random.randint(0, 5, size=(5000, 9)), columns=list('ABCDEFGHI')) >>> # add 10th dependent variable ... data['J'] = data['A'] * data['B'] >>> est = HillClimbSearch(data, scoring_method=BicScore(data)) >>> best_model = est.estimate() >>> sorted(best_model.nodes()) ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J'] >>> best_model.edges() [('B', 'J'), ('A', 'J')] >>> # search a model with restriction on the number of parents: >>> est.estimate(max_indegree=1).edges() [('J', 'A'), ('B', 'J')] """; 24, assignment; 25, assignment; 26, comparison_operator:start is None; 27, block; 28, elif_clause; 29, assignment; 30, assignment; 31, True; 32, block; 33, identifier:current_model; 34, identifier:epsilon; 35, float:1e-8; 36, identifier:nodes; 37, call; 38, identifier:start; 39, None; 40, expression_statement; 41, expression_statement; 42, boolean_operator; 43, block; 44, identifier:tabu_list; 45, list; 46, identifier:current_model; 47, identifier:start; 48, expression_statement; 49, expression_statement; 50, for_statement; 51, if_statement; 52, attribute; 53, argument_list; 54, assignment; 55, call; 56, not_operator; 57, not_operator; 58, raise_statement; 59, assignment; 60, assignment; 61, pattern_list; 62, call; 63, block; 64, boolean_operator; 65, block; 66, elif_clause; 67, elif_clause; 68, elif_clause; 69, attribute; 70, identifier:keys; 71, identifier:start; 72, call; 73, attribute; 74, argument_list; 75, call; 76, comparison_operator:set(start.nodes()) == set(nodes); 77, call; 78, identifier:best_score_delta; 79, integer:0; 80, identifier:best_operation; 81, None; 82, identifier:operation; 83, identifier:score_delta; 84, attribute; 85, argument_list; 86, if_statement; 87, comparison_operator:best_operation is None; 88, comparison_operator:best_score_delta < epsilon; 89, break_statement; 90, comparison_operator:best_operation[0] == '+'; 91, block; 92, comparison_operator:best_operation[0] == '-'; 93, block; 94, comparison_operator:best_operation[0] == 'flip'; 95, block; 96, identifier:self; 97, identifier:state_names; 98, identifier:DAG; 99, argument_list; 100, identifier:start; 101, identifier:add_nodes_from; 102, identifier:nodes; 103, identifier:isinstance; 104, argument_list; 105, call; 106, call; 107, identifier:ValueError; 108, argument_list; 109, identifier:self; 110, identifier:_legal_operations; 111, identifier:current_model; 112, identifier:tabu_list; 113, identifier:max_indegree; 114, comparison_operator:score_delta > best_score_delta; 115, block; 116, identifier:best_operation; 117, None; 118, identifier:best_score_delta; 119, identifier:epsilon; 120, subscript; 121, string; 122, expression_statement; 123, expression_statement; 124, subscript; 125, string; 126, expression_statement; 127, expression_statement; 128, subscript; 129, string; 130, expression_statement; 131, expression_statement; 132, expression_statement; 133, expression_statement; 134, identifier:start; 135, identifier:DAG; 136, identifier:set; 137, argument_list; 138, identifier:set; 139, argument_list; 140, string:"'start' should be a DAG with the same variables as the data set, or 'None'."; 141, identifier:score_delta; 142, identifier:best_score_delta; 143, expression_statement; 144, expression_statement; 145, identifier:best_operation; 146, integer:0; 147, string_content:+; 148, call; 149, assignment; 150, identifier:best_operation; 151, integer:0; 152, string_content:-; 153, call; 154, assignment; 155, identifier:best_operation; 156, integer:0; 157, string_content:flip; 158, assignment; 159, call; 160, call; 161, assignment; 162, call; 163, identifier:nodes; 164, assignment; 165, assignment; 166, attribute; 167, argument_list; 168, identifier:tabu_list; 169, subscript; 170, attribute; 171, argument_list; 172, identifier:tabu_list; 173, subscript; 174, pattern_list; 175, subscript; 176, attribute; 177, argument_list; 178, attribute; 179, argument_list; 180, identifier:tabu_list; 181, subscript; 182, attribute; 183, argument_list; 184, identifier:best_operation; 185, identifier:operation; 186, identifier:best_score_delta; 187, identifier:score_delta; 188, identifier:current_model; 189, identifier:add_edge; 190, list_splat; 191, parenthesized_expression; 192, slice; 193, identifier:current_model; 194, identifier:remove_edge; 195, list_splat; 196, parenthesized_expression; 197, slice; 198, identifier:X; 199, identifier:Y; 200, identifier:best_operation; 201, integer:1; 202, identifier:current_model; 203, identifier:remove_edge; 204, identifier:X; 205, identifier:Y; 206, identifier:current_model; 207, identifier:add_edge; 208, identifier:Y; 209, identifier:X; 210, parenthesized_expression; 211, slice; 212, identifier:start; 213, identifier:nodes; 214, subscript; 215, binary_operator:[('-', best_operation[1])] + tabu_list; 216, identifier:tabu_length; 217, subscript; 218, binary_operator:[('+', best_operation[1])] + tabu_list; 219, identifier:tabu_length; 220, binary_operator:[best_operation] + tabu_list; 221, identifier:tabu_length; 222, identifier:best_operation; 223, integer:1; 224, list; 225, identifier:tabu_list; 226, identifier:best_operation; 227, integer:1; 228, list; 229, identifier:tabu_list; 230, list; 231, identifier:tabu_list; 232, tuple; 233, tuple; 234, identifier:best_operation; 235, string; 236, subscript; 237, string; 238, subscript; 239, string_content:-; 240, identifier:best_operation; 241, integer:1; 242, string_content:+; 243, identifier:best_operation; 244, integer:1
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 4, 14; 4, 15; 4, 16; 6, 17; 6, 18; 7, 19; 7, 20; 8, 21; 8, 22; 9, 23; 10, 24; 11, 25; 12, 26; 12, 27; 12, 28; 13, 29; 14, 30; 15, 31; 15, 32; 16, 33; 24, 34; 24, 35; 25, 36; 25, 37; 26, 38; 26, 39; 27, 40; 27, 41; 28, 42; 28, 43; 29, 44; 29, 45; 30, 46; 30, 47; 32, 48; 32, 49; 32, 50; 32, 51; 37, 52; 37, 53; 40, 54; 41, 55; 42, 56; 42, 57; 43, 58; 48, 59; 49, 60; 50, 61; 50, 62; 50, 63; 51, 64; 51, 65; 51, 66; 51, 67; 51, 68; 52, 69; 52, 70; 54, 71; 54, 72; 55, 73; 55, 74; 56, 75; 57, 76; 58, 77; 59, 78; 59, 79; 60, 80; 60, 81; 61, 82; 61, 83; 62, 84; 62, 85; 63, 86; 64, 87; 64, 88; 65, 89; 66, 90; 66, 91; 67, 92; 67, 93; 68, 94; 68, 95; 69, 96; 69, 97; 72, 98; 72, 99; 73, 100; 73, 101; 74, 102; 75, 103; 75, 104; 76, 105; 76, 106; 77, 107; 77, 108; 84, 109; 84, 110; 85, 111; 85, 112; 85, 113; 86, 114; 86, 115; 87, 116; 87, 117; 88, 118; 88, 119; 90, 120; 90, 121; 91, 122; 91, 123; 92, 124; 92, 125; 93, 126; 93, 127; 94, 128; 94, 129; 95, 130; 95, 131; 95, 132; 95, 133; 104, 134; 104, 135; 105, 136; 105, 137; 106, 138; 106, 139; 108, 140; 114, 141; 114, 142; 115, 143; 115, 144; 120, 145; 120, 146; 121, 147; 122, 148; 123, 149; 124, 150; 124, 151; 125, 152; 126, 153; 127, 154; 128, 155; 128, 156; 129, 157; 130, 158; 131, 159; 132, 160; 133, 161; 137, 162; 139, 163; 143, 164; 144, 165; 148, 166; 148, 167; 149, 168; 149, 169; 153, 170; 153, 171; 154, 172; 154, 173; 158, 174; 158, 175; 159, 176; 159, 177; 160, 178; 160, 179; 161, 180; 161, 181; 162, 182; 162, 183; 164, 184; 164, 185; 165, 186; 165, 187; 166, 188; 166, 189; 167, 190; 169, 191; 169, 192; 170, 193; 170, 194; 171, 195; 173, 196; 173, 197; 174, 198; 174, 199; 175, 200; 175, 201; 176, 202; 176, 203; 177, 204; 177, 205; 178, 206; 178, 207; 179, 208; 179, 209; 181, 210; 181, 211; 182, 212; 182, 213; 190, 214; 191, 215; 192, 216; 195, 217; 196, 218; 197, 219; 210, 220; 211, 221; 214, 222; 214, 223; 215, 224; 215, 225; 217, 226; 217, 227; 218, 228; 218, 229; 220, 230; 220, 231; 224, 232; 228, 233; 230, 234; 232, 235; 232, 236; 233, 237; 233, 238; 235, 239; 236, 240; 236, 241; 237, 242; 238, 243; 238, 244
def estimate(self, start=None, tabu_length=0, max_indegree=None): """ Performs local hill climb search to estimates the `DAG` structure that has optimal score, according to the scoring method supplied in the constructor. Starts at model `start` and proceeds by step-by-step network modifications until a local maximum is reached. Only estimates network structure, no parametrization. Parameters ---------- start: DAG instance The starting point for the local search. By default a completely disconnected network is used. tabu_length: int If provided, the last `tabu_length` graph modifications cannot be reversed during the search procedure. This serves to enforce a wider exploration of the search space. Default value: 100. max_indegree: int or None If provided and unequal None, the procedure only searches among models where all nodes have at most `max_indegree` parents. Defaults to None. Returns ------- model: `DAG` instance A `DAG` at a (local) score maximum. Examples -------- >>> import pandas as pd >>> import numpy as np >>> from pgmpy.estimators import HillClimbSearch, BicScore >>> # create data sample with 9 random variables: ... data = pd.DataFrame(np.random.randint(0, 5, size=(5000, 9)), columns=list('ABCDEFGHI')) >>> # add 10th dependent variable ... data['J'] = data['A'] * data['B'] >>> est = HillClimbSearch(data, scoring_method=BicScore(data)) >>> best_model = est.estimate() >>> sorted(best_model.nodes()) ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J'] >>> best_model.edges() [('B', 'J'), ('A', 'J')] >>> # search a model with restriction on the number of parents: >>> est.estimate(max_indegree=1).edges() [('J', 'A'), ('B', 'J')] """ epsilon = 1e-8 nodes = self.state_names.keys() if start is None: start = DAG() start.add_nodes_from(nodes) elif not isinstance(start, DAG) or not set(start.nodes()) == set(nodes): raise ValueError("'start' should be a DAG with the same variables as the data set, or 'None'.") tabu_list = [] current_model = start while True: best_score_delta = 0 best_operation = None for operation, score_delta in self._legal_operations(current_model, tabu_list, max_indegree): if score_delta > best_score_delta: best_operation = operation best_score_delta = score_delta if best_operation is None or best_score_delta < epsilon: break elif best_operation[0] == '+': current_model.add_edge(*best_operation[1]) tabu_list = ([('-', best_operation[1])] + tabu_list)[:tabu_length] elif best_operation[0] == '-': current_model.remove_edge(*best_operation[1]) tabu_list = ([('+', best_operation[1])] + tabu_list)[:tabu_length] elif best_operation[0] == 'flip': X, Y = best_operation[1] current_model.remove_edge(X, Y) current_model.add_edge(Y, X) tabu_list = ([best_operation] + tabu_list)[:tabu_length] return current_model
0, module; 1, function_definition; 2, function_name:add_node; 3, parameters; 4, block; 5, identifier:self; 6, identifier:node; 7, default_parameter; 8, expression_statement; 9, comment:# Check for networkx 2.0 syntax; 10, if_statement; 11, expression_statement; 12, identifier:weight; 13, None; 14, comment:""" Adds a single node to the Graph. Parameters ---------- node: str, int, or any hashable python object. The node to add to the graph. weight: int, float The weight of the node. Examples -------- >>> from pgmpy.base import DAG >>> G = DAG() >>> G.add_node(node='A') >>> sorted(G.nodes()) ['A'] Adding a node with some weight. >>> G.add_node(node='B', weight=0.3) The weight of these nodes can be accessed as: >>> G.node['B'] {'weight': 0.3} >>> G.node['A'] {'weight': None} """; 15, boolean_operator; 16, block; 17, else_clause; 18, call; 19, boolean_operator; 20, call; 21, expression_statement; 22, if_statement; 23, block; 24, attribute; 25, argument_list; 26, call; 27, comparison_operator:len(node) == 2; 28, identifier:isinstance; 29, argument_list; 30, assignment; 31, comparison_operator:attrs.get('weight', None) is not None; 32, block; 33, expression_statement; 34, call; 35, identifier:add_node; 36, identifier:node; 37, keyword_argument; 38, identifier:isinstance; 39, argument_list; 40, call; 41, integer:2; 42, subscript; 43, identifier:dict; 44, pattern_list; 45, identifier:node; 46, call; 47, None; 48, expression_statement; 49, assignment; 50, identifier:super; 51, argument_list; 52, identifier:weight; 53, identifier:weight; 54, identifier:node; 55, identifier:tuple; 56, identifier:len; 57, argument_list; 58, identifier:node; 59, integer:1; 60, identifier:node; 61, identifier:attrs; 62, attribute; 63, argument_list; 64, assignment; 65, identifier:attrs; 66, dictionary; 67, identifier:DAG; 68, identifier:self; 69, identifier:node; 70, identifier:attrs; 71, identifier:get; 72, string; 73, None; 74, subscript; 75, identifier:weight; 76, pair; 77, string_content:weight; 78, identifier:attrs; 79, string; 80, string; 81, identifier:weight; 82, string_content:weight; 83, string_content:weight
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 4, 8; 4, 9; 4, 10; 4, 11; 7, 12; 7, 13; 8, 14; 10, 15; 10, 16; 10, 17; 11, 18; 15, 19; 15, 20; 16, 21; 16, 22; 17, 23; 18, 24; 18, 25; 19, 26; 19, 27; 20, 28; 20, 29; 21, 30; 22, 31; 22, 32; 23, 33; 24, 34; 24, 35; 25, 36; 25, 37; 26, 38; 26, 39; 27, 40; 27, 41; 29, 42; 29, 43; 30, 44; 30, 45; 31, 46; 31, 47; 32, 48; 33, 49; 34, 50; 34, 51; 37, 52; 37, 53; 39, 54; 39, 55; 40, 56; 40, 57; 42, 58; 42, 59; 44, 60; 44, 61; 46, 62; 46, 63; 48, 64; 49, 65; 49, 66; 51, 67; 51, 68; 57, 69; 62, 70; 62, 71; 63, 72; 63, 73; 64, 74; 64, 75; 66, 76; 72, 77; 74, 78; 74, 79; 76, 80; 76, 81; 79, 82; 80, 83
def add_node(self, node, weight=None): """ Adds a single node to the Graph. Parameters ---------- node: str, int, or any hashable python object. The node to add to the graph. weight: int, float The weight of the node. Examples -------- >>> from pgmpy.base import DAG >>> G = DAG() >>> G.add_node(node='A') >>> sorted(G.nodes()) ['A'] Adding a node with some weight. >>> G.add_node(node='B', weight=0.3) The weight of these nodes can be accessed as: >>> G.node['B'] {'weight': 0.3} >>> G.node['A'] {'weight': None} """ # Check for networkx 2.0 syntax if isinstance(node, tuple) and len(node) == 2 and isinstance(node[1], dict): node, attrs = node if attrs.get('weight', None) is not None: attrs['weight'] = weight else: attrs = {'weight': weight} super(DAG, self).add_node(node, weight=weight)
0, module; 1, function_definition; 2, function_name:add_nodes_from; 3, parameters; 4, block; 5, identifier:self; 6, identifier:nodes; 7, default_parameter; 8, expression_statement; 9, expression_statement; 10, if_statement; 11, identifier:weights; 12, None; 13, comment:""" Add multiple nodes to the Graph. **The behviour of adding weights is different than in networkx. Parameters ---------- nodes: iterable container A container of nodes (list, dict, set, or any hashable python object). weights: list, tuple (default=None) A container of weights (int, float). The weight value at index i is associated with the variable at index i. Examples -------- >>> from pgmpy.base import DAG >>> G = DAG() >>> G.add_nodes_from(nodes=['A', 'B', 'C']) >>> sorted(G.nodes()) ['A', 'B', 'C'] Adding nodes with weights: >>> G.add_nodes_from(nodes=['D', 'E'], weights=[0.3, 0.6]) >>> G.node['D'] {'weight': 0.3} >>> G.node['E'] {'weight': 0.6} >>> G.node['A'] {'weight': None} """; 14, assignment; 15, identifier:weights; 16, block; 17, else_clause; 18, identifier:nodes; 19, call; 20, if_statement; 21, for_statement; 22, block; 23, identifier:list; 24, argument_list; 25, comparison_operator:len(nodes) != len(weights); 26, block; 27, identifier:index; 28, call; 29, block; 30, for_statement; 31, identifier:nodes; 32, call; 33, call; 34, raise_statement; 35, identifier:range; 36, argument_list; 37, expression_statement; 38, identifier:node; 39, identifier:nodes; 40, block; 41, identifier:len; 42, argument_list; 43, identifier:len; 44, argument_list; 45, call; 46, call; 47, call; 48, expression_statement; 49, identifier:nodes; 50, identifier:weights; 51, identifier:ValueError; 52, argument_list; 53, identifier:len; 54, argument_list; 55, attribute; 56, argument_list; 57, call; 58, concatenated_string; 59, identifier:nodes; 60, identifier:self; 61, identifier:add_node; 62, keyword_argument; 63, keyword_argument; 64, attribute; 65, argument_list; 66, string:"The number of elements in nodes and weights"; 67, string:"should be equal."; 68, identifier:node; 69, subscript; 70, identifier:weight; 71, subscript; 72, identifier:self; 73, identifier:add_node; 74, keyword_argument; 75, identifier:nodes; 76, identifier:index; 77, identifier:weights; 78, identifier:index; 79, identifier:node; 80, identifier:node
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 4, 8; 4, 9; 4, 10; 7, 11; 7, 12; 8, 13; 9, 14; 10, 15; 10, 16; 10, 17; 14, 18; 14, 19; 16, 20; 16, 21; 17, 22; 19, 23; 19, 24; 20, 25; 20, 26; 21, 27; 21, 28; 21, 29; 22, 30; 24, 31; 25, 32; 25, 33; 26, 34; 28, 35; 28, 36; 29, 37; 30, 38; 30, 39; 30, 40; 32, 41; 32, 42; 33, 43; 33, 44; 34, 45; 36, 46; 37, 47; 40, 48; 42, 49; 44, 50; 45, 51; 45, 52; 46, 53; 46, 54; 47, 55; 47, 56; 48, 57; 52, 58; 54, 59; 55, 60; 55, 61; 56, 62; 56, 63; 57, 64; 57, 65; 58, 66; 58, 67; 62, 68; 62, 69; 63, 70; 63, 71; 64, 72; 64, 73; 65, 74; 69, 75; 69, 76; 71, 77; 71, 78; 74, 79; 74, 80
def add_nodes_from(self, nodes, weights=None): """ Add multiple nodes to the Graph. **The behviour of adding weights is different than in networkx. Parameters ---------- nodes: iterable container A container of nodes (list, dict, set, or any hashable python object). weights: list, tuple (default=None) A container of weights (int, float). The weight value at index i is associated with the variable at index i. Examples -------- >>> from pgmpy.base import DAG >>> G = DAG() >>> G.add_nodes_from(nodes=['A', 'B', 'C']) >>> sorted(G.nodes()) ['A', 'B', 'C'] Adding nodes with weights: >>> G.add_nodes_from(nodes=['D', 'E'], weights=[0.3, 0.6]) >>> G.node['D'] {'weight': 0.3} >>> G.node['E'] {'weight': 0.6} >>> G.node['A'] {'weight': None} """ nodes = list(nodes) if weights: if len(nodes) != len(weights): raise ValueError("The number of elements in nodes and weights" "should be equal.") for index in range(len(nodes)): self.add_node(node=nodes[index], weight=weights[index]) else: for node in nodes: self.add_node(node=node)
0, module; 1, function_definition; 2, function_name:rank_items; 3, parameters; 4, block; 5, identifier:self; 6, identifier:userid; 7, identifier:user_items; 8, identifier:selected_items; 9, default_parameter; 10, expression_statement; 11, comment:# check if selected_items contains itemids that are not in the model(user_items); 12, if_statement; 13, comment:# calculate the relevance scores; 14, expression_statement; 15, expression_statement; 16, comment:# remove items that are not in the selected_items; 17, expression_statement; 18, expression_statement; 19, comment:# returned items should be equal to input selected items; 20, for_statement; 21, return_statement; 22, identifier:recalculate_user; 23, False; 24, comment:""" Rank given items for a user and returns sorted item list """; 25, boolean_operator; 26, block; 27, assignment; 28, assignment; 29, assignment; 30, assignment; 31, identifier:itemid; 32, identifier:selected_items; 33, block; 34, identifier:ret; 35, comparison_operator:max(selected_items) >= user_items.shape[1]; 36, comparison_operator:min(selected_items) < 0; 37, raise_statement; 38, identifier:liked_vector; 39, subscript; 40, identifier:recommendations; 41, call; 42, identifier:best; 43, call; 44, identifier:ret; 45, list_comprehension; 46, if_statement; 47, call; 48, subscript; 49, call; 50, integer:0; 51, call; 52, identifier:user_items; 53, identifier:userid; 54, attribute; 55, argument_list; 56, identifier:sorted; 57, argument_list; 58, identifier:rec; 59, for_in_clause; 60, if_clause; 61, comparison_operator:itemid not in recommendations.indices; 62, block; 63, identifier:max; 64, argument_list; 65, attribute; 66, integer:1; 67, identifier:min; 68, argument_list; 69, identifier:IndexError; 70, argument_list; 71, identifier:liked_vector; 72, identifier:dot; 73, attribute; 74, call; 75, keyword_argument; 76, identifier:rec; 77, identifier:best; 78, comparison_operator:rec[0] in selected_items; 79, identifier:itemid; 80, attribute; 81, expression_statement; 82, identifier:selected_items; 83, identifier:user_items; 84, identifier:shape; 85, identifier:selected_items; 86, string:"Some of selected itemids are not in the model"; 87, identifier:self; 88, identifier:similarity; 89, identifier:zip; 90, argument_list; 91, identifier:key; 92, lambda; 93, subscript; 94, identifier:selected_items; 95, identifier:recommendations; 96, identifier:indices; 97, call; 98, attribute; 99, attribute; 100, lambda_parameters; 101, unary_operator; 102, identifier:rec; 103, integer:0; 104, attribute; 105, argument_list; 106, identifier:recommendations; 107, identifier:indices; 108, identifier:recommendations; 109, identifier:data; 110, identifier:x; 111, subscript; 112, identifier:ret; 113, identifier:append; 114, tuple; 115, identifier:x; 116, integer:1; 117, identifier:itemid; 118, unary_operator; 119, float:1.0
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 3, 9; 4, 10; 4, 11; 4, 12; 4, 13; 4, 14; 4, 15; 4, 16; 4, 17; 4, 18; 4, 19; 4, 20; 4, 21; 9, 22; 9, 23; 10, 24; 12, 25; 12, 26; 14, 27; 15, 28; 17, 29; 18, 30; 20, 31; 20, 32; 20, 33; 21, 34; 25, 35; 25, 36; 26, 37; 27, 38; 27, 39; 28, 40; 28, 41; 29, 42; 29, 43; 30, 44; 30, 45; 33, 46; 35, 47; 35, 48; 36, 49; 36, 50; 37, 51; 39, 52; 39, 53; 41, 54; 41, 55; 43, 56; 43, 57; 45, 58; 45, 59; 45, 60; 46, 61; 46, 62; 47, 63; 47, 64; 48, 65; 48, 66; 49, 67; 49, 68; 51, 69; 51, 70; 54, 71; 54, 72; 55, 73; 57, 74; 57, 75; 59, 76; 59, 77; 60, 78; 61, 79; 61, 80; 62, 81; 64, 82; 65, 83; 65, 84; 68, 85; 70, 86; 73, 87; 73, 88; 74, 89; 74, 90; 75, 91; 75, 92; 78, 93; 78, 94; 80, 95; 80, 96; 81, 97; 90, 98; 90, 99; 92, 100; 92, 101; 93, 102; 93, 103; 97, 104; 97, 105; 98, 106; 98, 107; 99, 108; 99, 109; 100, 110; 101, 111; 104, 112; 104, 113; 105, 114; 111, 115; 111, 116; 114, 117; 114, 118; 118, 119
def rank_items(self, userid, user_items, selected_items, recalculate_user=False): """ Rank given items for a user and returns sorted item list """ # check if selected_items contains itemids that are not in the model(user_items) if max(selected_items) >= user_items.shape[1] or min(selected_items) < 0: raise IndexError("Some of selected itemids are not in the model") # calculate the relevance scores liked_vector = user_items[userid] recommendations = liked_vector.dot(self.similarity) # remove items that are not in the selected_items best = sorted(zip(recommendations.indices, recommendations.data), key=lambda x: -x[1]) ret = [rec for rec in best if rec[0] in selected_items] # returned items should be equal to input selected items for itemid in selected_items: if itemid not in recommendations.indices: ret.append((itemid, -1.0)) return ret
0, module; 1, function_definition; 2, function_name:get_sorted_structure; 3, parameters; 4, block; 5, identifier:self; 6, default_parameter; 7, default_parameter; 8, expression_statement; 9, expression_statement; 10, return_statement; 11, identifier:key; 12, None; 13, identifier:reverse; 14, False; 15, comment:""" Get a sorted copy of the structure. The parameters have the same meaning as in list.sort. By default, sites are sorted by the electronegativity of the species. Args: key: Specifies a function of one argument that is used to extract a comparison key from each list element: key=str.lower. The default value is None (compare the elements directly). reverse (bool): If set to True, then the list elements are sorted as if each comparison were reversed. """; 16, assignment; 17, call; 18, identifier:sites; 19, call; 20, attribute; 21, argument_list; 22, identifier:sorted; 23, argument_list; 24, attribute; 25, identifier:from_sites; 26, identifier:sites; 27, keyword_argument; 28, identifier:self; 29, keyword_argument; 30, keyword_argument; 31, identifier:self; 32, identifier:__class__; 33, identifier:charge; 34, attribute; 35, identifier:key; 36, identifier:key; 37, identifier:reverse; 38, identifier:reverse; 39, identifier:self; 40, identifier:_charge
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 4, 8; 4, 9; 4, 10; 6, 11; 6, 12; 7, 13; 7, 14; 8, 15; 9, 16; 10, 17; 16, 18; 16, 19; 17, 20; 17, 21; 19, 22; 19, 23; 20, 24; 20, 25; 21, 26; 21, 27; 23, 28; 23, 29; 23, 30; 24, 31; 24, 32; 27, 33; 27, 34; 29, 35; 29, 36; 30, 37; 30, 38; 34, 39; 34, 40
def get_sorted_structure(self, key=None, reverse=False): """ Get a sorted copy of the structure. The parameters have the same meaning as in list.sort. By default, sites are sorted by the electronegativity of the species. Args: key: Specifies a function of one argument that is used to extract a comparison key from each list element: key=str.lower. The default value is None (compare the elements directly). reverse (bool): If set to True, then the list elements are sorted as if each comparison were reversed. """ sites = sorted(self, key=key, reverse=reverse) return self.__class__.from_sites(sites, charge=self._charge)
0, module; 1, function_definition; 2, function_name:from_str; 3, parameters; 4, block; 5, identifier:cls; 6, identifier:input_string; 7, identifier:fmt; 8, default_parameter; 9, default_parameter; 10, default_parameter; 11, expression_statement; 12, import_from_statement; 13, import_from_statement; 14, import_from_statement; 15, import_from_statement; 16, import_from_statement; 17, expression_statement; 18, if_statement; 19, if_statement; 20, if_statement; 21, return_statement; 22, identifier:primitive; 23, False; 24, identifier:sort; 25, False; 26, identifier:merge_tol; 27, float:0.0; 28, comment:""" Reads a structure from a string. Args: input_string (str): String to parse. fmt (str): A format specification. primitive (bool): Whether to find a primitive cell. Defaults to False. sort (bool): Whether to sort the sites in accordance to the default ordering criteria, i.e., electronegativity. merge_tol (float): If this is some positive number, sites that are within merge_tol from each other will be merged. Usually 0.01 should be enough to deal with common numerical issues. Returns: IStructure / Structure """; 29, dotted_name; 30, dotted_name; 31, dotted_name; 32, dotted_name; 33, dotted_name; 34, dotted_name; 35, dotted_name; 36, dotted_name; 37, dotted_name; 38, dotted_name; 39, assignment; 40, comparison_operator:fmt == "cif"; 41, block; 42, elif_clause; 43, elif_clause; 44, elif_clause; 45, elif_clause; 46, elif_clause; 47, elif_clause; 48, else_clause; 49, identifier:sort; 50, block; 51, identifier:merge_tol; 52, block; 53, call; 54, identifier:pymatgen; 55, identifier:io; 56, identifier:cif; 57, identifier:CifParser; 58, identifier:pymatgen; 59, identifier:io; 60, identifier:vasp; 61, identifier:Poscar; 62, identifier:pymatgen; 63, identifier:io; 64, identifier:cssr; 65, identifier:Cssr; 66, identifier:pymatgen; 67, identifier:io; 68, identifier:xcrysden; 69, identifier:XSF; 70, identifier:pymatgen; 71, identifier:io; 72, identifier:atat; 73, identifier:Mcsqs; 74, identifier:fmt; 75, call; 76, identifier:fmt; 77, string:"cif"; 78, expression_statement; 79, expression_statement; 80, comparison_operator:fmt == "poscar"; 81, block; 82, comparison_operator:fmt == "cssr"; 83, block; 84, comparison_operator:fmt == "json"; 85, block; 86, comparison_operator:fmt == "yaml"; 87, block; 88, comparison_operator:fmt == "xsf"; 89, block; 90, comparison_operator:fmt == "mcsqs"; 91, block; 92, block; 93, expression_statement; 94, expression_statement; 95, attribute; 96, argument_list; 97, attribute; 98, argument_list; 99, assignment; 100, assignment; 101, identifier:fmt; 102, string:"poscar"; 103, expression_statement; 104, identifier:fmt; 105, string:"cssr"; 106, expression_statement; 107, expression_statement; 108, identifier:fmt; 109, string:"json"; 110, expression_statement; 111, expression_statement; 112, identifier:fmt; 113, string:"yaml"; 114, import_statement; 115, expression_statement; 116, expression_statement; 117, identifier:fmt; 118, string:"xsf"; 119, expression_statement; 120, identifier:fmt; 121, string:"mcsqs"; 122, expression_statement; 123, raise_statement; 124, assignment; 125, call; 126, identifier:cls; 127, identifier:from_sites; 128, identifier:s; 129, identifier:fmt; 130, identifier:lower; 131, identifier:parser; 132, call; 133, identifier:s; 134, subscript; 135, assignment; 136, assignment; 137, assignment; 138, assignment; 139, assignment; 140, aliased_import; 141, assignment; 142, assignment; 143, assignment; 144, assignment; 145, call; 146, identifier:s; 147, call; 148, attribute; 149, argument_list; 150, attribute; 151, argument_list; 152, call; 153, integer:0; 154, identifier:s; 155, attribute; 156, identifier:cssr; 157, call; 158, identifier:s; 159, attribute; 160, identifier:d; 161, call; 162, identifier:s; 163, call; 164, dotted_name; 165, identifier:yaml; 166, identifier:d; 167, call; 168, identifier:s; 169, call; 170, identifier:s; 171, attribute; 172, identifier:s; 173, call; 174, identifier:ValueError; 175, argument_list; 176, attribute; 177, argument_list; 178, identifier:s; 179, identifier:merge_sites; 180, identifier:merge_tol; 181, identifier:CifParser; 182, identifier:from_string; 183, identifier:input_string; 184, attribute; 185, argument_list; 186, call; 187, identifier:structure; 188, attribute; 189, argument_list; 190, identifier:cssr; 191, identifier:structure; 192, attribute; 193, argument_list; 194, attribute; 195, argument_list; 196, identifier:ruamel; 197, identifier:yaml; 198, attribute; 199, argument_list; 200, attribute; 201, argument_list; 202, call; 203, identifier:structure; 204, attribute; 205, argument_list; 206, binary_operator:"Unrecognized format `%s`!" % fmt; 207, identifier:s; 208, identifier:get_sorted_structure; 209, identifier:parser; 210, identifier:get_structures; 211, keyword_argument; 212, attribute; 213, argument_list; 214, identifier:Cssr; 215, identifier:from_string; 216, identifier:input_string; 217, identifier:json; 218, identifier:loads; 219, identifier:input_string; 220, identifier:Structure; 221, identifier:from_dict; 222, identifier:d; 223, identifier:yaml; 224, identifier:safe_load; 225, identifier:input_string; 226, identifier:Structure; 227, identifier:from_dict; 228, identifier:d; 229, attribute; 230, argument_list; 231, identifier:Mcsqs; 232, identifier:structure_from_string; 233, identifier:input_string; 234, string:"Unrecognized format `%s`!"; 235, identifier:fmt; 236, identifier:primitive; 237, identifier:primitive; 238, identifier:Poscar; 239, identifier:from_string; 240, identifier:input_string; 241, False; 242, keyword_argument; 243, identifier:XSF; 244, identifier:from_string; 245, identifier:input_string; 246, identifier:read_velocities; 247, False
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 3, 9; 3, 10; 4, 11; 4, 12; 4, 13; 4, 14; 4, 15; 4, 16; 4, 17; 4, 18; 4, 19; 4, 20; 4, 21; 8, 22; 8, 23; 9, 24; 9, 25; 10, 26; 10, 27; 11, 28; 12, 29; 12, 30; 13, 31; 13, 32; 14, 33; 14, 34; 15, 35; 15, 36; 16, 37; 16, 38; 17, 39; 18, 40; 18, 41; 18, 42; 18, 43; 18, 44; 18, 45; 18, 46; 18, 47; 18, 48; 19, 49; 19, 50; 20, 51; 20, 52; 21, 53; 29, 54; 29, 55; 29, 56; 30, 57; 31, 58; 31, 59; 31, 60; 32, 61; 33, 62; 33, 63; 33, 64; 34, 65; 35, 66; 35, 67; 35, 68; 36, 69; 37, 70; 37, 71; 37, 72; 38, 73; 39, 74; 39, 75; 40, 76; 40, 77; 41, 78; 41, 79; 42, 80; 42, 81; 43, 82; 43, 83; 44, 84; 44, 85; 45, 86; 45, 87; 46, 88; 46, 89; 47, 90; 47, 91; 48, 92; 50, 93; 52, 94; 53, 95; 53, 96; 75, 97; 75, 98; 78, 99; 79, 100; 80, 101; 80, 102; 81, 103; 82, 104; 82, 105; 83, 106; 83, 107; 84, 108; 84, 109; 85, 110; 85, 111; 86, 112; 86, 113; 87, 114; 87, 115; 87, 116; 88, 117; 88, 118; 89, 119; 90, 120; 90, 121; 91, 122; 92, 123; 93, 124; 94, 125; 95, 126; 95, 127; 96, 128; 97, 129; 97, 130; 99, 131; 99, 132; 100, 133; 100, 134; 103, 135; 106, 136; 107, 137; 110, 138; 111, 139; 114, 140; 115, 141; 116, 142; 119, 143; 122, 144; 123, 145; 124, 146; 124, 147; 125, 148; 125, 149; 132, 150; 132, 151; 134, 152; 134, 153; 135, 154; 135, 155; 136, 156; 136, 157; 137, 158; 137, 159; 138, 160; 138, 161; 139, 162; 139, 163; 140, 164; 140, 165; 141, 166; 141, 167; 142, 168; 142, 169; 143, 170; 143, 171; 144, 172; 144, 173; 145, 174; 145, 175; 147, 176; 147, 177; 148, 178; 148, 179; 149, 180; 150, 181; 150, 182; 151, 183; 152, 184; 152, 185; 155, 186; 155, 187; 157, 188; 157, 189; 159, 190; 159, 191; 161, 192; 161, 193; 163, 194; 163, 195; 164, 196; 164, 197; 167, 198; 167, 199; 169, 200; 169, 201; 171, 202; 171, 203; 173, 204; 173, 205; 175, 206; 176, 207; 176, 208; 184, 209; 184, 210; 185, 211; 186, 212; 186, 213; 188, 214; 188, 215; 189, 216; 192, 217; 192, 218; 193, 219; 194, 220; 194, 221; 195, 222; 198, 223; 198, 224; 199, 225; 200, 226; 200, 227; 201, 228; 202, 229; 202, 230; 204, 231; 204, 232; 205, 233; 206, 234; 206, 235; 211, 236; 211, 237; 212, 238; 212, 239; 213, 240; 213, 241; 213, 242; 229, 243; 229, 244; 230, 245; 242, 246; 242, 247
def from_str(cls, input_string, fmt, primitive=False, sort=False, merge_tol=0.0): """ Reads a structure from a string. Args: input_string (str): String to parse. fmt (str): A format specification. primitive (bool): Whether to find a primitive cell. Defaults to False. sort (bool): Whether to sort the sites in accordance to the default ordering criteria, i.e., electronegativity. merge_tol (float): If this is some positive number, sites that are within merge_tol from each other will be merged. Usually 0.01 should be enough to deal with common numerical issues. Returns: IStructure / Structure """ from pymatgen.io.cif import CifParser from pymatgen.io.vasp import Poscar from pymatgen.io.cssr import Cssr from pymatgen.io.xcrysden import XSF from pymatgen.io.atat import Mcsqs fmt = fmt.lower() if fmt == "cif": parser = CifParser.from_string(input_string) s = parser.get_structures(primitive=primitive)[0] elif fmt == "poscar": s = Poscar.from_string(input_string, False, read_velocities=False).structure elif fmt == "cssr": cssr = Cssr.from_string(input_string) s = cssr.structure elif fmt == "json": d = json.loads(input_string) s = Structure.from_dict(d) elif fmt == "yaml": import ruamel.yaml as yaml d = yaml.safe_load(input_string) s = Structure.from_dict(d) elif fmt == "xsf": s = XSF.from_string(input_string).structure elif fmt == "mcsqs": s = Mcsqs.structure_from_string(input_string) else: raise ValueError("Unrecognized format `%s`!" % fmt) if sort: s = s.get_sorted_structure() if merge_tol: s.merge_sites(merge_tol) return cls.from_sites(s)
0, module; 1, function_definition; 2, function_name:get_transition_chempots; 3, parameters; 4, block; 5, identifier:self; 6, identifier:element; 7, expression_statement; 8, if_statement; 9, expression_statement; 10, for_statement; 11, expression_statement; 12, for_statement; 13, expression_statement; 14, return_statement; 15, comment:""" Get the critical chemical potentials for an element in the Phase Diagram. Args: element: An element. Has to be in the PD in the first place. Returns: A sorted sequence of critical chemical potentials, from less negative to more negative. """; 16, comparison_operator:element not in self.elements; 17, block; 18, assignment; 19, identifier:facet; 20, attribute; 21, block; 22, assignment; 23, identifier:c; 24, call; 25, block; 26, call; 27, call; 28, identifier:element; 29, attribute; 30, raise_statement; 31, identifier:critical_chempots; 32, list; 33, identifier:self; 34, identifier:facets; 35, expression_statement; 36, expression_statement; 37, identifier:clean_pots; 38, list; 39, identifier:sorted; 40, argument_list; 41, if_statement; 42, attribute; 43, argument_list; 44, identifier:tuple; 45, argument_list; 46, identifier:self; 47, identifier:elements; 48, call; 49, assignment; 50, call; 51, identifier:critical_chempots; 52, comparison_operator:len(clean_pots) == 0; 53, block; 54, else_clause; 55, identifier:clean_pots; 56, identifier:reverse; 57, identifier:clean_pots; 58, identifier:ValueError; 59, argument_list; 60, identifier:chempots; 61, call; 62, attribute; 63, argument_list; 64, call; 65, integer:0; 66, expression_statement; 67, block; 68, concatenated_string; 69, attribute; 70, argument_list; 71, identifier:critical_chempots; 72, identifier:append; 73, subscript; 74, identifier:len; 75, argument_list; 76, call; 77, if_statement; 78, string:"get_transition_chempots can only be called with "; 79, string:"elements in the phase diagram."; 80, identifier:self; 81, identifier:_get_facet_chempots; 82, identifier:facet; 83, identifier:chempots; 84, identifier:element; 85, identifier:clean_pots; 86, attribute; 87, argument_list; 88, comparison_operator:abs(c - clean_pots[-1]) > PhaseDiagram.numerical_tol; 89, block; 90, identifier:clean_pots; 91, identifier:append; 92, identifier:c; 93, call; 94, attribute; 95, expression_statement; 96, identifier:abs; 97, argument_list; 98, identifier:PhaseDiagram; 99, identifier:numerical_tol; 100, call; 101, binary_operator:c - clean_pots[-1]; 102, attribute; 103, argument_list; 104, identifier:c; 105, subscript; 106, identifier:clean_pots; 107, identifier:append; 108, identifier:c; 109, identifier:clean_pots; 110, unary_operator; 111, integer:1
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 4, 14; 7, 15; 8, 16; 8, 17; 9, 18; 10, 19; 10, 20; 10, 21; 11, 22; 12, 23; 12, 24; 12, 25; 13, 26; 14, 27; 16, 28; 16, 29; 17, 30; 18, 31; 18, 32; 20, 33; 20, 34; 21, 35; 21, 36; 22, 37; 22, 38; 24, 39; 24, 40; 25, 41; 26, 42; 26, 43; 27, 44; 27, 45; 29, 46; 29, 47; 30, 48; 35, 49; 36, 50; 40, 51; 41, 52; 41, 53; 41, 54; 42, 55; 42, 56; 45, 57; 48, 58; 48, 59; 49, 60; 49, 61; 50, 62; 50, 63; 52, 64; 52, 65; 53, 66; 54, 67; 59, 68; 61, 69; 61, 70; 62, 71; 62, 72; 63, 73; 64, 74; 64, 75; 66, 76; 67, 77; 68, 78; 68, 79; 69, 80; 69, 81; 70, 82; 73, 83; 73, 84; 75, 85; 76, 86; 76, 87; 77, 88; 77, 89; 86, 90; 86, 91; 87, 92; 88, 93; 88, 94; 89, 95; 93, 96; 93, 97; 94, 98; 94, 99; 95, 100; 97, 101; 100, 102; 100, 103; 101, 104; 101, 105; 102, 106; 102, 107; 103, 108; 105, 109; 105, 110; 110, 111
def get_transition_chempots(self, element): """ Get the critical chemical potentials for an element in the Phase Diagram. Args: element: An element. Has to be in the PD in the first place. Returns: A sorted sequence of critical chemical potentials, from less negative to more negative. """ if element not in self.elements: raise ValueError("get_transition_chempots can only be called with " "elements in the phase diagram.") critical_chempots = [] for facet in self.facets: chempots = self._get_facet_chempots(facet) critical_chempots.append(chempots[element]) clean_pots = [] for c in sorted(critical_chempots): if len(clean_pots) == 0: clean_pots.append(c) else: if abs(c - clean_pots[-1]) > PhaseDiagram.numerical_tol: clean_pots.append(c) clean_pots.reverse() return tuple(clean_pots)
0, module; 1, function_definition; 2, function_name:from_dir; 3, parameters; 4, block; 5, identifier:cls; 6, identifier:top; 7, default_parameter; 8, default_parameter; 9, expression_statement; 10, expression_statement; 11, if_statement; 12, return_statement; 13, identifier:exts; 14, None; 15, identifier:exclude_dirs; 16, string:"_*"; 17, comment:""" Find all pseudos in the directory tree starting from top. Args: top: Top of the directory tree exts: List of files extensions. if exts == "all_files" we try to open all files in top exclude_dirs: Wildcard used to exclude directories. return: :class:`PseudoTable` sorted by atomic number Z. """; 18, assignment; 19, comparison_operator:exts == "all_files"; 20, block; 21, else_clause; 22, call; 23, identifier:pseudos; 24, list; 25, identifier:exts; 26, string:"all_files"; 27, for_statement; 28, if_statement; 29, expression_statement; 30, block; 31, attribute; 32, argument_list; 33, identifier:f; 34, list_comprehension; 35, block; 36, not_operator; 37, block; 38, call; 39, if_statement; 40, for_statement; 41, call; 42, identifier:sort_by_z; 43, call; 44, for_in_clause; 45, if_statement; 46, identifier:pseudos; 47, expression_statement; 48, return_statement; 49, attribute; 50, argument_list; 51, comparison_operator:exts is None; 52, block; 53, identifier:p; 54, call; 55, block; 56, identifier:cls; 57, argument_list; 58, attribute; 59, argument_list; 60, identifier:fn; 61, call; 62, call; 63, block; 64, call; 65, None; 66, identifier:logger; 67, identifier:info; 68, binary_operator:'Creating PseudoTable with %i pseudopotentials' % len(pseudos); 69, identifier:exts; 70, None; 71, expression_statement; 72, identifier:find_exts; 73, argument_list; 74, try_statement; 75, identifier:pseudos; 76, attribute; 77, identifier:join; 78, identifier:top; 79, identifier:fn; 80, attribute; 81, argument_list; 82, attribute; 83, argument_list; 84, try_statement; 85, attribute; 86, argument_list; 87, string; 88, call; 89, assignment; 90, identifier:top; 91, identifier:exts; 92, keyword_argument; 93, block; 94, except_clause; 95, identifier:os; 96, identifier:path; 97, identifier:os; 98, identifier:listdir; 99, identifier:top; 100, attribute; 101, identifier:isfile; 102, identifier:f; 103, block; 104, except_clause; 105, identifier:logger; 106, identifier:warning; 107, binary_operator:'No pseudopotentials parsed from folder %s' % top; 108, string_content:Creating PseudoTable with %i pseudopotentials; 109, identifier:len; 110, argument_list; 111, identifier:exts; 112, tuple; 113, identifier:exclude_dirs; 114, identifier:exclude_dirs; 115, expression_statement; 116, as_pattern; 117, block; 118, identifier:os; 119, identifier:path; 120, expression_statement; 121, if_statement; 122, block; 123, string; 124, identifier:top; 125, identifier:pseudos; 126, string:"psp8"; 127, call; 128, identifier:Exception; 129, as_pattern_target; 130, expression_statement; 131, assignment; 132, identifier:p; 133, block; 134, else_clause; 135, expression_statement; 136, string_content:No pseudopotentials parsed from folder %s; 137, attribute; 138, argument_list; 139, identifier:exc; 140, call; 141, identifier:p; 142, call; 143, expression_statement; 144, block; 145, call; 146, identifier:pseudos; 147, identifier:append; 148, call; 149, attribute; 150, argument_list; 151, attribute; 152, argument_list; 153, call; 154, expression_statement; 155, attribute; 156, argument_list; 157, attribute; 158, argument_list; 159, identifier:logger; 160, identifier:critical; 161, binary_operator:"Error in %s:\n%s" % (p, exc); 162, identifier:Pseudo; 163, identifier:from_file; 164, identifier:f; 165, attribute; 166, argument_list; 167, call; 168, identifier:logger; 169, identifier:info; 170, binary_operator:'Skipping file %s' % f; 171, identifier:Pseudo; 172, identifier:from_file; 173, identifier:p; 174, string:"Error in %s:\n%s"; 175, tuple; 176, identifier:pseudos; 177, identifier:append; 178, identifier:p; 179, attribute; 180, argument_list; 181, string; 182, identifier:f; 183, identifier:p; 184, identifier:exc; 185, identifier:logger; 186, identifier:info; 187, binary_operator:'Skipping file %s' % f; 188, string_content:Skipping file %s; 189, string; 190, identifier:f; 191, string_content:Skipping file %s
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 4, 9; 4, 10; 4, 11; 4, 12; 7, 13; 7, 14; 8, 15; 8, 16; 9, 17; 10, 18; 11, 19; 11, 20; 11, 21; 12, 22; 18, 23; 18, 24; 19, 25; 19, 26; 20, 27; 20, 28; 20, 29; 21, 30; 22, 31; 22, 32; 27, 33; 27, 34; 27, 35; 28, 36; 28, 37; 29, 38; 30, 39; 30, 40; 31, 41; 31, 42; 34, 43; 34, 44; 35, 45; 36, 46; 37, 47; 37, 48; 38, 49; 38, 50; 39, 51; 39, 52; 40, 53; 40, 54; 40, 55; 41, 56; 41, 57; 43, 58; 43, 59; 44, 60; 44, 61; 45, 62; 45, 63; 47, 64; 48, 65; 49, 66; 49, 67; 50, 68; 51, 69; 51, 70; 52, 71; 54, 72; 54, 73; 55, 74; 57, 75; 58, 76; 58, 77; 59, 78; 59, 79; 61, 80; 61, 81; 62, 82; 62, 83; 63, 84; 64, 85; 64, 86; 68, 87; 68, 88; 71, 89; 73, 90; 73, 91; 73, 92; 74, 93; 74, 94; 76, 95; 76, 96; 80, 97; 80, 98; 81, 99; 82, 100; 82, 101; 83, 102; 84, 103; 84, 104; 85, 105; 85, 106; 86, 107; 87, 108; 88, 109; 88, 110; 89, 111; 89, 112; 92, 113; 92, 114; 93, 115; 94, 116; 94, 117; 100, 118; 100, 119; 103, 120; 103, 121; 104, 122; 107, 123; 107, 124; 110, 125; 112, 126; 115, 127; 116, 128; 116, 129; 117, 130; 120, 131; 121, 132; 121, 133; 121, 134; 122, 135; 123, 136; 127, 137; 127, 138; 129, 139; 130, 140; 131, 141; 131, 142; 133, 143; 134, 144; 135, 145; 137, 146; 137, 147; 138, 148; 140, 149; 140, 150; 142, 151; 142, 152; 143, 153; 144, 154; 145, 155; 145, 156; 148, 157; 148, 158; 149, 159; 149, 160; 150, 161; 151, 162; 151, 163; 152, 164; 153, 165; 153, 166; 154, 167; 155, 168; 155, 169; 156, 170; 157, 171; 157, 172; 158, 173; 161, 174; 161, 175; 165, 176; 165, 177; 166, 178; 167, 179; 167, 180; 170, 181; 170, 182; 175, 183; 175, 184; 179, 185; 179, 186; 180, 187; 181, 188; 187, 189; 187, 190; 189, 191
def from_dir(cls, top, exts=None, exclude_dirs="_*"): """ Find all pseudos in the directory tree starting from top. Args: top: Top of the directory tree exts: List of files extensions. if exts == "all_files" we try to open all files in top exclude_dirs: Wildcard used to exclude directories. return: :class:`PseudoTable` sorted by atomic number Z. """ pseudos = [] if exts == "all_files": for f in [os.path.join(top, fn) for fn in os.listdir(top)]: if os.path.isfile(f): try: p = Pseudo.from_file(f) if p: pseudos.append(p) else: logger.info('Skipping file %s' % f) except: logger.info('Skipping file %s' % f) if not pseudos: logger.warning('No pseudopotentials parsed from folder %s' % top) return None logger.info('Creating PseudoTable with %i pseudopotentials' % len(pseudos)) else: if exts is None: exts=("psp8",) for p in find_exts(top, exts, exclude_dirs=exclude_dirs): try: pseudos.append(Pseudo.from_file(p)) except Exception as exc: logger.critical("Error in %s:\n%s" % (p, exc)) return cls(pseudos).sort_by_z()
0, module; 1, function_definition; 2, function_name:sorted; 3, parameters; 4, block; 5, identifier:self; 6, identifier:attrname; 7, default_parameter; 8, expression_statement; 9, expression_statement; 10, for_statement; 11, comment:# Sort attrs, and build new table with sorted pseudos.; 12, return_statement; 13, identifier:reverse; 14, False; 15, comment:""" Sort the table according to the value of attribute attrname. Return: New class:`PseudoTable` object """; 16, assignment; 17, pattern_list; 18, identifier:self; 19, block; 20, call; 21, identifier:attrs; 22, list; 23, identifier:i; 24, identifier:pseudo; 25, try_statement; 26, expression_statement; 27, attribute; 28, argument_list; 29, block; 30, except_clause; 31, call; 32, identifier:self; 33, identifier:__class__; 34, list_comprehension; 35, expression_statement; 36, identifier:AttributeError; 37, block; 38, attribute; 39, argument_list; 40, subscript; 41, for_in_clause; 42, assignment; 43, expression_statement; 44, identifier:attrs; 45, identifier:append; 46, tuple; 47, identifier:self; 48, subscript; 49, identifier:a; 50, call; 51, identifier:a; 52, call; 53, assignment; 54, identifier:i; 55, identifier:a; 56, identifier:a; 57, integer:0; 58, identifier:sorted; 59, argument_list; 60, identifier:getattr; 61, argument_list; 62, identifier:a; 63, attribute; 64, identifier:attrs; 65, keyword_argument; 66, keyword_argument; 67, identifier:pseudo; 68, identifier:attrname; 69, identifier:np; 70, identifier:inf; 71, identifier:key; 72, lambda; 73, identifier:reverse; 74, identifier:reverse; 75, lambda_parameters; 76, subscript; 77, identifier:t; 78, identifier:t; 79, integer:1
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 7, 13; 7, 14; 8, 15; 9, 16; 10, 17; 10, 18; 10, 19; 12, 20; 16, 21; 16, 22; 17, 23; 17, 24; 19, 25; 19, 26; 20, 27; 20, 28; 25, 29; 25, 30; 26, 31; 27, 32; 27, 33; 28, 34; 29, 35; 30, 36; 30, 37; 31, 38; 31, 39; 34, 40; 34, 41; 35, 42; 37, 43; 38, 44; 38, 45; 39, 46; 40, 47; 40, 48; 41, 49; 41, 50; 42, 51; 42, 52; 43, 53; 46, 54; 46, 55; 48, 56; 48, 57; 50, 58; 50, 59; 52, 60; 52, 61; 53, 62; 53, 63; 59, 64; 59, 65; 59, 66; 61, 67; 61, 68; 63, 69; 63, 70; 65, 71; 65, 72; 66, 73; 66, 74; 72, 75; 72, 76; 75, 77; 76, 78; 76, 79
def sorted(self, attrname, reverse=False): """ Sort the table according to the value of attribute attrname. Return: New class:`PseudoTable` object """ attrs = [] for i, pseudo in self: try: a = getattr(pseudo, attrname) except AttributeError: a = np.inf attrs.append((i, a)) # Sort attrs, and build new table with sorted pseudos. return self.__class__([self[a[0]] for a in sorted(attrs, key=lambda t: t[1], reverse=reverse)])