nodes
stringlengths
501
22.4k
edges
stringlengths
138
5.07k
code
stringlengths
108
19.3k
0, module; 1, function_definition; 2, function_name:get_sortkey; 3, parameters; 4, block; 5, identifier:table; 6, expression_statement; 7, comment:# Just pick the first column in the table in alphabetical order.; 8, comment:# Ideally we would get the primary key from bcdc api, but it doesn't; 9, comment:# seem to be available; 10, expression_statement; 11, return_statement; 12, comment:"""Get a field to sort by """; 13, assignment; 14, subscript; 15, identifier:wfs; 16, call; 17, call; 18, integer:0; 19, identifier:WebFeatureService; 20, argument_list; 21, identifier:sorted; 22, argument_list; 23, keyword_argument; 24, keyword_argument; 25, call; 26, identifier:url; 27, attribute; 28, identifier:version; 29, string:"2.0.0"; 30, attribute; 31, argument_list; 32, identifier:bcdata; 33, identifier:OWS_URL; 34, subscript; 35, identifier:keys; 36, call; 37, string:"properties"; 38, attribute; 39, argument_list; 40, identifier:wfs; 41, identifier:get_schema; 42, binary_operator:"pub:" + table; 43, string:"pub:"; 44, identifier:table
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 4, 6; 4, 7; 4, 8; 4, 9; 4, 10; 4, 11; 6, 12; 10, 13; 11, 14; 13, 15; 13, 16; 14, 17; 14, 18; 16, 19; 16, 20; 17, 21; 17, 22; 20, 23; 20, 24; 22, 25; 23, 26; 23, 27; 24, 28; 24, 29; 25, 30; 25, 31; 27, 32; 27, 33; 30, 34; 30, 35; 34, 36; 34, 37; 36, 38; 36, 39; 38, 40; 38, 41; 39, 42; 42, 43; 42, 44
def get_sortkey(table): """Get a field to sort by """ # Just pick the first column in the table in alphabetical order. # Ideally we would get the primary key from bcdc api, but it doesn't # seem to be available wfs = WebFeatureService(url=bcdata.OWS_URL, version="2.0.0") return sorted(wfs.get_schema("pub:" + table)["properties"].keys())[0]
0, module; 1, function_definition; 2, function_name:_get_sorted; 3, parameters; 4, block; 5, identifier:self; 6, identifier:resources; 7, expression_statement; 8, expression_statement; 9, for_statement; 10, return_statement; 11, comment:"""Order the resources by priority - the most specific paths come first. :param resources: List of :class:`wsgiservice.resource.Resource` classes to be served by this application. """; 12, assignment; 13, identifier:resource; 14, identifier:resources; 15, block; 16, list_comprehension; 17, identifier:tmp; 18, list; 19, expression_statement; 20, comment:# Each slash counts as 10 priority, each variable takes one away; 21, expression_statement; 22, expression_statement; 23, identifier:resource; 24, for_in_clause; 25, assignment; 26, assignment; 27, call; 28, pattern_list; 29, call; 30, identifier:path; 31, attribute; 32, identifier:priority; 33, binary_operator:path.count('/') * 10 - path.count('{'); 34, attribute; 35, argument_list; 36, identifier:prio; 37, identifier:resource; 38, identifier:reversed; 39, argument_list; 40, identifier:resource; 41, identifier:_path; 42, binary_operator:path.count('/') * 10; 43, call; 44, identifier:tmp; 45, identifier:append; 46, tuple; 47, call; 48, call; 49, integer:10; 50, attribute; 51, argument_list; 52, identifier:priority; 53, identifier:resource; 54, identifier:sorted; 55, argument_list; 56, attribute; 57, argument_list; 58, identifier:path; 59, identifier:count; 60, string; 61, identifier:tmp; 62, identifier:path; 63, identifier:count; 64, string; 65, string_content:{; 66, string_content:/
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 4, 9; 4, 10; 7, 11; 8, 12; 9, 13; 9, 14; 9, 15; 10, 16; 12, 17; 12, 18; 15, 19; 15, 20; 15, 21; 15, 22; 16, 23; 16, 24; 19, 25; 21, 26; 22, 27; 24, 28; 24, 29; 25, 30; 25, 31; 26, 32; 26, 33; 27, 34; 27, 35; 28, 36; 28, 37; 29, 38; 29, 39; 31, 40; 31, 41; 33, 42; 33, 43; 34, 44; 34, 45; 35, 46; 39, 47; 42, 48; 42, 49; 43, 50; 43, 51; 46, 52; 46, 53; 47, 54; 47, 55; 48, 56; 48, 57; 50, 58; 50, 59; 51, 60; 55, 61; 56, 62; 56, 63; 57, 64; 60, 65; 64, 66
def _get_sorted(self, resources): """Order the resources by priority - the most specific paths come first. :param resources: List of :class:`wsgiservice.resource.Resource` classes to be served by this application. """ tmp = [] for resource in resources: path = resource._path # Each slash counts as 10 priority, each variable takes one away priority = path.count('/') * 10 - path.count('{') tmp.append((priority, resource)) return [resource for prio, resource in reversed(sorted(tmp))]
0, module; 1, function_definition; 2, function_name:_group_models; 3, parameters; 4, block; 5, identifier:self; 6, expression_statement; 7, expression_statement; 8, comment:# Add models to appropriate list. We only aggregate models which; 9, comment:# completed successfully and have a valid AIC and BMD.; 10, for_statement; 11, comment:# Sort each list by the number of parameters; 12, function_definition; 13, for_statement; 14, return_statement; 15, comment:""" If AIC and BMD are numeric and identical, then treat models as identical. Returns a list of lists. The outer list is a list of related models, the inner list contains each individual model, sorted by the number of parameters in ascending order. This is required because in some cases, a higher-order model may not use some parameters and can effectively collapse to a lower order model (for example, a 2nd order polynomial and power model may collapse to a linear model). In summary outputs, we may want to present all models in one row, since they are the same model effectively. """; 16, assignment; 17, pattern_list; 18, call; 19, block; 20, function_name:_get_num_params; 21, parameters; 22, block; 23, pattern_list; 24, call; 25, block; 26, call; 27, identifier:od; 28, call; 29, identifier:i; 30, identifier:model; 31, identifier:enumerate; 32, argument_list; 33, expression_statement; 34, if_statement; 35, identifier:model; 36, return_statement; 37, identifier:key; 38, identifier:_models; 39, attribute; 40, argument_list; 41, expression_statement; 42, identifier:list; 43, argument_list; 44, identifier:OrderedDict; 45, argument_list; 46, attribute; 47, assignment; 48, boolean_operator; 49, block; 50, else_clause; 51, parenthesized_expression; 52, identifier:od; 53, identifier:items; 54, call; 55, call; 56, identifier:self; 57, identifier:models; 58, identifier:output; 59, call; 60, boolean_operator; 61, comparison_operator:output["BMD"] > 0; 62, expression_statement; 63, if_statement; 64, block; 65, conditional_expression:len(model.output["parameters"]) if hasattr(model, "output") and "parameters" in model.output else 0; 66, attribute; 67, argument_list; 68, attribute; 69, argument_list; 70, identifier:getattr; 71, argument_list; 72, call; 73, call; 74, subscript; 75, integer:0; 76, assignment; 77, comparison_operator:key in od; 78, block; 79, else_clause; 80, expression_statement; 81, call; 82, boolean_operator; 83, integer:0; 84, identifier:_models; 85, identifier:sort; 86, keyword_argument; 87, identifier:od; 88, identifier:values; 89, identifier:model; 90, string:"output"; 91, dictionary; 92, attribute; 93, argument_list; 94, attribute; 95, argument_list; 96, identifier:output; 97, string:"BMD"; 98, identifier:key; 99, call; 100, identifier:key; 101, identifier:od; 102, expression_statement; 103, block; 104, assignment; 105, identifier:len; 106, argument_list; 107, call; 108, comparison_operator:"parameters" in model.output; 109, identifier:key; 110, identifier:_get_num_params; 111, identifier:output; 112, identifier:get; 113, string:"AIC"; 114, identifier:output; 115, identifier:get; 116, string:"BMD"; 117, attribute; 118, argument_list; 119, call; 120, expression_statement; 121, subscript; 122, list; 123, subscript; 124, identifier:hasattr; 125, argument_list; 126, string:"parameters"; 127, attribute; 128, string:"{}-{}"; 129, identifier:format; 130, subscript; 131, subscript; 132, attribute; 133, argument_list; 134, assignment; 135, identifier:od; 136, identifier:i; 137, identifier:model; 138, attribute; 139, string:"parameters"; 140, identifier:model; 141, string:"output"; 142, identifier:model; 143, identifier:output; 144, identifier:output; 145, string:"AIC"; 146, identifier:output; 147, string:"BMD"; 148, subscript; 149, identifier:append; 150, identifier:model; 151, subscript; 152, list; 153, identifier:model; 154, identifier:output; 155, identifier:od; 156, identifier:key; 157, identifier:od; 158, identifier:key; 159, identifier:model
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 4, 6; 4, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 4, 14; 6, 15; 7, 16; 10, 17; 10, 18; 10, 19; 12, 20; 12, 21; 12, 22; 13, 23; 13, 24; 13, 25; 14, 26; 16, 27; 16, 28; 17, 29; 17, 30; 18, 31; 18, 32; 19, 33; 19, 34; 21, 35; 22, 36; 23, 37; 23, 38; 24, 39; 24, 40; 25, 41; 26, 42; 26, 43; 28, 44; 28, 45; 32, 46; 33, 47; 34, 48; 34, 49; 34, 50; 36, 51; 39, 52; 39, 53; 41, 54; 43, 55; 46, 56; 46, 57; 47, 58; 47, 59; 48, 60; 48, 61; 49, 62; 49, 63; 50, 64; 51, 65; 54, 66; 54, 67; 55, 68; 55, 69; 59, 70; 59, 71; 60, 72; 60, 73; 61, 74; 61, 75; 62, 76; 63, 77; 63, 78; 63, 79; 64, 80; 65, 81; 65, 82; 65, 83; 66, 84; 66, 85; 67, 86; 68, 87; 68, 88; 71, 89; 71, 90; 71, 91; 72, 92; 72, 93; 73, 94; 73, 95; 74, 96; 74, 97; 76, 98; 76, 99; 77, 100; 77, 101; 78, 102; 79, 103; 80, 104; 81, 105; 81, 106; 82, 107; 82, 108; 86, 109; 86, 110; 92, 111; 92, 112; 93, 113; 94, 114; 94, 115; 95, 116; 99, 117; 99, 118; 102, 119; 103, 120; 104, 121; 104, 122; 106, 123; 107, 124; 107, 125; 108, 126; 108, 127; 117, 128; 117, 129; 118, 130; 118, 131; 119, 132; 119, 133; 120, 134; 121, 135; 121, 136; 122, 137; 123, 138; 123, 139; 125, 140; 125, 141; 127, 142; 127, 143; 130, 144; 130, 145; 131, 146; 131, 147; 132, 148; 132, 149; 133, 150; 134, 151; 134, 152; 138, 153; 138, 154; 148, 155; 148, 156; 151, 157; 151, 158; 152, 159
def _group_models(self): """ If AIC and BMD are numeric and identical, then treat models as identical. Returns a list of lists. The outer list is a list of related models, the inner list contains each individual model, sorted by the number of parameters in ascending order. This is required because in some cases, a higher-order model may not use some parameters and can effectively collapse to a lower order model (for example, a 2nd order polynomial and power model may collapse to a linear model). In summary outputs, we may want to present all models in one row, since they are the same model effectively. """ od = OrderedDict() # Add models to appropriate list. We only aggregate models which # completed successfully and have a valid AIC and BMD. for i, model in enumerate(self.models): output = getattr(model, "output", {}) if output.get("AIC") and output.get("BMD") and output["BMD"] > 0: key = "{}-{}".format(output["AIC"], output["BMD"]) if key in od: od[key].append(model) else: od[key] = [model] else: od[i] = [model] # Sort each list by the number of parameters def _get_num_params(model): return ( len(model.output["parameters"]) if hasattr(model, "output") and "parameters" in model.output else 0 ) for key, _models in od.items(): _models.sort(key=_get_num_params) return list(od.values())
0, module; 1, function_definition; 2, function_name:search; 3, parameters; 4, block; 5, identifier:term; 6, default_parameter; 7, default_parameter; 8, default_parameter; 9, default_parameter; 10, expression_statement; 11, expression_statement; 12, expression_statement; 13, return_statement; 14, identifier:category; 15, attribute; 16, identifier:pages; 17, integer:1; 18, identifier:sort; 19, None; 20, identifier:order; 21, None; 22, comment:"""Return a search result for term in category. Can also be sorted and span multiple pages."""; 23, assignment; 24, call; 25, identifier:s; 26, identifier:Categories; 27, identifier:ALL; 28, identifier:s; 29, call; 30, attribute; 31, argument_list; 32, identifier:Search; 33, argument_list; 34, identifier:s; 35, identifier:search; 36, keyword_argument; 37, keyword_argument; 38, keyword_argument; 39, keyword_argument; 40, keyword_argument; 41, identifier:term; 42, identifier:term; 43, identifier:category; 44, identifier:category; 45, identifier:pages; 46, identifier:pages; 47, identifier:sort; 48, identifier:sort; 49, identifier:order; 50, identifier:order
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 3, 9; 4, 10; 4, 11; 4, 12; 4, 13; 6, 14; 6, 15; 7, 16; 7, 17; 8, 18; 8, 19; 9, 20; 9, 21; 10, 22; 11, 23; 12, 24; 13, 25; 15, 26; 15, 27; 23, 28; 23, 29; 24, 30; 24, 31; 29, 32; 29, 33; 30, 34; 30, 35; 31, 36; 31, 37; 31, 38; 31, 39; 31, 40; 36, 41; 36, 42; 37, 43; 37, 44; 38, 45; 38, 46; 39, 47; 39, 48; 40, 49; 40, 50
def search(term, category=Categories.ALL, pages=1, sort=None, order=None): """Return a search result for term in category. Can also be sorted and span multiple pages.""" s = Search() s.search(term=term, category=category, pages=pages, sort=sort, order=order) return s
0, module; 1, function_definition; 2, function_name:popular; 3, parameters; 4, block; 5, default_parameter; 6, default_parameter; 7, expression_statement; 8, expression_statement; 9, expression_statement; 10, return_statement; 11, identifier:category; 12, None; 13, identifier:sortOption; 14, string:"title"; 15, comment:"""Return a search result containing torrents appearing on the KAT home page. Can be categorized. Cannot be sorted or contain multiple pages"""; 16, assignment; 17, call; 18, identifier:s; 19, identifier:s; 20, call; 21, attribute; 22, argument_list; 23, identifier:Search; 24, argument_list; 25, identifier:s; 26, identifier:popular; 27, identifier:category; 28, identifier:sortOption
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 4, 9; 4, 10; 5, 11; 5, 12; 6, 13; 6, 14; 7, 15; 8, 16; 9, 17; 10, 18; 16, 19; 16, 20; 17, 21; 17, 22; 20, 23; 20, 24; 21, 25; 21, 26; 22, 27; 22, 28
def popular(category=None, sortOption = "title"): """Return a search result containing torrents appearing on the KAT home page. Can be categorized. Cannot be sorted or contain multiple pages""" s = Search() s.popular(category, sortOption) return s
0, module; 1, function_definition; 2, function_name:recent; 3, parameters; 4, block; 5, default_parameter; 6, default_parameter; 7, default_parameter; 8, default_parameter; 9, expression_statement; 10, expression_statement; 11, expression_statement; 12, return_statement; 13, identifier:category; 14, None; 15, identifier:pages; 16, integer:1; 17, identifier:sort; 18, None; 19, identifier:order; 20, None; 21, comment:"""Return most recently added torrents. Can be sorted and categorized and contain multiple pages."""; 22, assignment; 23, call; 24, identifier:s; 25, identifier:s; 26, call; 27, attribute; 28, argument_list; 29, identifier:Search; 30, argument_list; 31, identifier:s; 32, identifier:recent; 33, identifier:category; 34, identifier:pages; 35, identifier:sort; 36, identifier:order
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 4, 9; 4, 10; 4, 11; 4, 12; 5, 13; 5, 14; 6, 15; 6, 16; 7, 17; 7, 18; 8, 19; 8, 20; 9, 21; 10, 22; 11, 23; 12, 24; 22, 25; 22, 26; 23, 27; 23, 28; 26, 29; 26, 30; 27, 31; 27, 32; 28, 33; 28, 34; 28, 35; 28, 36
def recent(category=None, pages=1, sort=None, order=None): """Return most recently added torrents. Can be sorted and categorized and contain multiple pages.""" s = Search() s.recent(category, pages, sort, order) return s
0, module; 1, function_definition; 2, function_name:pick_action_todo; 3, parameters; 4, block; 5, expression_statement; 6, for_statement; 7, return_statement; 8, comment:""" only for testing and AI - user will usually choose an action Sort of works """; 9, pattern_list; 10, call; 11, comment:#print('todo = ', todo); 12, block; 13, subscript; 14, identifier:ndx; 15, identifier:todo; 16, identifier:enumerate; 17, argument_list; 18, if_statement; 19, identifier:actions; 20, integer:3; 21, identifier:things_to_do; 22, call; 23, block; 24, identifier:roll_dice; 25, argument_list; 26, expression_statement; 27, if_statement; 28, if_statement; 29, subscript; 30, assignment; 31, boolean_operator; 32, block; 33, boolean_operator; 34, block; 35, identifier:todo; 36, string:"chance"; 37, identifier:cur_act; 38, subscript; 39, comparison_operator:todo["WHERE_COL"] == "energy"; 40, comparison_operator:my_char["energy"] > todo["WHERE_VAL"]; 41, return_statement; 42, comparison_operator:todo["WHERE_COL"] == "gold"; 43, comparison_operator:my_char["gold"] > todo["WHERE_VAL"]; 44, return_statement; 45, identifier:actions; 46, call; 47, subscript; 48, string:"energy"; 49, subscript; 50, subscript; 51, identifier:cur_act; 52, subscript; 53, string:"gold"; 54, subscript; 55, subscript; 56, identifier:cur_act; 57, identifier:get_action_by_name; 58, argument_list; 59, identifier:todo; 60, string:"WHERE_COL"; 61, identifier:my_char; 62, string:"energy"; 63, identifier:todo; 64, string:"WHERE_VAL"; 65, identifier:todo; 66, string:"WHERE_COL"; 67, identifier:my_char; 68, string:"gold"; 69, identifier:todo; 70, string:"WHERE_VAL"; 71, subscript; 72, identifier:todo; 73, string:"name"
0, 1; 1, 2; 1, 3; 1, 4; 4, 5; 4, 6; 4, 7; 5, 8; 6, 9; 6, 10; 6, 11; 6, 12; 7, 13; 9, 14; 9, 15; 10, 16; 10, 17; 12, 18; 13, 19; 13, 20; 17, 21; 18, 22; 18, 23; 22, 24; 22, 25; 23, 26; 23, 27; 23, 28; 25, 29; 26, 30; 27, 31; 27, 32; 28, 33; 28, 34; 29, 35; 29, 36; 30, 37; 30, 38; 31, 39; 31, 40; 32, 41; 33, 42; 33, 43; 34, 44; 38, 45; 38, 46; 39, 47; 39, 48; 40, 49; 40, 50; 41, 51; 42, 52; 42, 53; 43, 54; 43, 55; 44, 56; 46, 57; 46, 58; 47, 59; 47, 60; 49, 61; 49, 62; 50, 63; 50, 64; 52, 65; 52, 66; 54, 67; 54, 68; 55, 69; 55, 70; 58, 71; 71, 72; 71, 73
def pick_action_todo(): """ only for testing and AI - user will usually choose an action Sort of works """ for ndx, todo in enumerate(things_to_do): #print('todo = ', todo) if roll_dice(todo["chance"]): cur_act = actions[get_action_by_name(todo["name"])] if todo["WHERE_COL"] == "energy" and my_char["energy"] > todo["WHERE_VAL"]: return cur_act if todo["WHERE_COL"] == "gold" and my_char["gold"] > todo["WHERE_VAL"]: return cur_act return actions[3]
0, module; 1, function_definition; 2, function_name:power; 3, parameters; 4, block; 5, identifier:set_; 6, expression_statement; 7, expression_statement; 8, expression_statement; 9, return_statement; 10, comment:"""Returns all subsets of given set. :return: Powerset of given set, i.e. iterable containing all its subsets, sorted by ascending cardinality. """; 11, call; 12, assignment; 13, call; 14, identifier:ensure_countable; 15, argument_list; 16, identifier:result; 17, call; 18, identifier:_harmonize_subset_types; 19, argument_list; 20, identifier:set_; 21, attribute; 22, generator_expression; 23, identifier:set_; 24, identifier:result; 25, identifier:chain; 26, identifier:from_iterable; 27, call; 28, for_in_clause; 29, identifier:combinations; 30, argument_list; 31, identifier:r; 32, call; 33, identifier:set_; 34, identifier:r; 35, identifier:xrange; 36, argument_list; 37, binary_operator:len(set_) + 1; 38, call; 39, integer:1; 40, identifier:len; 41, argument_list; 42, identifier:set_
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 4, 6; 4, 7; 4, 8; 4, 9; 6, 10; 7, 11; 8, 12; 9, 13; 11, 14; 11, 15; 12, 16; 12, 17; 13, 18; 13, 19; 15, 20; 17, 21; 17, 22; 19, 23; 19, 24; 21, 25; 21, 26; 22, 27; 22, 28; 27, 29; 27, 30; 28, 31; 28, 32; 30, 33; 30, 34; 32, 35; 32, 36; 36, 37; 37, 38; 37, 39; 38, 40; 38, 41; 41, 42
def power(set_): """Returns all subsets of given set. :return: Powerset of given set, i.e. iterable containing all its subsets, sorted by ascending cardinality. """ ensure_countable(set_) result = chain.from_iterable(combinations(set_, r) for r in xrange(len(set_) + 1)) return _harmonize_subset_types(set_, result)
0, module; 1, function_definition; 2, function_name:to_json; 3, parameters; 4, block; 5, identifier:self; 6, default_parameter; 7, default_parameter; 8, expression_statement; 9, return_statement; 10, identifier:indent; 11, None; 12, identifier:sort_keys; 13, True; 14, comment:"""Return a JSON string representation of this instance :param indent: specify an indent level or a string used to indent each level :param sort_keys: the output is sorted by key """; 15, call; 16, attribute; 17, argument_list; 18, identifier:json; 19, identifier:dumps; 20, call; 21, keyword_argument; 22, keyword_argument; 23, attribute; 24, argument_list; 25, identifier:indent; 26, identifier:indent; 27, identifier:sort_keys; 28, identifier:sort_keys; 29, identifier:self; 30, identifier:to_dict
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 4, 8; 4, 9; 6, 10; 6, 11; 7, 12; 7, 13; 8, 14; 9, 15; 15, 16; 15, 17; 16, 18; 16, 19; 17, 20; 17, 21; 17, 22; 20, 23; 20, 24; 21, 25; 21, 26; 22, 27; 22, 28; 23, 29; 23, 30
def to_json(self, indent=None, sort_keys=True): """Return a JSON string representation of this instance :param indent: specify an indent level or a string used to indent each level :param sort_keys: the output is sorted by key """ return json.dumps(self.to_dict(), indent=indent, sort_keys=sort_keys)
0, module; 1, function_definition; 2, function_name:bin_spikes; 3, parameters; 4, block; 5, identifier:spike_times; 6, identifier:binsz; 7, expression_statement; 8, expression_statement; 9, for_statement; 10, return_statement; 11, comment:"""Sort spike times into bins :param spike_times: times of spike instances :type spike_times: list :param binsz: length of time bin to use :type binsz: float :returns: list of bin indicies, one for each element in spike_times """; 12, assignment; 13, pattern_list; 14, call; 15, comment:# around to fix rounding errors; 16, block; 17, identifier:bins; 18, identifier:bins; 19, call; 20, identifier:i; 21, identifier:stime; 22, identifier:enumerate; 23, argument_list; 24, expression_statement; 25, attribute; 26, argument_list; 27, identifier:spike_times; 28, assignment; 29, identifier:np; 30, identifier:empty; 31, tuple; 32, keyword_argument; 33, subscript; 34, call; 35, call; 36, identifier:dtype; 37, identifier:int; 38, identifier:bins; 39, identifier:i; 40, attribute; 41, argument_list; 42, identifier:len; 43, argument_list; 44, identifier:np; 45, identifier:floor; 46, call; 47, identifier:spike_times; 48, attribute; 49, argument_list; 50, identifier:np; 51, identifier:around; 52, binary_operator:stime/binsz; 53, integer:5; 54, identifier:stime; 55, identifier:binsz
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 4, 9; 4, 10; 7, 11; 8, 12; 9, 13; 9, 14; 9, 15; 9, 16; 10, 17; 12, 18; 12, 19; 13, 20; 13, 21; 14, 22; 14, 23; 16, 24; 19, 25; 19, 26; 23, 27; 24, 28; 25, 29; 25, 30; 26, 31; 26, 32; 28, 33; 28, 34; 31, 35; 32, 36; 32, 37; 33, 38; 33, 39; 34, 40; 34, 41; 35, 42; 35, 43; 40, 44; 40, 45; 41, 46; 43, 47; 46, 48; 46, 49; 48, 50; 48, 51; 49, 52; 49, 53; 52, 54; 52, 55
def bin_spikes(spike_times, binsz): """Sort spike times into bins :param spike_times: times of spike instances :type spike_times: list :param binsz: length of time bin to use :type binsz: float :returns: list of bin indicies, one for each element in spike_times """ bins = np.empty((len(spike_times),), dtype=int) for i, stime in enumerate(spike_times): # around to fix rounding errors bins[i] = np.floor(np.around(stime/binsz, 5)) return bins
0, module; 1, function_definition; 2, function_name:make_dynamic_class; 3, parameters; 4, block; 5, identifier:typename; 6, identifier:field_names; 7, expression_statement; 8, if_statement; 9, expression_statement; 10, expression_statement; 11, expression_statement; 12, expression_statement; 13, expression_statement; 14, expression_statement; 15, expression_statement; 16, expression_statement; 17, expression_statement; 18, expression_statement; 19, return_statement; 20, comment:"""a factory function to create type dynamically The factory function is used by :func:`objson.load` and :func:`objson.loads`. Creating the object deserialize from json string. The inspiration come from :func:`collections.namedtuple`. the difference is that I don't your the class template to define a dynamic class, instead of, I use the :func:`type` factory function. Class prototype definition :: class JsonObject(object): __identifier__ = "dolphin" def __init__(self, kv=None): if kv is None: kv = dict() self.__dict__.update(kv) def __getitem__(self, key): return self.__dict__.get(key) def __setitem__(self, key, value): self.__dict__[key] = value def __iter__(self): return iter(self.__dict__) def __repr__(self): keys = sorted(self.__dict__.keys()) text = ', '.join(["%s=%r" % (key, self[key]) for key in keys]) return '{%s}' % text name=_property('name') Basic Usage :: from objson import make_dynamic_class, dumps Entity = make_dynamic_class('Entity', 'name, sex, age') entity = Entity() entity.name, entity.sex, entity.age = 'benjamin', 'male', 21 dumps(entity) :param typename: dynamic class's name :param field_names: a string :class:`list` and a field name string which separated by comma, ``['name', 'sex']`` or ``"name,sex"`` :return: a class type """; 21, call; 22, block; 23, assignment; 24, assignment; 25, assignment; 26, assignment; 27, assignment; 28, assignment; 29, assignment; 30, assignment; 31, assignment; 32, assignment; 33, call; 34, identifier:isinstance; 35, argument_list; 36, expression_statement; 37, identifier:field_names; 38, call; 39, identifier:safe_fields_names; 40, call; 41, identifier:attr; 42, call; 43, subscript; 44, identifier:typename; 45, subscript; 46, string:"dolphin"; 47, subscript; 48, identifier:_dynamic__init; 49, subscript; 50, lambda; 51, subscript; 52, identifier:_dynamic__setitem; 53, subscript; 54, lambda; 55, subscript; 56, lambda; 57, identifier:type; 58, argument_list; 59, identifier:field_names; 60, identifier:basestring; 61, assignment; 62, identifier:map; 63, argument_list; 64, identifier:map; 65, argument_list; 66, identifier:dict; 67, generator_expression; 68, identifier:attr; 69, string; 70, identifier:attr; 71, string; 72, identifier:attr; 73, string; 74, identifier:attr; 75, string; 76, lambda_parameters; 77, call; 78, identifier:attr; 79, string; 80, identifier:attr; 81, string; 82, lambda_parameters; 83, call; 84, identifier:attr; 85, string; 86, lambda_parameters; 87, binary_operator:"{%s}" % (', '.join([ "%s=%r" % (key, self[key]) for key in sorted(self.__dict__.keys()) ])); 88, identifier:typename; 89, tuple; 90, identifier:attr; 91, identifier:field_names; 92, call; 93, identifier:str; 94, identifier:field_names; 95, identifier:_encode_property_name; 96, identifier:field_names; 97, tuple; 98, for_in_clause; 99, string_content:__doc__; 100, string_content:__identifier__; 101, string_content:__init__; 102, string_content:__getitem__; 103, identifier:self; 104, identifier:key; 105, attribute; 106, argument_list; 107, string_content:__setitem__; 108, string_content:__iter__; 109, identifier:self; 110, identifier:iter; 111, argument_list; 112, string_content:__repr__; 113, identifier:self; 114, string:"{%s}"; 115, parenthesized_expression; 116, identifier:object; 117, attribute; 118, argument_list; 119, identifier:safe_name; 120, call; 121, pattern_list; 122, call; 123, attribute; 124, identifier:get; 125, identifier:key; 126, attribute; 127, call; 128, call; 129, identifier:split; 130, identifier:_property; 131, argument_list; 132, identifier:name; 133, identifier:safe_name; 134, identifier:zip; 135, argument_list; 136, identifier:self; 137, identifier:__dict__; 138, identifier:self; 139, identifier:__dict__; 140, attribute; 141, argument_list; 142, attribute; 143, argument_list; 144, identifier:name; 145, identifier:field_names; 146, identifier:safe_fields_names; 147, string; 148, identifier:join; 149, list_comprehension; 150, identifier:field_names; 151, identifier:replace; 152, string:","; 153, string:" "; 154, string_content:,; 155, binary_operator:"%s=%r" % (key, self[key]); 156, for_in_clause; 157, string:"%s=%r"; 158, tuple; 159, identifier:key; 160, call; 161, identifier:key; 162, subscript; 163, identifier:sorted; 164, argument_list; 165, identifier:self; 166, identifier:key; 167, call; 168, attribute; 169, argument_list; 170, attribute; 171, identifier:keys; 172, identifier:self; 173, identifier:__dict__
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 4, 14; 4, 15; 4, 16; 4, 17; 4, 18; 4, 19; 7, 20; 8, 21; 8, 22; 9, 23; 10, 24; 11, 25; 12, 26; 13, 27; 14, 28; 15, 29; 16, 30; 17, 31; 18, 32; 19, 33; 21, 34; 21, 35; 22, 36; 23, 37; 23, 38; 24, 39; 24, 40; 25, 41; 25, 42; 26, 43; 26, 44; 27, 45; 27, 46; 28, 47; 28, 48; 29, 49; 29, 50; 30, 51; 30, 52; 31, 53; 31, 54; 32, 55; 32, 56; 33, 57; 33, 58; 35, 59; 35, 60; 36, 61; 38, 62; 38, 63; 40, 64; 40, 65; 42, 66; 42, 67; 43, 68; 43, 69; 45, 70; 45, 71; 47, 72; 47, 73; 49, 74; 49, 75; 50, 76; 50, 77; 51, 78; 51, 79; 53, 80; 53, 81; 54, 82; 54, 83; 55, 84; 55, 85; 56, 86; 56, 87; 58, 88; 58, 89; 58, 90; 61, 91; 61, 92; 63, 93; 63, 94; 65, 95; 65, 96; 67, 97; 67, 98; 69, 99; 71, 100; 73, 101; 75, 102; 76, 103; 76, 104; 77, 105; 77, 106; 79, 107; 81, 108; 82, 109; 83, 110; 83, 111; 85, 112; 86, 113; 87, 114; 87, 115; 89, 116; 92, 117; 92, 118; 97, 119; 97, 120; 98, 121; 98, 122; 105, 123; 105, 124; 106, 125; 111, 126; 115, 127; 117, 128; 117, 129; 120, 130; 120, 131; 121, 132; 121, 133; 122, 134; 122, 135; 123, 136; 123, 137; 126, 138; 126, 139; 127, 140; 127, 141; 128, 142; 128, 143; 131, 144; 135, 145; 135, 146; 140, 147; 140, 148; 141, 149; 142, 150; 142, 151; 143, 152; 143, 153; 147, 154; 149, 155; 149, 156; 155, 157; 155, 158; 156, 159; 156, 160; 158, 161; 158, 162; 160, 163; 160, 164; 162, 165; 162, 166; 164, 167; 167, 168; 167, 169; 168, 170; 168, 171; 170, 172; 170, 173
def make_dynamic_class(typename, field_names): """a factory function to create type dynamically The factory function is used by :func:`objson.load` and :func:`objson.loads`. Creating the object deserialize from json string. The inspiration come from :func:`collections.namedtuple`. the difference is that I don't your the class template to define a dynamic class, instead of, I use the :func:`type` factory function. Class prototype definition :: class JsonObject(object): __identifier__ = "dolphin" def __init__(self, kv=None): if kv is None: kv = dict() self.__dict__.update(kv) def __getitem__(self, key): return self.__dict__.get(key) def __setitem__(self, key, value): self.__dict__[key] = value def __iter__(self): return iter(self.__dict__) def __repr__(self): keys = sorted(self.__dict__.keys()) text = ', '.join(["%s=%r" % (key, self[key]) for key in keys]) return '{%s}' % text name=_property('name') Basic Usage :: from objson import make_dynamic_class, dumps Entity = make_dynamic_class('Entity', 'name, sex, age') entity = Entity() entity.name, entity.sex, entity.age = 'benjamin', 'male', 21 dumps(entity) :param typename: dynamic class's name :param field_names: a string :class:`list` and a field name string which separated by comma, ``['name', 'sex']`` or ``"name,sex"`` :return: a class type """ if isinstance(field_names, basestring): field_names = field_names.replace(",", " ").split() field_names = map(str, field_names) safe_fields_names = map(_encode_property_name, field_names) attr = dict((safe_name, _property(name)) for name, safe_name in zip(field_names, safe_fields_names)) attr['__doc__'] = typename attr['__identifier__'] = "dolphin" attr['__init__'] = _dynamic__init attr['__getitem__'] = lambda self, key: self.__dict__.get(key) attr['__setitem__'] = _dynamic__setitem attr['__iter__'] = lambda self: iter(self.__dict__) attr['__repr__'] = lambda self: "{%s}" % (', '.join([ "%s=%r" % (key, self[key]) for key in sorted(self.__dict__.keys()) ])) return type(typename, (object,), attr)
0, module; 1, function_definition; 2, function_name:list_processed_parameter_group_histogram; 3, parameters; 4, block; 5, identifier:self; 6, default_parameter; 7, default_parameter; 8, default_parameter; 9, default_parameter; 10, expression_statement; 11, expression_statement; 12, if_statement; 13, if_statement; 14, if_statement; 15, if_statement; 16, return_statement; 17, identifier:group; 18, None; 19, identifier:start; 20, None; 21, identifier:stop; 22, None; 23, identifier:merge_time; 24, integer:20; 25, comment:""" Reads index records related to processed parameter groups between the specified start and stop time. Each iteration returns a chunk of chronologically-sorted records. :param float merge_time: Maximum gap in seconds before two consecutive index records are merged together. :rtype: ~collections.Iterable[.IndexGroup] """; 26, assignment; 27, comparison_operator:group is not None; 28, block; 29, comparison_operator:start is not None; 30, block; 31, comparison_operator:stop is not None; 32, block; 33, comparison_operator:merge_time is not None; 34, block; 35, call; 36, identifier:params; 37, dictionary; 38, identifier:group; 39, None; 40, expression_statement; 41, identifier:start; 42, None; 43, expression_statement; 44, identifier:stop; 45, None; 46, expression_statement; 47, identifier:merge_time; 48, None; 49, expression_statement; 50, attribute; 51, argument_list; 52, assignment; 53, assignment; 54, assignment; 55, assignment; 56, identifier:pagination; 57, identifier:Iterator; 58, keyword_argument; 59, keyword_argument; 60, keyword_argument; 61, keyword_argument; 62, keyword_argument; 63, keyword_argument; 64, subscript; 65, identifier:group; 66, subscript; 67, call; 68, subscript; 69, call; 70, subscript; 71, call; 72, identifier:client; 73, attribute; 74, identifier:path; 75, call; 76, identifier:params; 77, identifier:params; 78, identifier:response_class; 79, attribute; 80, identifier:items_key; 81, string; 82, identifier:item_mapper; 83, identifier:IndexGroup; 84, identifier:params; 85, string; 86, identifier:params; 87, string; 88, identifier:to_isostring; 89, argument_list; 90, identifier:params; 91, string; 92, identifier:to_isostring; 93, argument_list; 94, identifier:params; 95, string; 96, identifier:int; 97, argument_list; 98, identifier:self; 99, identifier:_client; 100, attribute; 101, argument_list; 102, identifier:archive_pb2; 103, identifier:IndexResponse; 104, string_content:group; 105, string_content:group; 106, string_content:start; 107, identifier:start; 108, string_content:stop; 109, identifier:stop; 110, string_content:mergeTime; 111, binary_operator:merge_time * 1000; 112, string; 113, identifier:format; 114, attribute; 115, identifier:merge_time; 116, integer:1000; 117, string_content:/archive/{}/parameter-index; 118, identifier:self; 119, identifier:_instance
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 3, 9; 4, 10; 4, 11; 4, 12; 4, 13; 4, 14; 4, 15; 4, 16; 6, 17; 6, 18; 7, 19; 7, 20; 8, 21; 8, 22; 9, 23; 9, 24; 10, 25; 11, 26; 12, 27; 12, 28; 13, 29; 13, 30; 14, 31; 14, 32; 15, 33; 15, 34; 16, 35; 26, 36; 26, 37; 27, 38; 27, 39; 28, 40; 29, 41; 29, 42; 30, 43; 31, 44; 31, 45; 32, 46; 33, 47; 33, 48; 34, 49; 35, 50; 35, 51; 40, 52; 43, 53; 46, 54; 49, 55; 50, 56; 50, 57; 51, 58; 51, 59; 51, 60; 51, 61; 51, 62; 51, 63; 52, 64; 52, 65; 53, 66; 53, 67; 54, 68; 54, 69; 55, 70; 55, 71; 58, 72; 58, 73; 59, 74; 59, 75; 60, 76; 60, 77; 61, 78; 61, 79; 62, 80; 62, 81; 63, 82; 63, 83; 64, 84; 64, 85; 66, 86; 66, 87; 67, 88; 67, 89; 68, 90; 68, 91; 69, 92; 69, 93; 70, 94; 70, 95; 71, 96; 71, 97; 73, 98; 73, 99; 75, 100; 75, 101; 79, 102; 79, 103; 81, 104; 85, 105; 87, 106; 89, 107; 91, 108; 93, 109; 95, 110; 97, 111; 100, 112; 100, 113; 101, 114; 111, 115; 111, 116; 112, 117; 114, 118; 114, 119
def list_processed_parameter_group_histogram(self, group=None, start=None, stop=None, merge_time=20): """ Reads index records related to processed parameter groups between the specified start and stop time. Each iteration returns a chunk of chronologically-sorted records. :param float merge_time: Maximum gap in seconds before two consecutive index records are merged together. :rtype: ~collections.Iterable[.IndexGroup] """ params = {} if group is not None: params['group'] = group if start is not None: params['start'] = to_isostring(start) if stop is not None: params['stop'] = to_isostring(stop) if merge_time is not None: params['mergeTime'] = int(merge_time * 1000) return pagination.Iterator( client=self._client, path='/archive/{}/parameter-index'.format(self._instance), params=params, response_class=archive_pb2.IndexResponse, items_key='group', item_mapper=IndexGroup, )
0, module; 1, function_definition; 2, function_name:list_completeness_index; 3, parameters; 4, block; 5, identifier:self; 6, default_parameter; 7, default_parameter; 8, expression_statement; 9, expression_statement; 10, if_statement; 11, if_statement; 12, return_statement; 13, identifier:start; 14, None; 15, identifier:stop; 16, None; 17, comment:""" Reads completeness index records between the specified start and stop time. Each iteration returns a chunk of chronologically-sorted records. :rtype: ~collections.Iterable[.IndexGroup] """; 18, assignment; 19, comparison_operator:start is not None; 20, block; 21, comparison_operator:stop is not None; 22, block; 23, call; 24, identifier:params; 25, dictionary; 26, identifier:start; 27, None; 28, expression_statement; 29, identifier:stop; 30, None; 31, expression_statement; 32, attribute; 33, argument_list; 34, assignment; 35, assignment; 36, identifier:pagination; 37, identifier:Iterator; 38, keyword_argument; 39, keyword_argument; 40, keyword_argument; 41, keyword_argument; 42, keyword_argument; 43, keyword_argument; 44, subscript; 45, call; 46, subscript; 47, call; 48, identifier:client; 49, attribute; 50, identifier:path; 51, call; 52, identifier:params; 53, identifier:params; 54, identifier:response_class; 55, attribute; 56, identifier:items_key; 57, string; 58, identifier:item_mapper; 59, identifier:IndexGroup; 60, identifier:params; 61, string; 62, identifier:to_isostring; 63, argument_list; 64, identifier:params; 65, string; 66, identifier:to_isostring; 67, argument_list; 68, identifier:self; 69, identifier:_client; 70, attribute; 71, argument_list; 72, identifier:archive_pb2; 73, identifier:IndexResponse; 74, string_content:group; 75, string_content:start; 76, identifier:start; 77, string_content:stop; 78, identifier:stop; 79, string; 80, identifier:format; 81, attribute; 82, string_content:/archive/{}/completeness-index; 83, identifier:self; 84, identifier:_instance
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 6, 13; 6, 14; 7, 15; 7, 16; 8, 17; 9, 18; 10, 19; 10, 20; 11, 21; 11, 22; 12, 23; 18, 24; 18, 25; 19, 26; 19, 27; 20, 28; 21, 29; 21, 30; 22, 31; 23, 32; 23, 33; 28, 34; 31, 35; 32, 36; 32, 37; 33, 38; 33, 39; 33, 40; 33, 41; 33, 42; 33, 43; 34, 44; 34, 45; 35, 46; 35, 47; 38, 48; 38, 49; 39, 50; 39, 51; 40, 52; 40, 53; 41, 54; 41, 55; 42, 56; 42, 57; 43, 58; 43, 59; 44, 60; 44, 61; 45, 62; 45, 63; 46, 64; 46, 65; 47, 66; 47, 67; 49, 68; 49, 69; 51, 70; 51, 71; 55, 72; 55, 73; 57, 74; 61, 75; 63, 76; 65, 77; 67, 78; 70, 79; 70, 80; 71, 81; 79, 82; 81, 83; 81, 84
def list_completeness_index(self, start=None, stop=None): """ Reads completeness index records between the specified start and stop time. Each iteration returns a chunk of chronologically-sorted records. :rtype: ~collections.Iterable[.IndexGroup] """ params = {} if start is not None: params['start'] = to_isostring(start) if stop is not None: params['stop'] = to_isostring(stop) return pagination.Iterator( client=self._client, path='/archive/{}/completeness-index'.format(self._instance), params=params, response_class=archive_pb2.IndexResponse, items_key='group', item_mapper=IndexGroup, )
0, module; 1, function_definition; 2, function_name:list_packets; 3, parameters; 4, block; 5, identifier:self; 6, default_parameter; 7, default_parameter; 8, default_parameter; 9, default_parameter; 10, default_parameter; 11, expression_statement; 12, expression_statement; 13, if_statement; 14, if_statement; 15, if_statement; 16, if_statement; 17, return_statement; 18, identifier:name; 19, None; 20, identifier:start; 21, None; 22, identifier:stop; 23, None; 24, identifier:page_size; 25, integer:500; 26, identifier:descending; 27, False; 28, comment:""" Reads packet information between the specified start and stop time. Packets are sorted by generation time and sequence number. :param ~datetime.datetime start: Minimum generation time of the returned packets (inclusive) :param ~datetime.datetime stop: Maximum genreation time of the returned packets (exclusive) :param int page_size: Page size of underlying requests. Higher values imply less overhead, but risk hitting the maximum message size limit. :param bool descending: If set to ``True`` packets are fetched in reverse order (most recent first). :rtype: ~collections.Iterable[.Packet] """; 29, assignment; 30, comparison_operator:name is not None; 31, block; 32, comparison_operator:page_size is not None; 33, block; 34, comparison_operator:start is not None; 35, block; 36, comparison_operator:stop is not None; 37, block; 38, call; 39, identifier:params; 40, dictionary; 41, identifier:name; 42, None; 43, expression_statement; 44, identifier:page_size; 45, None; 46, expression_statement; 47, identifier:start; 48, None; 49, expression_statement; 50, identifier:stop; 51, None; 52, expression_statement; 53, attribute; 54, argument_list; 55, pair; 56, assignment; 57, assignment; 58, assignment; 59, assignment; 60, identifier:pagination; 61, identifier:Iterator; 62, keyword_argument; 63, keyword_argument; 64, keyword_argument; 65, keyword_argument; 66, keyword_argument; 67, keyword_argument; 68, string; 69, conditional_expression:'desc' if descending else 'asc'; 70, subscript; 71, identifier:name; 72, subscript; 73, identifier:page_size; 74, subscript; 75, call; 76, subscript; 77, call; 78, identifier:client; 79, attribute; 80, identifier:path; 81, call; 82, identifier:params; 83, identifier:params; 84, identifier:response_class; 85, attribute; 86, identifier:items_key; 87, string; 88, identifier:item_mapper; 89, identifier:Packet; 90, string_content:order; 91, string; 92, identifier:descending; 93, string; 94, identifier:params; 95, string; 96, identifier:params; 97, string; 98, identifier:params; 99, string; 100, identifier:to_isostring; 101, argument_list; 102, identifier:params; 103, string; 104, identifier:to_isostring; 105, argument_list; 106, identifier:self; 107, identifier:_client; 108, attribute; 109, argument_list; 110, identifier:rest_pb2; 111, identifier:ListPacketsResponse; 112, string_content:packet; 113, string_content:desc; 114, string_content:asc; 115, string_content:name; 116, string_content:limit; 117, string_content:start; 118, identifier:start; 119, string_content:stop; 120, identifier:stop; 121, string; 122, identifier:format; 123, attribute; 124, string_content:/archive/{}/packets; 125, identifier:self; 126, identifier:_instance
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 3, 9; 3, 10; 4, 11; 4, 12; 4, 13; 4, 14; 4, 15; 4, 16; 4, 17; 6, 18; 6, 19; 7, 20; 7, 21; 8, 22; 8, 23; 9, 24; 9, 25; 10, 26; 10, 27; 11, 28; 12, 29; 13, 30; 13, 31; 14, 32; 14, 33; 15, 34; 15, 35; 16, 36; 16, 37; 17, 38; 29, 39; 29, 40; 30, 41; 30, 42; 31, 43; 32, 44; 32, 45; 33, 46; 34, 47; 34, 48; 35, 49; 36, 50; 36, 51; 37, 52; 38, 53; 38, 54; 40, 55; 43, 56; 46, 57; 49, 58; 52, 59; 53, 60; 53, 61; 54, 62; 54, 63; 54, 64; 54, 65; 54, 66; 54, 67; 55, 68; 55, 69; 56, 70; 56, 71; 57, 72; 57, 73; 58, 74; 58, 75; 59, 76; 59, 77; 62, 78; 62, 79; 63, 80; 63, 81; 64, 82; 64, 83; 65, 84; 65, 85; 66, 86; 66, 87; 67, 88; 67, 89; 68, 90; 69, 91; 69, 92; 69, 93; 70, 94; 70, 95; 72, 96; 72, 97; 74, 98; 74, 99; 75, 100; 75, 101; 76, 102; 76, 103; 77, 104; 77, 105; 79, 106; 79, 107; 81, 108; 81, 109; 85, 110; 85, 111; 87, 112; 91, 113; 93, 114; 95, 115; 97, 116; 99, 117; 101, 118; 103, 119; 105, 120; 108, 121; 108, 122; 109, 123; 121, 124; 123, 125; 123, 126
def list_packets(self, name=None, start=None, stop=None, page_size=500, descending=False): """ Reads packet information between the specified start and stop time. Packets are sorted by generation time and sequence number. :param ~datetime.datetime start: Minimum generation time of the returned packets (inclusive) :param ~datetime.datetime stop: Maximum genreation time of the returned packets (exclusive) :param int page_size: Page size of underlying requests. Higher values imply less overhead, but risk hitting the maximum message size limit. :param bool descending: If set to ``True`` packets are fetched in reverse order (most recent first). :rtype: ~collections.Iterable[.Packet] """ params = { 'order': 'desc' if descending else 'asc', } if name is not None: params['name'] = name if page_size is not None: params['limit'] = page_size if start is not None: params['start'] = to_isostring(start) if stop is not None: params['stop'] = to_isostring(stop) return pagination.Iterator( client=self._client, path='/archive/{}/packets'.format(self._instance), params=params, response_class=rest_pb2.ListPacketsResponse, items_key='packet', item_mapper=Packet, )
0, module; 1, function_definition; 2, function_name:list_events; 3, parameters; 4, block; 5, identifier:self; 6, default_parameter; 7, default_parameter; 8, default_parameter; 9, default_parameter; 10, default_parameter; 11, default_parameter; 12, default_parameter; 13, expression_statement; 14, expression_statement; 15, if_statement; 16, if_statement; 17, if_statement; 18, if_statement; 19, if_statement; 20, if_statement; 21, return_statement; 22, identifier:source; 23, None; 24, identifier:severity; 25, None; 26, identifier:text_filter; 27, None; 28, identifier:start; 29, None; 30, identifier:stop; 31, None; 32, identifier:page_size; 33, integer:500; 34, identifier:descending; 35, False; 36, comment:""" Reads events between the specified start and stop time. Events are sorted by generation time, source, then sequence number. :param str source: The source of the returned events. :param str severity: The minimum severity level of the returned events. One of ``INFO``, ``WATCH``, ``WARNING``, ``DISTRESS``, ``CRITICAL`` or ``SEVERE``. :param str text_filter: Filter the text message of the returned events :param ~datetime.datetime start: Minimum start date of the returned events (inclusive) :param ~datetime.datetime stop: Maximum start date of the returned events (exclusive) :param int page_size: Page size of underlying requests. Higher values imply less overhead, but risk hitting the maximum message size limit. :param bool descending: If set to ``True`` events are fetched in reverse order (most recent first). :rtype: ~collections.Iterable[.Event] """; 37, assignment; 38, comparison_operator:source is not None; 39, block; 40, comparison_operator:page_size is not None; 41, block; 42, comparison_operator:severity is not None; 43, block; 44, comparison_operator:start is not None; 45, block; 46, comparison_operator:stop is not None; 47, block; 48, comparison_operator:text_filter is not None; 49, block; 50, call; 51, identifier:params; 52, dictionary; 53, identifier:source; 54, None; 55, expression_statement; 56, identifier:page_size; 57, None; 58, expression_statement; 59, identifier:severity; 60, None; 61, expression_statement; 62, identifier:start; 63, None; 64, expression_statement; 65, identifier:stop; 66, None; 67, expression_statement; 68, identifier:text_filter; 69, None; 70, expression_statement; 71, attribute; 72, argument_list; 73, pair; 74, assignment; 75, assignment; 76, assignment; 77, assignment; 78, assignment; 79, assignment; 80, identifier:pagination; 81, identifier:Iterator; 82, keyword_argument; 83, keyword_argument; 84, keyword_argument; 85, keyword_argument; 86, keyword_argument; 87, keyword_argument; 88, string; 89, conditional_expression:'desc' if descending else 'asc'; 90, subscript; 91, identifier:source; 92, subscript; 93, identifier:page_size; 94, subscript; 95, identifier:severity; 96, subscript; 97, call; 98, subscript; 99, call; 100, subscript; 101, identifier:text_filter; 102, identifier:client; 103, attribute; 104, identifier:path; 105, call; 106, identifier:params; 107, identifier:params; 108, identifier:response_class; 109, attribute; 110, identifier:items_key; 111, string; 112, identifier:item_mapper; 113, identifier:Event; 114, string_content:order; 115, string; 116, identifier:descending; 117, string; 118, identifier:params; 119, string; 120, identifier:params; 121, string; 122, identifier:params; 123, string; 124, identifier:params; 125, string; 126, identifier:to_isostring; 127, argument_list; 128, identifier:params; 129, string; 130, identifier:to_isostring; 131, argument_list; 132, identifier:params; 133, string; 134, identifier:self; 135, identifier:_client; 136, attribute; 137, argument_list; 138, identifier:rest_pb2; 139, identifier:ListEventsResponse; 140, string_content:event; 141, string_content:desc; 142, string_content:asc; 143, string_content:source; 144, string_content:limit; 145, string_content:severity; 146, string_content:start; 147, identifier:start; 148, string_content:stop; 149, identifier:stop; 150, string_content:q; 151, string; 152, identifier:format; 153, attribute; 154, string_content:/archive/{}/events; 155, identifier:self; 156, identifier:_instance
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 3, 9; 3, 10; 3, 11; 3, 12; 4, 13; 4, 14; 4, 15; 4, 16; 4, 17; 4, 18; 4, 19; 4, 20; 4, 21; 6, 22; 6, 23; 7, 24; 7, 25; 8, 26; 8, 27; 9, 28; 9, 29; 10, 30; 10, 31; 11, 32; 11, 33; 12, 34; 12, 35; 13, 36; 14, 37; 15, 38; 15, 39; 16, 40; 16, 41; 17, 42; 17, 43; 18, 44; 18, 45; 19, 46; 19, 47; 20, 48; 20, 49; 21, 50; 37, 51; 37, 52; 38, 53; 38, 54; 39, 55; 40, 56; 40, 57; 41, 58; 42, 59; 42, 60; 43, 61; 44, 62; 44, 63; 45, 64; 46, 65; 46, 66; 47, 67; 48, 68; 48, 69; 49, 70; 50, 71; 50, 72; 52, 73; 55, 74; 58, 75; 61, 76; 64, 77; 67, 78; 70, 79; 71, 80; 71, 81; 72, 82; 72, 83; 72, 84; 72, 85; 72, 86; 72, 87; 73, 88; 73, 89; 74, 90; 74, 91; 75, 92; 75, 93; 76, 94; 76, 95; 77, 96; 77, 97; 78, 98; 78, 99; 79, 100; 79, 101; 82, 102; 82, 103; 83, 104; 83, 105; 84, 106; 84, 107; 85, 108; 85, 109; 86, 110; 86, 111; 87, 112; 87, 113; 88, 114; 89, 115; 89, 116; 89, 117; 90, 118; 90, 119; 92, 120; 92, 121; 94, 122; 94, 123; 96, 124; 96, 125; 97, 126; 97, 127; 98, 128; 98, 129; 99, 130; 99, 131; 100, 132; 100, 133; 103, 134; 103, 135; 105, 136; 105, 137; 109, 138; 109, 139; 111, 140; 115, 141; 117, 142; 119, 143; 121, 144; 123, 145; 125, 146; 127, 147; 129, 148; 131, 149; 133, 150; 136, 151; 136, 152; 137, 153; 151, 154; 153, 155; 153, 156
def list_events(self, source=None, severity=None, text_filter=None, start=None, stop=None, page_size=500, descending=False): """ Reads events between the specified start and stop time. Events are sorted by generation time, source, then sequence number. :param str source: The source of the returned events. :param str severity: The minimum severity level of the returned events. One of ``INFO``, ``WATCH``, ``WARNING``, ``DISTRESS``, ``CRITICAL`` or ``SEVERE``. :param str text_filter: Filter the text message of the returned events :param ~datetime.datetime start: Minimum start date of the returned events (inclusive) :param ~datetime.datetime stop: Maximum start date of the returned events (exclusive) :param int page_size: Page size of underlying requests. Higher values imply less overhead, but risk hitting the maximum message size limit. :param bool descending: If set to ``True`` events are fetched in reverse order (most recent first). :rtype: ~collections.Iterable[.Event] """ params = { 'order': 'desc' if descending else 'asc', } if source is not None: params['source'] = source if page_size is not None: params['limit'] = page_size if severity is not None: params['severity'] = severity if start is not None: params['start'] = to_isostring(start) if stop is not None: params['stop'] = to_isostring(stop) if text_filter is not None: params['q'] = text_filter return pagination.Iterator( client=self._client, path='/archive/{}/events'.format(self._instance), params=params, response_class=rest_pb2.ListEventsResponse, items_key='event', item_mapper=Event, )
0, module; 1, function_definition; 2, function_name:copy_file; 3, parameters; 4, block; 5, identifier:source; 6, identifier:destination; 7, default_parameter; 8, default_parameter; 9, default_parameter; 10, default_parameter; 11, expression_statement; 12, expression_statement; 13, identifier:unique; 14, False; 15, identifier:sort; 16, False; 17, identifier:case_sensitive; 18, True; 19, identifier:create_path; 20, False; 21, comment:""" Python utility to create file Args: source: absolute/relative path of source file destination: absolute/relative path of destination file. Use same as source for replacing the content of existing file. unique: Copy only unique lines from file sort: Sort the content of file case_sensitive: unique/sort operations to be performed case-sensitive string create_path: Recursively create the path to destination directory in case not found Returns: None """; 22, call; 23, attribute; 24, argument_list; 25, identifier:_File; 26, identifier:copy; 27, identifier:source; 28, identifier:destination; 29, identifier:unique; 30, identifier:sort; 31, identifier:case_sensitive; 32, identifier:create_path
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 3, 9; 3, 10; 4, 11; 4, 12; 7, 13; 7, 14; 8, 15; 8, 16; 9, 17; 9, 18; 10, 19; 10, 20; 11, 21; 12, 22; 22, 23; 22, 24; 23, 25; 23, 26; 24, 27; 24, 28; 24, 29; 24, 30; 24, 31; 24, 32
def copy_file(source, destination, unique=False, sort=False, case_sensitive=True, create_path=False): """ Python utility to create file Args: source: absolute/relative path of source file destination: absolute/relative path of destination file. Use same as source for replacing the content of existing file. unique: Copy only unique lines from file sort: Sort the content of file case_sensitive: unique/sort operations to be performed case-sensitive string create_path: Recursively create the path to destination directory in case not found Returns: None """ _File.copy(source, destination, unique, sort, case_sensitive, create_path)
0, module; 1, function_definition; 2, function_name:set; 3, parameters; 4, block; 5, identifier:self; 6, dictionary_splat_pattern; 7, expression_statement; 8, expression_statement; 9, if_statement; 10, if_statement; 11, if_statement; 12, if_statement; 13, expression_statement; 14, if_statement; 15, if_statement; 16, if_statement; 17, if_statement; 18, if_statement; 19, if_statement; 20, if_statement; 21, if_statement; 22, if_statement; 23, if_statement; 24, if_statement; 25, if_statement; 26, if_statement; 27, if_statement; 28, identifier:kwargs; 29, comment:"""Sets an internal setting for acquistion, using keywords. Available parameters to set: :param acqtime: duration of recording (input) window (seconds) :type acqtime: float :param aifs: sample rate of the recording (input) operation (Hz) :type aifs: int :param aochan: AO (generation) channel name :type aochan: str :param aichan: AI (recording) channel name :type aichan: str :param nreps: number of repetitions for each unique stimulus :type nreps: int :param binsz: time bin duration for spike sorting (seconds) :type binsz: float :param caldb: See :meth:`StimulusModel<sparkle.stim.stimulus_model.StimulusModel.setReferenceVoltage>` :type caldb: float :param calv: See :meth:`StimulusModel<sparkle.stim.stimulus_model.StimulusModel.setReferenceVoltage>` :type calv: float :param datafile: a reference to an open file to save data to :type datafile: :class:`AcquisitionData<sparkle.data.dataobjects.AcquisitionData>` :param average: whether to average repetitions of a trace, saving only the averaged signal :type average: bool :param reject: whether to reject values higher than a defined threshold. Only used while average is true :type reject: bool :param rejectrate: the value to base artifact rejection on :type rejectrate: float """; 30, call; 31, comparison_operator:'acqtime' in kwargs; 32, block; 33, comparison_operator:'aifs' in kwargs; 34, block; 35, boolean_operator; 36, block; 37, comparison_operator:'trigger' in kwargs; 38, block; 39, call; 40, comparison_operator:'aochan' in kwargs; 41, block; 42, comparison_operator:'aichan' in kwargs; 43, block; 44, comparison_operator:'binsz' in kwargs; 45, block; 46, comparison_operator:'save' in kwargs; 47, block; 48, comparison_operator:'caldb' in kwargs; 49, block; 50, comparison_operator:'calv' in kwargs; 51, block; 52, comparison_operator:'calf' in kwargs; 53, block; 54, boolean_operator; 55, block; 56, comparison_operator:'datafile' in kwargs; 57, block; 58, comparison_operator:'reprate' in kwargs; 59, block; 60, comparison_operator:'save' in kwargs; 61, block; 62, comparison_operator:'average' in kwargs; 63, block; 64, comparison_operator:'reject' in kwargs; 65, block; 66, comparison_operator:'rejectrate' in kwargs; 67, block; 68, attribute; 69, argument_list; 70, string; 71, identifier:kwargs; 72, expression_statement; 73, string; 74, identifier:kwargs; 75, expression_statement; 76, expression_statement; 77, comparison_operator:'aifs' in kwargs; 78, comparison_operator:'acqtime' in kwargs; 79, expression_statement; 80, expression_statement; 81, expression_statement; 82, string; 83, identifier:kwargs; 84, expression_statement; 85, attribute; 86, argument_list; 87, string; 88, identifier:kwargs; 89, expression_statement; 90, string; 91, identifier:kwargs; 92, expression_statement; 93, string; 94, identifier:kwargs; 95, expression_statement; 96, string; 97, identifier:kwargs; 98, expression_statement; 99, string; 100, identifier:kwargs; 101, expression_statement; 102, string; 103, identifier:kwargs; 104, expression_statement; 105, string; 106, identifier:kwargs; 107, expression_statement; 108, comparison_operator:'caldb' in kwargs; 109, comparison_operator:'calv' in kwargs; 110, expression_statement; 111, string; 112, identifier:kwargs; 113, expression_statement; 114, string; 115, identifier:kwargs; 116, expression_statement; 117, string; 118, identifier:kwargs; 119, expression_statement; 120, string; 121, identifier:kwargs; 122, expression_statement; 123, string; 124, identifier:kwargs; 125, expression_statement; 126, string; 127, identifier:kwargs; 128, expression_statement; 129, attribute; 130, identifier:acquire; 131, string_content:acqtime; 132, call; 133, string_content:aifs; 134, call; 135, assignment; 136, string; 137, identifier:kwargs; 138, string; 139, identifier:kwargs; 140, assignment; 141, assignment; 142, assignment; 143, string_content:trigger; 144, call; 145, attribute; 146, identifier:release; 147, string_content:aochan; 148, assignment; 149, string_content:aichan; 150, assignment; 151, string_content:binsz; 152, assignment; 153, string_content:save; 154, assignment; 155, string_content:caldb; 156, assignment; 157, string_content:calv; 158, assignment; 159, string_content:calf; 160, assignment; 161, string; 162, identifier:kwargs; 163, string; 164, identifier:kwargs; 165, call; 166, string_content:datafile; 167, assignment; 168, string_content:reprate; 169, assignment; 170, string_content:save; 171, assignment; 172, string_content:average; 173, assignment; 174, string_content:reject; 175, assignment; 176, string_content:rejectrate; 177, assignment; 178, identifier:self; 179, identifier:player_lock; 180, attribute; 181, argument_list; 182, attribute; 183, argument_list; 184, attribute; 185, subscript; 186, string_content:aifs; 187, string_content:acqtime; 188, identifier:t; 189, call; 190, identifier:npoints; 191, binary_operator:t*float(kwargs.get('aifs', self.player.get_aifs())); 192, attribute; 193, call; 194, attribute; 195, argument_list; 196, identifier:self; 197, identifier:player_lock; 198, attribute; 199, subscript; 200, attribute; 201, subscript; 202, attribute; 203, subscript; 204, attribute; 205, subscript; 206, attribute; 207, subscript; 208, attribute; 209, subscript; 210, attribute; 211, subscript; 212, string_content:caldb; 213, string_content:calv; 214, attribute; 215, argument_list; 216, attribute; 217, subscript; 218, attribute; 219, subscript; 220, attribute; 221, subscript; 222, attribute; 223, subscript; 224, attribute; 225, subscript; 226, attribute; 227, subscript; 228, attribute; 229, identifier:set_aidur; 230, subscript; 231, attribute; 232, identifier:set_aifs; 233, subscript; 234, identifier:self; 235, identifier:aifs; 236, identifier:kwargs; 237, string; 238, attribute; 239, argument_list; 240, identifier:t; 241, call; 242, identifier:self; 243, identifier:aitimes; 244, attribute; 245, argument_list; 246, attribute; 247, identifier:set_trigger; 248, subscript; 249, identifier:self; 250, identifier:aochan; 251, identifier:kwargs; 252, string; 253, identifier:self; 254, identifier:aichan; 255, identifier:kwargs; 256, string; 257, identifier:self; 258, identifier:binsz; 259, identifier:kwargs; 260, string; 261, identifier:self; 262, identifier:save_data; 263, identifier:kwargs; 264, string; 265, identifier:self; 266, identifier:caldb; 267, identifier:kwargs; 268, string; 269, identifier:self; 270, identifier:calv; 271, identifier:kwargs; 272, string; 273, identifier:self; 274, identifier:calf; 275, identifier:kwargs; 276, string; 277, identifier:self; 278, identifier:update_reference_voltage; 279, identifier:self; 280, identifier:datafile; 281, identifier:kwargs; 282, string; 283, identifier:self; 284, identifier:reprate; 285, identifier:kwargs; 286, string; 287, identifier:self; 288, identifier:save_data; 289, identifier:kwargs; 290, string; 291, identifier:self; 292, identifier:average; 293, identifier:kwargs; 294, string; 295, identifier:self; 296, identifier:reject; 297, identifier:kwargs; 298, string; 299, identifier:self; 300, identifier:rejectrate; 301, identifier:kwargs; 302, string; 303, identifier:self; 304, identifier:player; 305, identifier:kwargs; 306, string; 307, identifier:self; 308, identifier:player; 309, identifier:kwargs; 310, string; 311, string_content:aifs; 312, identifier:kwargs; 313, identifier:get; 314, string; 315, call; 316, identifier:float; 317, argument_list; 318, identifier:np; 319, identifier:linspace; 320, integer:0; 321, identifier:t; 322, identifier:npoints; 323, identifier:self; 324, identifier:player; 325, identifier:kwargs; 326, string; 327, string_content:aochan; 328, string_content:aichan; 329, string_content:binsz; 330, string_content:save; 331, string_content:caldb; 332, string_content:calv; 333, string_content:calf; 334, string_content:datafile; 335, string_content:reprate; 336, string_content:save; 337, string_content:average; 338, string_content:reject; 339, string_content:rejectrate; 340, string_content:acqtime; 341, string_content:aifs; 342, string_content:acqtime; 343, attribute; 344, argument_list; 345, call; 346, string_content:trigger; 347, attribute; 348, identifier:get_aidur; 349, attribute; 350, argument_list; 351, identifier:self; 352, identifier:player; 353, identifier:kwargs; 354, identifier:get; 355, string; 356, call; 357, string_content:aifs; 358, attribute; 359, argument_list; 360, attribute; 361, identifier:get_aifs; 362, identifier:self; 363, identifier:player
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 4, 14; 4, 15; 4, 16; 4, 17; 4, 18; 4, 19; 4, 20; 4, 21; 4, 22; 4, 23; 4, 24; 4, 25; 4, 26; 4, 27; 6, 28; 7, 29; 8, 30; 9, 31; 9, 32; 10, 33; 10, 34; 11, 35; 11, 36; 12, 37; 12, 38; 13, 39; 14, 40; 14, 41; 15, 42; 15, 43; 16, 44; 16, 45; 17, 46; 17, 47; 18, 48; 18, 49; 19, 50; 19, 51; 20, 52; 20, 53; 21, 54; 21, 55; 22, 56; 22, 57; 23, 58; 23, 59; 24, 60; 24, 61; 25, 62; 25, 63; 26, 64; 26, 65; 27, 66; 27, 67; 30, 68; 30, 69; 31, 70; 31, 71; 32, 72; 33, 73; 33, 74; 34, 75; 34, 76; 35, 77; 35, 78; 36, 79; 36, 80; 36, 81; 37, 82; 37, 83; 38, 84; 39, 85; 39, 86; 40, 87; 40, 88; 41, 89; 42, 90; 42, 91; 43, 92; 44, 93; 44, 94; 45, 95; 46, 96; 46, 97; 47, 98; 48, 99; 48, 100; 49, 101; 50, 102; 50, 103; 51, 104; 52, 105; 52, 106; 53, 107; 54, 108; 54, 109; 55, 110; 56, 111; 56, 112; 57, 113; 58, 114; 58, 115; 59, 116; 60, 117; 60, 118; 61, 119; 62, 120; 62, 121; 63, 122; 64, 123; 64, 124; 65, 125; 66, 126; 66, 127; 67, 128; 68, 129; 68, 130; 70, 131; 72, 132; 73, 133; 75, 134; 76, 135; 77, 136; 77, 137; 78, 138; 78, 139; 79, 140; 80, 141; 81, 142; 82, 143; 84, 144; 85, 145; 85, 146; 87, 147; 89, 148; 90, 149; 92, 150; 93, 151; 95, 152; 96, 153; 98, 154; 99, 155; 101, 156; 102, 157; 104, 158; 105, 159; 107, 160; 108, 161; 108, 162; 109, 163; 109, 164; 110, 165; 111, 166; 113, 167; 114, 168; 116, 169; 117, 170; 119, 171; 120, 172; 122, 173; 123, 174; 125, 175; 126, 176; 128, 177; 129, 178; 129, 179; 132, 180; 132, 181; 134, 182; 134, 183; 135, 184; 135, 185; 136, 186; 138, 187; 140, 188; 140, 189; 141, 190; 141, 191; 142, 192; 142, 193; 144, 194; 144, 195; 145, 196; 145, 197; 148, 198; 148, 199; 150, 200; 150, 201; 152, 202; 152, 203; 154, 204; 154, 205; 156, 206; 156, 207; 158, 208; 158, 209; 160, 210; 160, 211; 161, 212; 163, 213; 165, 214; 165, 215; 167, 216; 167, 217; 169, 218; 169, 219; 171, 220; 171, 221; 173, 222; 173, 223; 175, 224; 175, 225; 177, 226; 177, 227; 180, 228; 180, 229; 181, 230; 182, 231; 182, 232; 183, 233; 184, 234; 184, 235; 185, 236; 185, 237; 189, 238; 189, 239; 191, 240; 191, 241; 192, 242; 192, 243; 193, 244; 193, 245; 194, 246; 194, 247; 195, 248; 198, 249; 198, 250; 199, 251; 199, 252; 200, 253; 200, 254; 201, 255; 201, 256; 202, 257; 202, 258; 203, 259; 203, 260; 204, 261; 204, 262; 205, 263; 205, 264; 206, 265; 206, 266; 207, 267; 207, 268; 208, 269; 208, 270; 209, 271; 209, 272; 210, 273; 210, 274; 211, 275; 211, 276; 214, 277; 214, 278; 216, 279; 216, 280; 217, 281; 217, 282; 218, 283; 218, 284; 219, 285; 219, 286; 220, 287; 220, 288; 221, 289; 221, 290; 222, 291; 222, 292; 223, 293; 223, 294; 224, 295; 224, 296; 225, 297; 225, 298; 226, 299; 226, 300; 227, 301; 227, 302; 228, 303; 228, 304; 230, 305; 230, 306; 231, 307; 231, 308; 233, 309; 233, 310; 237, 311; 238, 312; 238, 313; 239, 314; 239, 315; 241, 316; 241, 317; 244, 318; 244, 319; 245, 320; 245, 321; 245, 322; 246, 323; 246, 324; 248, 325; 248, 326; 252, 327; 256, 328; 260, 329; 264, 330; 268, 331; 272, 332; 276, 333; 282, 334; 286, 335; 290, 336; 294, 337; 298, 338; 302, 339; 306, 340; 310, 341; 314, 342; 315, 343; 315, 344; 317, 345; 326, 346; 343, 347; 343, 348; 345, 349; 345, 350; 347, 351; 347, 352; 349, 353; 349, 354; 350, 355; 350, 356; 355, 357; 356, 358; 356, 359; 358, 360; 358, 361; 360, 362; 360, 363
def set(self, **kwargs): """Sets an internal setting for acquistion, using keywords. Available parameters to set: :param acqtime: duration of recording (input) window (seconds) :type acqtime: float :param aifs: sample rate of the recording (input) operation (Hz) :type aifs: int :param aochan: AO (generation) channel name :type aochan: str :param aichan: AI (recording) channel name :type aichan: str :param nreps: number of repetitions for each unique stimulus :type nreps: int :param binsz: time bin duration for spike sorting (seconds) :type binsz: float :param caldb: See :meth:`StimulusModel<sparkle.stim.stimulus_model.StimulusModel.setReferenceVoltage>` :type caldb: float :param calv: See :meth:`StimulusModel<sparkle.stim.stimulus_model.StimulusModel.setReferenceVoltage>` :type calv: float :param datafile: a reference to an open file to save data to :type datafile: :class:`AcquisitionData<sparkle.data.dataobjects.AcquisitionData>` :param average: whether to average repetitions of a trace, saving only the averaged signal :type average: bool :param reject: whether to reject values higher than a defined threshold. Only used while average is true :type reject: bool :param rejectrate: the value to base artifact rejection on :type rejectrate: float """ self.player_lock.acquire() if 'acqtime' in kwargs: self.player.set_aidur(kwargs['acqtime']) if 'aifs' in kwargs: self.player.set_aifs(kwargs['aifs']) self.aifs = kwargs['aifs'] if 'aifs' in kwargs or 'acqtime' in kwargs: t = kwargs.get('acqtime', self.player.get_aidur()) npoints = t*float(kwargs.get('aifs', self.player.get_aifs())) self.aitimes = np.linspace(0, t, npoints) if 'trigger' in kwargs: self.player.set_trigger(kwargs['trigger']) self.player_lock.release() if 'aochan' in kwargs: self.aochan = kwargs['aochan'] if 'aichan' in kwargs: self.aichan = kwargs['aichan'] if 'binsz' in kwargs: self.binsz = kwargs['binsz'] if 'save' in kwargs: self.save_data = kwargs['save'] if 'caldb' in kwargs: self.caldb = kwargs['caldb'] if 'calv' in kwargs: self.calv = kwargs['calv'] if 'calf' in kwargs: self.calf = kwargs['calf'] if 'caldb' in kwargs or 'calv' in kwargs: self.update_reference_voltage() if 'datafile' in kwargs: self.datafile = kwargs['datafile'] if 'reprate' in kwargs: self.reprate = kwargs['reprate'] if 'save' in kwargs: self.save_data = kwargs['save'] if 'average' in kwargs: self.average = kwargs['average'] if 'reject' in kwargs: self.reject = kwargs['reject'] if 'rejectrate' in kwargs: self.rejectrate = kwargs['rejectrate']
0, module; 1, function_definition; 2, function_name:readlist; 3, parameters; 4, block; 5, identifier:self; 6, expression_statement; 7, expression_statement; 8, comment:# Create and start threads; 9, for_statement; 10, for_statement; 11, expression_statement; 12, comment:# Create; 13, expression_statement; 14, comment:"""Sort the reads, and create lists to be used in creating sorted .fastq files"""; 15, call; 16, identifier:i; 17, call; 18, comment:# Send the threads to the appropriate destination function; 19, block; 20, identifier:sample; 21, attribute; 22, block; 23, call; 24, call; 25, identifier:printtime; 26, argument_list; 27, identifier:range; 28, argument_list; 29, expression_statement; 30, comment:# Set the daemon to true - something to do with thread management; 31, expression_statement; 32, comment:# Start the threading; 33, expression_statement; 34, attribute; 35, identifier:samples; 36, expression_statement; 37, attribute; 38, argument_list; 39, attribute; 40, argument_list; 41, string; 42, attribute; 43, attribute; 44, assignment; 45, call; 46, call; 47, identifier:self; 48, identifier:runmetadata; 49, call; 50, attribute; 51, identifier:join; 52, identifier:self; 53, identifier:fastqfilter; 54, string_content:Sorting reads; 55, identifier:self; 56, identifier:start; 57, identifier:self; 58, identifier:cpus; 59, identifier:threads; 60, call; 61, attribute; 62, argument_list; 63, attribute; 64, argument_list; 65, attribute; 66, argument_list; 67, identifier:self; 68, identifier:listqueue; 69, identifier:Thread; 70, argument_list; 71, identifier:threads; 72, identifier:setDaemon; 73, True; 74, identifier:threads; 75, identifier:start; 76, attribute; 77, identifier:put; 78, identifier:sample; 79, keyword_argument; 80, keyword_argument; 81, identifier:self; 82, identifier:listqueue; 83, identifier:target; 84, attribute; 85, identifier:args; 86, tuple; 87, identifier:self; 88, identifier:listread
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 4, 6; 4, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 6, 14; 7, 15; 9, 16; 9, 17; 9, 18; 9, 19; 10, 20; 10, 21; 10, 22; 11, 23; 13, 24; 15, 25; 15, 26; 17, 27; 17, 28; 19, 29; 19, 30; 19, 31; 19, 32; 19, 33; 21, 34; 21, 35; 22, 36; 23, 37; 23, 38; 24, 39; 24, 40; 26, 41; 26, 42; 28, 43; 29, 44; 31, 45; 33, 46; 34, 47; 34, 48; 36, 49; 37, 50; 37, 51; 39, 52; 39, 53; 41, 54; 42, 55; 42, 56; 43, 57; 43, 58; 44, 59; 44, 60; 45, 61; 45, 62; 46, 63; 46, 64; 49, 65; 49, 66; 50, 67; 50, 68; 60, 69; 60, 70; 61, 71; 61, 72; 62, 73; 63, 74; 63, 75; 65, 76; 65, 77; 66, 78; 70, 79; 70, 80; 76, 81; 76, 82; 79, 83; 79, 84; 80, 85; 80, 86; 84, 87; 84, 88
def readlist(self): """Sort the reads, and create lists to be used in creating sorted .fastq files""" printtime('Sorting reads', self.start) # Create and start threads for i in range(self.cpus): # Send the threads to the appropriate destination function threads = Thread(target=self.listread, args=()) # Set the daemon to true - something to do with thread management threads.setDaemon(True) # Start the threading threads.start() for sample in self.runmetadata.samples: self.listqueue.put(sample) self.listqueue.join() # Create self.fastqfilter()
0, module; 1, function_definition; 2, function_name:build_markdown_table; 3, parameters; 4, block; 5, identifier:headers; 6, identifier:rows; 7, default_parameter; 8, expression_statement; 9, expression_statement; 10, expression_statement; 11, expression_statement; 12, for_statement; 13, return_statement; 14, identifier:row_keys; 15, None; 16, comment:"""Build a lined up markdown table. Args: headers (dict): A key -> value pairing fo the headers. rows (list): List of dictionaries that contain all the keys listed in the headers. row_keys (list): A sorted list of keys to display Returns: A valid Markdown Table as a string. """; 17, assignment; 18, assignment; 19, assignment; 20, identifier:row; 21, identifier:rows; 22, block; 23, binary_operator:'\n'.join(table) + '\n'; 24, identifier:row_maxes; 25, call; 26, identifier:row_keys; 27, boolean_operator; 28, identifier:table; 29, list; 30, expression_statement; 31, call; 32, string; 33, identifier:_find_row_maxes; 34, argument_list; 35, identifier:row_keys; 36, list_comprehension; 37, call; 38, call; 39, call; 40, attribute; 41, argument_list; 42, string_content; 43, identifier:headers; 44, identifier:rows; 45, identifier:key; 46, for_in_clause; 47, identifier:_build_row; 48, argument_list; 49, identifier:_build_separator; 50, argument_list; 51, attribute; 52, argument_list; 53, string; 54, identifier:join; 55, identifier:table; 56, escape_sequence:\n; 57, pattern_list; 58, call; 59, identifier:headers; 60, identifier:row_maxes; 61, identifier:row_keys; 62, identifier:row_maxes; 63, identifier:row_keys; 64, identifier:table; 65, identifier:append; 66, call; 67, string_content; 68, identifier:key; 69, identifier:value; 70, attribute; 71, argument_list; 72, identifier:_build_row; 73, argument_list; 74, escape_sequence:\n; 75, identifier:headers; 76, identifier:items; 77, identifier:row; 78, identifier:row_maxes; 79, identifier:row_keys
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 7, 14; 7, 15; 8, 16; 9, 17; 10, 18; 11, 19; 12, 20; 12, 21; 12, 22; 13, 23; 17, 24; 17, 25; 18, 26; 18, 27; 19, 28; 19, 29; 22, 30; 23, 31; 23, 32; 25, 33; 25, 34; 27, 35; 27, 36; 29, 37; 29, 38; 30, 39; 31, 40; 31, 41; 32, 42; 34, 43; 34, 44; 36, 45; 36, 46; 37, 47; 37, 48; 38, 49; 38, 50; 39, 51; 39, 52; 40, 53; 40, 54; 41, 55; 42, 56; 46, 57; 46, 58; 48, 59; 48, 60; 48, 61; 50, 62; 50, 63; 51, 64; 51, 65; 52, 66; 53, 67; 57, 68; 57, 69; 58, 70; 58, 71; 66, 72; 66, 73; 67, 74; 70, 75; 70, 76; 73, 77; 73, 78; 73, 79
def build_markdown_table(headers, rows, row_keys=None): """Build a lined up markdown table. Args: headers (dict): A key -> value pairing fo the headers. rows (list): List of dictionaries that contain all the keys listed in the headers. row_keys (list): A sorted list of keys to display Returns: A valid Markdown Table as a string. """ row_maxes = _find_row_maxes(headers, rows) row_keys = row_keys or [key for key, value in headers.items()] table = [ _build_row(headers, row_maxes, row_keys), _build_separator(row_maxes, row_keys) ] for row in rows: table.append(_build_row(row, row_maxes, row_keys)) return '\n'.join(table) + '\n'
0, module; 1, function_definition; 2, function_name:sort_item; 3, parameters; 4, block; 5, identifier:iterable; 6, identifier:number; 7, default_parameter; 8, expression_statement; 9, return_statement; 10, identifier:reverse; 11, False; 12, comment:"""Sort the itertable according to the given number item."""; 13, call; 14, identifier:sorted; 15, argument_list; 16, identifier:iterable; 17, keyword_argument; 18, keyword_argument; 19, identifier:key; 20, call; 21, identifier:reverse; 22, identifier:reverse; 23, identifier:itemgetter; 24, argument_list; 25, identifier:number
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 4, 8; 4, 9; 7, 10; 7, 11; 8, 12; 9, 13; 13, 14; 13, 15; 15, 16; 15, 17; 15, 18; 17, 19; 17, 20; 18, 21; 18, 22; 20, 23; 20, 24; 24, 25
def sort_item(iterable, number, reverse=False): """Sort the itertable according to the given number item.""" return sorted(iterable, key=itemgetter(number), reverse=reverse)
0, module; 1, function_definition; 2, function_name:ports_as_list; 3, parameters; 4, block; 5, identifier:port_str; 6, expression_statement; 7, if_statement; 8, if_statement; 9, expression_statement; 10, expression_statement; 11, expression_statement; 12, expression_statement; 13, expression_statement; 14, if_statement; 15, if_statement; 16, expression_statement; 17, expression_statement; 18, expression_statement; 19, comment:# TCP ports listed first, then UDP ports; 20, if_statement; 21, if_statement; 22, if_statement; 23, return_statement; 24, comment:""" Parses a ports string into two list of individual tcp and udp ports. @input string containing a port list e.g. T:1,2,3,5-8 U:22,80,600-1024 @return two list of sorted integers, for tcp and udp ports respectively. """; 25, not_operator; 26, block; 27, call; 28, block; 29, assignment; 30, assignment; 31, assignment; 32, assignment; 33, assignment; 34, comparison_operator:ports[b_tcp - 1] == ','; 35, block; 36, comparison_operator:ports[b_udp - 1] == ','; 37, block; 38, assignment; 39, assignment; 40, assignment; 41, boolean_operator; 42, block; 43, comment:# Only UDP ports; 44, elif_clause; 45, comment:# Only TCP ports; 46, elif_clause; 47, else_clause; 48, identifier:tports; 49, block; 50, identifier:uports; 51, block; 52, tuple; 53, identifier:port_str; 54, expression_statement; 55, return_statement; 56, identifier:ports_str_check_failed; 57, argument_list; 58, expression_statement; 59, return_statement; 60, identifier:tcp_list; 61, call; 62, identifier:udp_list; 63, call; 64, identifier:ports; 65, call; 66, identifier:b_tcp; 67, call; 68, identifier:b_udp; 69, call; 70, subscript; 71, string; 72, expression_statement; 73, subscript; 74, string; 75, expression_statement; 76, identifier:ports; 77, call; 78, identifier:tports; 79, string; 80, identifier:uports; 81, string; 82, comparison_operator:b_udp != -1; 83, comparison_operator:b_tcp != -1; 84, expression_statement; 85, expression_statement; 86, boolean_operator; 87, block; 88, boolean_operator; 89, block; 90, block; 91, for_statement; 92, expression_statement; 93, for_statement; 94, expression_statement; 95, identifier:tcp_list; 96, identifier:udp_list; 97, call; 98, list; 99, identifier:port_str; 100, call; 101, list; 102, identifier:list; 103, argument_list; 104, identifier:list; 105, argument_list; 106, attribute; 107, argument_list; 108, attribute; 109, argument_list; 110, attribute; 111, argument_list; 112, identifier:ports; 113, binary_operator:b_tcp - 1; 114, string_content:,; 115, assignment; 116, identifier:ports; 117, binary_operator:b_udp - 1; 118, string_content:,; 119, assignment; 120, identifier:port_str_arrange; 121, argument_list; 122, identifier:b_udp; 123, unary_operator; 124, identifier:b_tcp; 125, unary_operator; 126, assignment; 127, assignment; 128, comparison_operator:b_tcp == -1; 129, comparison_operator:b_udp != -1; 130, expression_statement; 131, comparison_operator:b_udp == -1; 132, comparison_operator:b_tcp != -1; 133, expression_statement; 134, expression_statement; 135, identifier:port; 136, call; 137, block; 138, call; 139, identifier:port; 140, call; 141, block; 142, call; 143, attribute; 144, argument_list; 145, None; 146, None; 147, attribute; 148, argument_list; 149, None; 150, None; 151, identifier:port_str; 152, identifier:replace; 153, string; 154, string; 155, identifier:ports; 156, identifier:find; 157, string:"T"; 158, identifier:ports; 159, identifier:find; 160, string:"U"; 161, identifier:b_tcp; 162, integer:1; 163, identifier:ports; 164, binary_operator:ports[:b_tcp - 1] + ports[b_tcp:]; 165, identifier:b_udp; 166, integer:1; 167, identifier:ports; 168, binary_operator:ports[:b_udp - 1] + ports[b_udp:]; 169, identifier:ports; 170, integer:1; 171, integer:1; 172, identifier:tports; 173, subscript; 174, identifier:uports; 175, subscript; 176, identifier:b_tcp; 177, unary_operator; 178, identifier:b_udp; 179, unary_operator; 180, assignment; 181, identifier:b_udp; 182, unary_operator; 183, identifier:b_tcp; 184, unary_operator; 185, assignment; 186, assignment; 187, attribute; 188, argument_list; 189, if_statement; 190, attribute; 191, argument_list; 192, attribute; 193, argument_list; 194, if_statement; 195, attribute; 196, argument_list; 197, identifier:LOGGER; 198, identifier:info; 199, string:"Invalid port value"; 200, identifier:LOGGER; 201, identifier:info; 202, string:"{0}: Port list malformed."; 203, string_content:; 204, subscript; 205, subscript; 206, subscript; 207, subscript; 208, identifier:ports; 209, slice; 210, identifier:ports; 211, slice; 212, integer:1; 213, integer:1; 214, identifier:uports; 215, subscript; 216, integer:1; 217, integer:1; 218, identifier:tports; 219, subscript; 220, identifier:tports; 221, identifier:ports; 222, identifier:tports; 223, identifier:split; 224, string; 225, comparison_operator:'-' in port; 226, block; 227, else_clause; 228, identifier:tcp_list; 229, identifier:sort; 230, identifier:uports; 231, identifier:split; 232, string; 233, comparison_operator:'-' in port; 234, block; 235, else_clause; 236, identifier:udp_list; 237, identifier:sort; 238, identifier:ports; 239, slice; 240, identifier:ports; 241, slice; 242, identifier:ports; 243, slice; 244, identifier:ports; 245, slice; 246, binary_operator:ports.index('T:') + 2; 247, call; 248, binary_operator:ports.index('U:') + 2; 249, identifier:ports; 250, slice; 251, identifier:ports; 252, slice; 253, string_content:,; 254, string; 255, identifier:port; 256, expression_statement; 257, block; 258, string_content:,; 259, string; 260, identifier:port; 261, expression_statement; 262, block; 263, binary_operator:b_tcp - 1; 264, identifier:b_tcp; 265, binary_operator:b_udp - 1; 266, identifier:b_udp; 267, call; 268, integer:2; 269, attribute; 270, argument_list; 271, call; 272, integer:2; 273, binary_operator:ports.index('U:') + 2; 274, binary_operator:ports.index('T:') + 2; 275, string_content:-; 276, call; 277, expression_statement; 278, string_content:-; 279, call; 280, expression_statement; 281, identifier:b_tcp; 282, integer:1; 283, identifier:b_udp; 284, integer:1; 285, attribute; 286, argument_list; 287, identifier:ports; 288, identifier:index; 289, string; 290, attribute; 291, argument_list; 292, call; 293, integer:2; 294, call; 295, integer:2; 296, attribute; 297, argument_list; 298, call; 299, attribute; 300, argument_list; 301, call; 302, identifier:ports; 303, identifier:index; 304, string; 305, string_content:U:; 306, identifier:ports; 307, identifier:index; 308, string; 309, attribute; 310, argument_list; 311, attribute; 312, argument_list; 313, identifier:tcp_list; 314, identifier:extend; 315, call; 316, attribute; 317, argument_list; 318, identifier:udp_list; 319, identifier:extend; 320, call; 321, attribute; 322, argument_list; 323, string_content:T:; 324, string_content:U:; 325, identifier:ports; 326, identifier:index; 327, string; 328, identifier:ports; 329, identifier:index; 330, string; 331, identifier:port_range_expand; 332, argument_list; 333, identifier:tcp_list; 334, identifier:append; 335, call; 336, identifier:port_range_expand; 337, argument_list; 338, identifier:udp_list; 339, identifier:append; 340, call; 341, string_content:U:; 342, string_content:T:; 343, identifier:port; 344, identifier:int; 345, argument_list; 346, identifier:port; 347, identifier:int; 348, argument_list; 349, identifier:port; 350, identifier:port
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 4, 6; 4, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 4, 14; 4, 15; 4, 16; 4, 17; 4, 18; 4, 19; 4, 20; 4, 21; 4, 22; 4, 23; 6, 24; 7, 25; 7, 26; 8, 27; 8, 28; 9, 29; 10, 30; 11, 31; 12, 32; 13, 33; 14, 34; 14, 35; 15, 36; 15, 37; 16, 38; 17, 39; 18, 40; 20, 41; 20, 42; 20, 43; 20, 44; 20, 45; 20, 46; 20, 47; 21, 48; 21, 49; 22, 50; 22, 51; 23, 52; 25, 53; 26, 54; 26, 55; 27, 56; 27, 57; 28, 58; 28, 59; 29, 60; 29, 61; 30, 62; 30, 63; 31, 64; 31, 65; 32, 66; 32, 67; 33, 68; 33, 69; 34, 70; 34, 71; 35, 72; 36, 73; 36, 74; 37, 75; 38, 76; 38, 77; 39, 78; 39, 79; 40, 80; 40, 81; 41, 82; 41, 83; 42, 84; 42, 85; 44, 86; 44, 87; 46, 88; 46, 89; 47, 90; 49, 91; 49, 92; 51, 93; 51, 94; 52, 95; 52, 96; 54, 97; 55, 98; 57, 99; 58, 100; 59, 101; 61, 102; 61, 103; 63, 104; 63, 105; 65, 106; 65, 107; 67, 108; 67, 109; 69, 110; 69, 111; 70, 112; 70, 113; 71, 114; 72, 115; 73, 116; 73, 117; 74, 118; 75, 119; 77, 120; 77, 121; 82, 122; 82, 123; 83, 124; 83, 125; 84, 126; 85, 127; 86, 128; 86, 129; 87, 130; 88, 131; 88, 132; 89, 133; 90, 134; 91, 135; 91, 136; 91, 137; 92, 138; 93, 139; 93, 140; 93, 141; 94, 142; 97, 143; 97, 144; 98, 145; 98, 146; 100, 147; 100, 148; 101, 149; 101, 150; 106, 151; 106, 152; 107, 153; 107, 154; 108, 155; 108, 156; 109, 157; 110, 158; 110, 159; 111, 160; 113, 161; 113, 162; 115, 163; 115, 164; 117, 165; 117, 166; 119, 167; 119, 168; 121, 169; 123, 170; 125, 171; 126, 172; 126, 173; 127, 174; 127, 175; 128, 176; 128, 177; 129, 178; 129, 179; 130, 180; 131, 181; 131, 182; 132, 183; 132, 184; 133, 185; 134, 186; 136, 187; 136, 188; 137, 189; 138, 190; 138, 191; 140, 192; 140, 193; 141, 194; 142, 195; 142, 196; 143, 197; 143, 198; 144, 199; 147, 200; 147, 201; 148, 202; 153, 203; 164, 204; 164, 205; 168, 206; 168, 207; 173, 208; 173, 209; 175, 210; 175, 211; 177, 212; 179, 213; 180, 214; 180, 215; 182, 216; 184, 217; 185, 218; 185, 219; 186, 220; 186, 221; 187, 222; 187, 223; 188, 224; 189, 225; 189, 226; 189, 227; 190, 228; 190, 229; 192, 230; 192, 231; 193, 232; 194, 233; 194, 234; 194, 235; 195, 236; 195, 237; 204, 238; 204, 239; 205, 240; 205, 241; 206, 242; 206, 243; 207, 244; 207, 245; 209, 246; 209, 247; 211, 248; 215, 249; 215, 250; 219, 251; 219, 252; 224, 253; 225, 254; 225, 255; 226, 256; 227, 257; 232, 258; 233, 259; 233, 260; 234, 261; 235, 262; 239, 263; 241, 264; 243, 265; 245, 266; 246, 267; 246, 268; 247, 269; 247, 270; 248, 271; 248, 272; 250, 273; 252, 274; 254, 275; 256, 276; 257, 277; 259, 278; 261, 279; 262, 280; 263, 281; 263, 282; 265, 283; 265, 284; 267, 285; 267, 286; 269, 287; 269, 288; 270, 289; 271, 290; 271, 291; 273, 292; 273, 293; 274, 294; 274, 295; 276, 296; 276, 297; 277, 298; 279, 299; 279, 300; 280, 301; 285, 302; 285, 303; 286, 304; 289, 305; 290, 306; 290, 307; 291, 308; 292, 309; 292, 310; 294, 311; 294, 312; 296, 313; 296, 314; 297, 315; 298, 316; 298, 317; 299, 318; 299, 319; 300, 320; 301, 321; 301, 322; 304, 323; 308, 324; 309, 325; 309, 326; 310, 327; 311, 328; 311, 329; 312, 330; 315, 331; 315, 332; 316, 333; 316, 334; 317, 335; 320, 336; 320, 337; 321, 338; 321, 339; 322, 340; 327, 341; 330, 342; 332, 343; 335, 344; 335, 345; 337, 346; 340, 347; 340, 348; 345, 349; 348, 350
def ports_as_list(port_str): """ Parses a ports string into two list of individual tcp and udp ports. @input string containing a port list e.g. T:1,2,3,5-8 U:22,80,600-1024 @return two list of sorted integers, for tcp and udp ports respectively. """ if not port_str: LOGGER.info("Invalid port value") return [None, None] if ports_str_check_failed(port_str): LOGGER.info("{0}: Port list malformed.") return [None, None] tcp_list = list() udp_list = list() ports = port_str.replace(' ', '') b_tcp = ports.find("T") b_udp = ports.find("U") if ports[b_tcp - 1] == ',': ports = ports[:b_tcp - 1] + ports[b_tcp:] if ports[b_udp - 1] == ',': ports = ports[:b_udp - 1] + ports[b_udp:] ports = port_str_arrange(ports) tports = '' uports = '' # TCP ports listed first, then UDP ports if b_udp != -1 and b_tcp != -1: tports = ports[ports.index('T:') + 2:ports.index('U:')] uports = ports[ports.index('U:') + 2:] # Only UDP ports elif b_tcp == -1 and b_udp != -1: uports = ports[ports.index('U:') + 2:] # Only TCP ports elif b_udp == -1 and b_tcp != -1: tports = ports[ports.index('T:') + 2:] else: tports = ports if tports: for port in tports.split(','): if '-' in port: tcp_list.extend(port_range_expand(port)) else: tcp_list.append(int(port)) tcp_list.sort() if uports: for port in uports.split(','): if '-' in port: udp_list.extend(port_range_expand(port)) else: udp_list.append(int(port)) udp_list.sort() return (tcp_list, udp_list)
0, module; 1, function_definition; 2, function_name:tsses; 3, parameters; 4, block; 5, identifier:db; 6, default_parameter; 7, default_parameter; 8, default_parameter; 9, default_parameter; 10, default_parameter; 11, default_parameter; 12, expression_statement; 13, expression_statement; 14, if_statement; 15, if_statement; 16, if_statement; 17, function_definition; 18, comment:# GFF/GTF format; 19, expression_statement; 20, comment:# Figure out default attrs to use, depending on the original format.; 21, if_statement; 22, if_statement; 23, if_statement; 24, return_statement; 25, identifier:merge_overlapping; 26, False; 27, identifier:attrs; 28, None; 29, identifier:attrs_sep; 30, string:":"; 31, identifier:merge_kwargs; 32, None; 33, identifier:as_bed6; 34, False; 35, identifier:bedtools_227_or_later; 36, True; 37, comment:""" Create 1-bp transcription start sites for all transcripts in the database and return as a sorted pybedtools.BedTool object pointing to a temporary file. To save the file to a known location, use the `.moveto()` method on the resulting `pybedtools.BedTool` object. To extend regions upstream/downstream, see the `.slop()` method on the resulting `pybedtools.BedTool object`. Requires pybedtools. Parameters ---------- db : gffutils.FeatureDB The database to use as_bed6 : bool If True, output file is in BED6 format; otherwise it remains in the GFF/GTF format and dialect of the file used to create the database. Note that the merge options below necessarily force `as_bed6=True`. merge_overlapping : bool If True, output will be in BED format. Overlapping TSSes will be merged into a single feature, and their names will be collapsed using `merge_sep` and placed in the new name field. merge_kwargs : dict If `merge_overlapping=True`, these keyword arguments are passed to pybedtools.BedTool.merge(), which are in turn sent to `bedtools merge`. The merge operates on a BED6 file which will have had the name field constructed as specified by other arguments here. See the available options for your installed version of BEDTools; the defaults used here are `merge_kwargs=dict(o='distinct', c=4, s=True)`. Any provided `merge_kwargs` are used to *update* the default. It is recommended to not override `c=4` and `s=True`, otherwise the post-merge fixing may not work correctly. Good candidates for tweaking are `d` (merge distance), `o` (operation), `delim` (delimiter to use for collapse operations). attrs : str or list Only has an effect when `as_bed6=True` or `merge_overlapping=True`. Determines what goes in the name field of an output BED file. By default, "gene_id" for GTF databases and "ID" for GFF. If a list of attributes is supplied, e.g. ["gene_id", "transcript_id"], then these will be joined by `attr_join_sep` and then placed in the name field. attrs_sep: str If `as_bed6=True` or `merge_overlapping=True`, then use this character to separate attributes in the name field of the output BED. If also using `merge_overlapping=True`, you'll probably want this to be different than `merge_sep` in order to parse things out later. bedtools_227_or_later : bool In version 2.27, BEDTools changed the output for merge. By default, this function expects BEDTools version 2.27 or later, but set this to False to assume the older behavior. For testing purposes, the environment variable GFFUTILS_USES_BEDTOOLS_227_OR_LATER is set to either "true" or "false" and is used to override this argument. Examples -------- >>> import gffutils >>> db = gffutils.create_db( ... gffutils.example_filename('FBgn0031208.gtf'), ... ":memory:", ... keep_order=True, ... verbose=False) Default settings -- no merging, and report a separate TSS on each line even if they overlap (as in the first two): >>> print(tsses(db)) # doctest: +NORMALIZE_WHITESPACE chr2L gffutils_derived transcript_TSS 7529 7529 . + . gene_id "FBgn0031208"; transcript_id "FBtr0300689"; chr2L gffutils_derived transcript_TSS 7529 7529 . + . gene_id "FBgn0031208"; transcript_id "FBtr0300690"; chr2L gffutils_derived transcript_TSS 11000 11000 . - . gene_id "Fk_gene_1"; transcript_id "transcript_Fk_gene_1"; chr2L gffutils_derived transcript_TSS 12500 12500 . - . gene_id "Fk_gene_2"; transcript_id "transcript_Fk_gene_2"; <BLANKLINE> Default merging, showing the first two TSSes merged and reported as a single unique TSS for the gene. Note the conversion to BED: >>> x = tsses(db, merge_overlapping=True) >>> print(x) # doctest: +NORMALIZE_WHITESPACE chr2L 7528 7529 FBgn0031208 . + chr2L 10999 11000 Fk_gene_1 . - chr2L 12499 12500 Fk_gene_2 . - <BLANKLINE> Report both gene ID and transcript ID in the name. In some cases this can be easier to parse than the original GTF or GFF file. With no merging specified, we must add `as_bed6=True` to see the names in BED format. >>> x = tsses(db, attrs=['gene_id', 'transcript_id'], as_bed6=True) >>> print(x) # doctest: +NORMALIZE_WHITESPACE chr2L 7528 7529 FBgn0031208:FBtr0300689 . + chr2L 7528 7529 FBgn0031208:FBtr0300690 . + chr2L 10999 11000 Fk_gene_1:transcript_Fk_gene_1 . - chr2L 12499 12500 Fk_gene_2:transcript_Fk_gene_2 . - <BLANKLINE> Use a 3kb merge distance so the last 2 features are merged together: >>> x = tsses(db, merge_overlapping=True, merge_kwargs=dict(d=3000)) >>> print(x) # doctest: +NORMALIZE_WHITESPACE chr2L 7528 7529 FBgn0031208 . + chr2L 10999 12500 Fk_gene_1,Fk_gene_2 . - <BLANKLINE> The set of unique TSSes for each gene, +1kb upstream and 500bp downstream: >>> x = tsses(db, merge_overlapping=True) >>> x = x.slop(l=1000, r=500, s=True, genome='dm3') >>> print(x) # doctest: +NORMALIZE_WHITESPACE chr2L 6528 8029 FBgn0031208 . + chr2L 10499 12000 Fk_gene_1 . - chr2L 11999 13500 Fk_gene_2 . - <BLANKLINE> """; 38, assignment; 39, comparison_operator:_override is not None; 40, block; 41, identifier:bedtools_227_or_later; 42, block; 43, else_clause; 44, comparison_operator:merge_kwargs is not None; 45, block; 46, function_name:gen; 47, parameters; 48, block; 49, assignment; 50, comparison_operator:attrs is None; 51, block; 52, boolean_operator; 53, block; 54, identifier:merge_overlapping; 55, block; 56, identifier:x; 57, identifier:_override; 58, call; 59, identifier:_override; 60, None; 61, if_statement; 62, expression_statement; 63, block; 64, identifier:merge_kwargs; 65, None; 66, expression_statement; 67, expression_statement; 68, for_statement; 69, identifier:x; 70, call; 71, identifier:attrs; 72, None; 73, if_statement; 74, identifier:merge_overlapping; 75, identifier:as_bed6; 76, if_statement; 77, function_definition; 78, expression_statement; 79, if_statement; 80, attribute; 81, argument_list; 82, comparison_operator:_override == 'true'; 83, block; 84, elif_clause; 85, else_clause; 86, assignment; 87, expression_statement; 88, call; 89, comment:""" Generator of pybedtools.Intervals representing TSSes. """; 90, identifier:gene; 91, call; 92, block; 93, attribute; 94, argument_list; 95, comparison_operator:db.dialect['fmt'] == 'gtf'; 96, block; 97, else_clause; 98, call; 99, block; 100, function_name:to_bed; 101, parameters; 102, block; 103, assignment; 104, identifier:bedtools_227_or_later; 105, block; 106, else_clause; 107, attribute; 108, identifier:get; 109, string; 110, None; 111, identifier:_override; 112, string; 113, expression_statement; 114, comparison_operator:_override == 'false'; 115, block; 116, block; 117, identifier:_merge_kwargs; 118, call; 119, assignment; 120, attribute; 121, argument_list; 122, attribute; 123, argument_list; 124, for_statement; 125, call; 126, identifier:sort; 127, subscript; 128, string; 129, expression_statement; 130, block; 131, identifier:isinstance; 132, argument_list; 133, expression_statement; 134, identifier:f; 135, expression_statement; 136, expression_statement; 137, return_statement; 138, identifier:x; 139, call; 140, expression_statement; 141, block; 142, identifier:os; 143, identifier:environ; 144, string_content:GFFUTILS_USES_BEDTOOLS_227_OR_LATER; 145, string_content:true; 146, assignment; 147, identifier:_override; 148, string; 149, expression_statement; 150, raise_statement; 151, identifier:dict; 152, argument_list; 153, identifier:_merge_kwargs; 154, call; 155, identifier:_merge_kwargs; 156, identifier:update; 157, identifier:merge_kwargs; 158, identifier:db; 159, identifier:features_of_type; 160, string; 161, identifier:transcript; 162, call; 163, block; 164, attribute; 165, argument_list; 166, attribute; 167, string; 168, string_content:gtf; 169, assignment; 170, expression_statement; 171, identifier:attrs; 172, attribute; 173, assignment; 174, comment:""" Given a pybedtools.Interval, return a new Interval with the name set according to the kwargs provided above. """; 175, assignment; 176, call; 177, attribute; 178, argument_list; 179, assignment; 180, function_definition; 181, expression_statement; 182, identifier:bedtools_227_or_later; 183, True; 184, string_content:false; 185, assignment; 186, call; 187, keyword_argument; 188, keyword_argument; 189, keyword_argument; 190, identifier:dict; 191, argument_list; 192, string_content:gene; 193, attribute; 194, argument_list; 195, if_statement; 196, expression_statement; 197, expression_statement; 198, identifier:pybedtools; 199, identifier:BedTool; 200, call; 201, identifier:db; 202, identifier:dialect; 203, string_content:fmt; 204, identifier:attrs; 205, string; 206, assignment; 207, identifier:six; 208, identifier:string_types; 209, identifier:attrs; 210, list; 211, identifier:name; 212, call; 213, attribute; 214, argument_list; 215, call; 216, identifier:saveas; 217, identifier:x; 218, call; 219, function_name:fix_merge; 220, parameters; 221, block; 222, assignment; 223, identifier:bedtools_227_or_later; 224, False; 225, identifier:ValueError; 226, argument_list; 227, identifier:o; 228, string; 229, identifier:s; 230, True; 231, identifier:c; 232, string; 233, keyword_argument; 234, keyword_argument; 235, keyword_argument; 236, identifier:db; 237, identifier:children; 238, identifier:gene; 239, keyword_argument; 240, comparison_operator:transcript.strand == '-'; 241, block; 242, else_clause; 243, assignment; 244, yield; 245, identifier:gen; 246, argument_list; 247, string_content:gene_id; 248, identifier:attrs; 249, string; 250, identifier:attrs; 251, attribute; 252, argument_list; 253, identifier:pybedtools; 254, identifier:Interval; 255, attribute; 256, attribute; 257, attribute; 258, identifier:name; 259, call; 260, attribute; 261, attribute; 262, argument_list; 263, attribute; 264, argument_list; 265, identifier:f; 266, expression_statement; 267, return_statement; 268, identifier:x; 269, call; 270, call; 271, string_content:distinct; 272, string_content:4,5,6; 273, identifier:o; 274, string; 275, identifier:s; 276, True; 277, identifier:c; 278, string; 279, identifier:level; 280, integer:1; 281, attribute; 282, string; 283, expression_statement; 284, block; 285, attribute; 286, binary_operator:transcript.featuretype + '_TSS'; 287, call; 288, string_content:ID; 289, identifier:attrs_sep; 290, identifier:join; 291, list_comprehension; 292, identifier:f; 293, identifier:chrom; 294, identifier:f; 295, identifier:start; 296, identifier:f; 297, identifier:stop; 298, identifier:str; 299, argument_list; 300, identifier:f; 301, identifier:strand; 302, identifier:x; 303, identifier:each; 304, identifier:to_bed; 305, identifier:x; 306, identifier:merge; 307, dictionary_splat; 308, assignment; 309, call; 310, attribute; 311, argument_list; 312, attribute; 313, argument_list; 314, string_content:distinct; 315, string_content:4; 316, identifier:transcript; 317, identifier:strand; 318, string_content:-; 319, assignment; 320, expression_statement; 321, identifier:transcript; 322, identifier:featuretype; 323, attribute; 324, string; 325, attribute; 326, argument_list; 327, subscript; 328, for_in_clause; 329, attribute; 330, identifier:_merge_kwargs; 331, identifier:f; 332, call; 333, attribute; 334, argument_list; 335, call; 336, identifier:saveas; 337, concatenated_string; 338, identifier:format; 339, identifier:_override; 340, attribute; 341, attribute; 342, assignment; 343, identifier:transcript; 344, identifier:featuretype; 345, string_content:_TSS; 346, identifier:helpers; 347, identifier:asinterval; 348, identifier:transcript; 349, attribute; 350, identifier:i; 351, identifier:i; 352, identifier:attrs; 353, identifier:f; 354, identifier:score; 355, attribute; 356, argument_list; 357, identifier:pybedtools; 358, identifier:Interval; 359, attribute; 360, attribute; 361, attribute; 362, subscript; 363, string; 364, subscript; 365, attribute; 366, argument_list; 367, string:"Unknown value for GFFUTILS_USES_BEDTOOLS_227_OR_LATER "; 368, string:"environment variable: {0}"; 369, identifier:transcript; 370, identifier:start; 371, identifier:transcript; 372, identifier:stop; 373, attribute; 374, attribute; 375, identifier:f; 376, identifier:attrs; 377, identifier:featurefuncs; 378, identifier:extend_fields; 379, identifier:f; 380, integer:6; 381, identifier:f; 382, identifier:chrom; 383, identifier:f; 384, identifier:start; 385, identifier:f; 386, identifier:stop; 387, identifier:f; 388, integer:4; 389, string_content:.; 390, identifier:f; 391, integer:3; 392, call; 393, identifier:each; 394, identifier:fix_merge; 395, identifier:transcript; 396, identifier:stop; 397, identifier:transcript; 398, identifier:start; 399, attribute; 400, argument_list; 401, call; 402, identifier:saveas; 403, attribute; 404, argument_list; 405, identifier:x; 406, identifier:merge; 407, dictionary_splat; 408, identifier:_merge_kwargs
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 3, 9; 3, 10; 3, 11; 4, 12; 4, 13; 4, 14; 4, 15; 4, 16; 4, 17; 4, 18; 4, 19; 4, 20; 4, 21; 4, 22; 4, 23; 4, 24; 6, 25; 6, 26; 7, 27; 7, 28; 8, 29; 8, 30; 9, 31; 9, 32; 10, 33; 10, 34; 11, 35; 11, 36; 12, 37; 13, 38; 14, 39; 14, 40; 15, 41; 15, 42; 15, 43; 16, 44; 16, 45; 17, 46; 17, 47; 17, 48; 19, 49; 21, 50; 21, 51; 22, 52; 22, 53; 23, 54; 23, 55; 24, 56; 38, 57; 38, 58; 39, 59; 39, 60; 40, 61; 42, 62; 43, 63; 44, 64; 44, 65; 45, 66; 48, 67; 48, 68; 49, 69; 49, 70; 50, 71; 50, 72; 51, 73; 52, 74; 52, 75; 53, 76; 53, 77; 53, 78; 55, 79; 58, 80; 58, 81; 61, 82; 61, 83; 61, 84; 61, 85; 62, 86; 63, 87; 66, 88; 67, 89; 68, 90; 68, 91; 68, 92; 70, 93; 70, 94; 73, 95; 73, 96; 73, 97; 76, 98; 76, 99; 77, 100; 77, 101; 77, 102; 78, 103; 79, 104; 79, 105; 79, 106; 80, 107; 80, 108; 81, 109; 81, 110; 82, 111; 82, 112; 83, 113; 84, 114; 84, 115; 85, 116; 86, 117; 86, 118; 87, 119; 88, 120; 88, 121; 91, 122; 91, 123; 92, 124; 93, 125; 93, 126; 95, 127; 95, 128; 96, 129; 97, 130; 98, 131; 98, 132; 99, 133; 101, 134; 102, 135; 102, 136; 102, 137; 103, 138; 103, 139; 105, 140; 106, 141; 107, 142; 107, 143; 109, 144; 112, 145; 113, 146; 114, 147; 114, 148; 115, 149; 116, 150; 118, 151; 118, 152; 119, 153; 119, 154; 120, 155; 120, 156; 121, 157; 122, 158; 122, 159; 123, 160; 124, 161; 124, 162; 124, 163; 125, 164; 125, 165; 127, 166; 127, 167; 128, 168; 129, 169; 130, 170; 132, 171; 132, 172; 133, 173; 135, 174; 136, 175; 137, 176; 139, 177; 139, 178; 140, 179; 141, 180; 141, 181; 146, 182; 146, 183; 148, 184; 149, 185; 150, 186; 152, 187; 152, 188; 152, 189; 154, 190; 154, 191; 160, 192; 162, 193; 162, 194; 163, 195; 163, 196; 163, 197; 164, 198; 164, 199; 165, 200; 166, 201; 166, 202; 167, 203; 169, 204; 169, 205; 170, 206; 172, 207; 172, 208; 173, 209; 173, 210; 175, 211; 175, 212; 176, 213; 176, 214; 177, 215; 177, 216; 179, 217; 179, 218; 180, 219; 180, 220; 180, 221; 181, 222; 185, 223; 185, 224; 186, 225; 186, 226; 187, 227; 187, 228; 188, 229; 188, 230; 189, 231; 189, 232; 191, 233; 191, 234; 191, 235; 193, 236; 193, 237; 194, 238; 194, 239; 195, 240; 195, 241; 195, 242; 196, 243; 197, 244; 200, 245; 200, 246; 205, 247; 206, 248; 206, 249; 210, 250; 212, 251; 212, 252; 213, 253; 213, 254; 214, 255; 214, 256; 214, 257; 214, 258; 214, 259; 214, 260; 215, 261; 215, 262; 218, 263; 218, 264; 220, 265; 221, 266; 221, 267; 222, 268; 222, 269; 226, 270; 228, 271; 232, 272; 233, 273; 233, 274; 234, 275; 234, 276; 235, 277; 235, 278; 239, 279; 239, 280; 240, 281; 240, 282; 241, 283; 242, 284; 243, 285; 243, 286; 244, 287; 249, 288; 251, 289; 251, 290; 252, 291; 255, 292; 255, 293; 256, 294; 256, 295; 257, 296; 257, 297; 259, 298; 259, 299; 260, 300; 260, 301; 261, 302; 261, 303; 262, 304; 263, 305; 263, 306; 264, 307; 266, 308; 267, 309; 269, 310; 269, 311; 270, 312; 270, 313; 274, 314; 278, 315; 281, 316; 281, 317; 282, 318; 283, 319; 284, 320; 285, 321; 285, 322; 286, 323; 286, 324; 287, 325; 287, 326; 291, 327; 291, 328; 299, 329; 307, 330; 308, 331; 308, 332; 309, 333; 309, 334; 310, 335; 310, 336; 312, 337; 312, 338; 313, 339; 319, 340; 319, 341; 320, 342; 323, 343; 323, 344; 324, 345; 325, 346; 325, 347; 326, 348; 327, 349; 327, 350; 328, 351; 328, 352; 329, 353; 329, 354; 332, 355; 332, 356; 333, 357; 333, 358; 334, 359; 334, 360; 334, 361; 334, 362; 334, 363; 334, 364; 335, 365; 335, 366; 337, 367; 337, 368; 340, 369; 340, 370; 341, 371; 341, 372; 342, 373; 342, 374; 349, 375; 349, 376; 355, 377; 355, 378; 356, 379; 356, 380; 359, 381; 359, 382; 360, 383; 360, 384; 361, 385; 361, 386; 362, 387; 362, 388; 363, 389; 364, 390; 364, 391; 365, 392; 365, 393; 366, 394; 373, 395; 373, 396; 374, 397; 374, 398; 392, 399; 392, 400; 399, 401; 399, 402; 401, 403; 401, 404; 403, 405; 403, 406; 404, 407; 407, 408
def tsses(db, merge_overlapping=False, attrs=None, attrs_sep=":", merge_kwargs=None, as_bed6=False, bedtools_227_or_later=True): """ Create 1-bp transcription start sites for all transcripts in the database and return as a sorted pybedtools.BedTool object pointing to a temporary file. To save the file to a known location, use the `.moveto()` method on the resulting `pybedtools.BedTool` object. To extend regions upstream/downstream, see the `.slop()` method on the resulting `pybedtools.BedTool object`. Requires pybedtools. Parameters ---------- db : gffutils.FeatureDB The database to use as_bed6 : bool If True, output file is in BED6 format; otherwise it remains in the GFF/GTF format and dialect of the file used to create the database. Note that the merge options below necessarily force `as_bed6=True`. merge_overlapping : bool If True, output will be in BED format. Overlapping TSSes will be merged into a single feature, and their names will be collapsed using `merge_sep` and placed in the new name field. merge_kwargs : dict If `merge_overlapping=True`, these keyword arguments are passed to pybedtools.BedTool.merge(), which are in turn sent to `bedtools merge`. The merge operates on a BED6 file which will have had the name field constructed as specified by other arguments here. See the available options for your installed version of BEDTools; the defaults used here are `merge_kwargs=dict(o='distinct', c=4, s=True)`. Any provided `merge_kwargs` are used to *update* the default. It is recommended to not override `c=4` and `s=True`, otherwise the post-merge fixing may not work correctly. Good candidates for tweaking are `d` (merge distance), `o` (operation), `delim` (delimiter to use for collapse operations). attrs : str or list Only has an effect when `as_bed6=True` or `merge_overlapping=True`. Determines what goes in the name field of an output BED file. By default, "gene_id" for GTF databases and "ID" for GFF. If a list of attributes is supplied, e.g. ["gene_id", "transcript_id"], then these will be joined by `attr_join_sep` and then placed in the name field. attrs_sep: str If `as_bed6=True` or `merge_overlapping=True`, then use this character to separate attributes in the name field of the output BED. If also using `merge_overlapping=True`, you'll probably want this to be different than `merge_sep` in order to parse things out later. bedtools_227_or_later : bool In version 2.27, BEDTools changed the output for merge. By default, this function expects BEDTools version 2.27 or later, but set this to False to assume the older behavior. For testing purposes, the environment variable GFFUTILS_USES_BEDTOOLS_227_OR_LATER is set to either "true" or "false" and is used to override this argument. Examples -------- >>> import gffutils >>> db = gffutils.create_db( ... gffutils.example_filename('FBgn0031208.gtf'), ... ":memory:", ... keep_order=True, ... verbose=False) Default settings -- no merging, and report a separate TSS on each line even if they overlap (as in the first two): >>> print(tsses(db)) # doctest: +NORMALIZE_WHITESPACE chr2L gffutils_derived transcript_TSS 7529 7529 . + . gene_id "FBgn0031208"; transcript_id "FBtr0300689"; chr2L gffutils_derived transcript_TSS 7529 7529 . + . gene_id "FBgn0031208"; transcript_id "FBtr0300690"; chr2L gffutils_derived transcript_TSS 11000 11000 . - . gene_id "Fk_gene_1"; transcript_id "transcript_Fk_gene_1"; chr2L gffutils_derived transcript_TSS 12500 12500 . - . gene_id "Fk_gene_2"; transcript_id "transcript_Fk_gene_2"; <BLANKLINE> Default merging, showing the first two TSSes merged and reported as a single unique TSS for the gene. Note the conversion to BED: >>> x = tsses(db, merge_overlapping=True) >>> print(x) # doctest: +NORMALIZE_WHITESPACE chr2L 7528 7529 FBgn0031208 . + chr2L 10999 11000 Fk_gene_1 . - chr2L 12499 12500 Fk_gene_2 . - <BLANKLINE> Report both gene ID and transcript ID in the name. In some cases this can be easier to parse than the original GTF or GFF file. With no merging specified, we must add `as_bed6=True` to see the names in BED format. >>> x = tsses(db, attrs=['gene_id', 'transcript_id'], as_bed6=True) >>> print(x) # doctest: +NORMALIZE_WHITESPACE chr2L 7528 7529 FBgn0031208:FBtr0300689 . + chr2L 7528 7529 FBgn0031208:FBtr0300690 . + chr2L 10999 11000 Fk_gene_1:transcript_Fk_gene_1 . - chr2L 12499 12500 Fk_gene_2:transcript_Fk_gene_2 . - <BLANKLINE> Use a 3kb merge distance so the last 2 features are merged together: >>> x = tsses(db, merge_overlapping=True, merge_kwargs=dict(d=3000)) >>> print(x) # doctest: +NORMALIZE_WHITESPACE chr2L 7528 7529 FBgn0031208 . + chr2L 10999 12500 Fk_gene_1,Fk_gene_2 . - <BLANKLINE> The set of unique TSSes for each gene, +1kb upstream and 500bp downstream: >>> x = tsses(db, merge_overlapping=True) >>> x = x.slop(l=1000, r=500, s=True, genome='dm3') >>> print(x) # doctest: +NORMALIZE_WHITESPACE chr2L 6528 8029 FBgn0031208 . + chr2L 10499 12000 Fk_gene_1 . - chr2L 11999 13500 Fk_gene_2 . - <BLANKLINE> """ _override = os.environ.get('GFFUTILS_USES_BEDTOOLS_227_OR_LATER', None) if _override is not None: if _override == 'true': bedtools_227_or_later = True elif _override == 'false': bedtools_227_or_later = False else: raise ValueError( "Unknown value for GFFUTILS_USES_BEDTOOLS_227_OR_LATER " "environment variable: {0}".format(_override)) if bedtools_227_or_later: _merge_kwargs = dict(o='distinct', s=True, c='4,5,6') else: _merge_kwargs = dict(o='distinct', s=True, c='4') if merge_kwargs is not None: _merge_kwargs.update(merge_kwargs) def gen(): """ Generator of pybedtools.Intervals representing TSSes. """ for gene in db.features_of_type('gene'): for transcript in db.children(gene, level=1): if transcript.strand == '-': transcript.start = transcript.stop else: transcript.stop = transcript.start transcript.featuretype = transcript.featuretype + '_TSS' yield helpers.asinterval(transcript) # GFF/GTF format x = pybedtools.BedTool(gen()).sort() # Figure out default attrs to use, depending on the original format. if attrs is None: if db.dialect['fmt'] == 'gtf': attrs = 'gene_id' else: attrs = 'ID' if merge_overlapping or as_bed6: if isinstance(attrs, six.string_types): attrs = [attrs] def to_bed(f): """ Given a pybedtools.Interval, return a new Interval with the name set according to the kwargs provided above. """ name = attrs_sep.join([f.attrs[i] for i in attrs]) return pybedtools.Interval( f.chrom, f.start, f.stop, name, str(f.score), f.strand) x = x.each(to_bed).saveas() if merge_overlapping: if bedtools_227_or_later: x = x.merge(**_merge_kwargs) else: def fix_merge(f): f = featurefuncs.extend_fields(f, 6) return pybedtools.Interval( f.chrom, f.start, f.stop, f[4], '.', f[3]) x = x.merge(**_merge_kwargs).saveas().each(fix_merge).saveas() return x
0, module; 1, function_definition; 2, function_name:interfeatures; 3, parameters; 4, block; 5, identifier:self; 6, identifier:features; 7, default_parameter; 8, default_parameter; 9, default_parameter; 10, default_parameter; 11, default_parameter; 12, expression_statement; 13, for_statement; 14, identifier:new_featuretype; 15, None; 16, identifier:merge_attributes; 17, True; 18, identifier:dialect; 19, None; 20, identifier:attribute_func; 21, None; 22, identifier:update_attributes; 23, None; 24, comment:""" Construct new features representing the space between features. For example, if `features` is a list of exons, then this method will return the introns. If `features` is a list of genes, then this method will return the intergenic regions. Providing N features will return N - 1 new features. This method purposefully does *not* do any merging or sorting of coordinates, so you may want to use :meth:`FeatureDB.merge` first, or when selecting features use the `order_by` kwarg, e.g., `db.features_of_type('gene', order_by=('seqid', 'start'))`. Parameters ---------- features : iterable of :class:`feature.Feature` instances Sorted, merged iterable new_featuretype : string or None The new features will all be of this type, or, if None (default) then the featuretypes will be constructed from the neighboring features, e.g., `inter_exon_exon`. merge_attributes : bool If True, new features' attributes will be a merge of the neighboring features' attributes. This is useful if you have provided a list of exons; the introns will then retain the transcript and/or gene parents as a single item. Otherwise, if False, the attribute will be a comma-separated list of values, potentially listing the same gene ID twice. attribute_func : callable or None If None, then nothing special is done to the attributes. If callable, then the callable accepts two attribute dictionaries and returns a single attribute dictionary. If `merge_attributes` is True, then `attribute_func` is called before `merge_attributes`. This could be useful for manually managing IDs for the new features. update_attributes : dict After attributes have been modified and merged, this dictionary can be used to replace parts of the attributes dictionary. Returns ------- A generator that yields :class:`Feature` objects """; 25, pattern_list; 26, call; 27, comment:# no inter-feature for the first one; 28, block; 29, identifier:i; 30, identifier:f; 31, identifier:enumerate; 32, argument_list; 33, if_statement; 34, expression_statement; 35, if_statement; 36, if_statement; 37, if_statement; 38, expression_statement; 39, expression_statement; 40, comment:# Shrink; 41, expression_statement; 42, expression_statement; 43, if_statement; 44, if_statement; 45, expression_statement; 46, expression_statement; 47, expression_statement; 48, if_statement; 49, expression_statement; 50, expression_statement; 51, identifier:features; 52, comparison_operator:i == 0; 53, block; 54, assignment; 55, comparison_operator:new_featuretype is None; 56, block; 57, comparison_operator:last_feature.strand != f.strand; 58, block; 59, else_clause; 60, comparison_operator:last_feature.chrom != f.chrom; 61, comment:# We've moved to a new chromosome. For example, if we're; 62, comment:# getting intergenic regions from all genes, they will be on; 63, comment:# different chromosomes. We still assume sorted features, but; 64, comment:# don't complain if they're on different chromosomes -- just; 65, comment:# move on.; 66, block; 67, assignment; 68, assignment; 69, augmented_assignment; 70, augmented_assignment; 71, identifier:merge_attributes; 72, block; 73, else_clause; 74, identifier:update_attributes; 75, block; 76, assignment; 77, assignment; 78, assignment; 79, comparison_operator:dialect is None; 80, comment:# Support for @classmethod -- if calling from the class, then; 81, comment:# self.dialect is not defined, so defer to Feature's default; 82, comment:# (which will be constants.dialect, or GFF3).; 83, block; 84, yield; 85, assignment; 86, identifier:i; 87, integer:0; 88, expression_statement; 89, expression_statement; 90, continue_statement; 91, identifier:interfeature_stop; 92, attribute; 93, identifier:new_featuretype; 94, None; 95, expression_statement; 96, attribute; 97, attribute; 98, expression_statement; 99, block; 100, attribute; 101, attribute; 102, expression_statement; 103, continue_statement; 104, identifier:strand; 105, identifier:new_strand; 106, identifier:chrom; 107, attribute; 108, identifier:interfeature_start; 109, integer:1; 110, identifier:interfeature_stop; 111, integer:1; 112, expression_statement; 113, block; 114, expression_statement; 115, identifier:new_bin; 116, call; 117, identifier:_id; 118, None; 119, identifier:fields; 120, call; 121, identifier:dialect; 122, None; 123, try_statement; 124, call; 125, identifier:interfeature_start; 126, attribute; 127, assignment; 128, assignment; 129, identifier:f; 130, identifier:start; 131, assignment; 132, identifier:last_feature; 133, identifier:strand; 134, identifier:f; 135, identifier:strand; 136, assignment; 137, expression_statement; 138, identifier:last_feature; 139, identifier:chrom; 140, identifier:f; 141, identifier:chrom; 142, assignment; 143, identifier:last_feature; 144, identifier:chrom; 145, assignment; 146, expression_statement; 147, call; 148, attribute; 149, argument_list; 150, identifier:dict; 151, argument_list; 152, block; 153, except_clause; 154, attribute; 155, argument_list; 156, identifier:f; 157, identifier:stop; 158, identifier:interfeature_start; 159, attribute; 160, identifier:last_feature; 161, identifier:f; 162, identifier:new_featuretype; 163, binary_operator:'inter_%s_%s' % ( last_feature.featuretype, f.featuretype); 164, identifier:new_strand; 165, string; 166, assignment; 167, identifier:last_feature; 168, identifier:f; 169, identifier:new_attributes; 170, call; 171, assignment; 172, attribute; 173, argument_list; 174, identifier:bins; 175, identifier:bins; 176, identifier:interfeature_start; 177, identifier:interfeature_stop; 178, keyword_argument; 179, keyword_argument; 180, keyword_argument; 181, keyword_argument; 182, keyword_argument; 183, keyword_argument; 184, keyword_argument; 185, keyword_argument; 186, keyword_argument; 187, keyword_argument; 188, keyword_argument; 189, expression_statement; 190, identifier:AttributeError; 191, block; 192, identifier:self; 193, identifier:_feature_returner; 194, dictionary_splat; 195, identifier:f; 196, identifier:stop; 197, string; 198, tuple; 199, string_content:.; 200, identifier:new_strand; 201, attribute; 202, attribute; 203, argument_list; 204, identifier:new_attributes; 205, dictionary; 206, identifier:new_attributes; 207, identifier:update; 208, identifier:update_attributes; 209, identifier:one; 210, True; 211, identifier:seqid; 212, identifier:chrom; 213, identifier:source; 214, string; 215, identifier:featuretype; 216, identifier:new_featuretype; 217, identifier:start; 218, identifier:interfeature_start; 219, identifier:end; 220, identifier:interfeature_stop; 221, identifier:score; 222, string; 223, identifier:strand; 224, identifier:strand; 225, identifier:frame; 226, string; 227, identifier:attributes; 228, identifier:new_attributes; 229, identifier:bin; 230, identifier:new_bin; 231, assignment; 232, expression_statement; 233, identifier:fields; 234, string_content:inter_%s_%s; 235, attribute; 236, attribute; 237, identifier:f; 238, identifier:strand; 239, identifier:helpers; 240, identifier:merge_attributes; 241, attribute; 242, attribute; 243, string_content:gffutils_derived; 244, string_content:.; 245, string_content:.; 246, identifier:dialect; 247, attribute; 248, assignment; 249, identifier:last_feature; 250, identifier:featuretype; 251, identifier:f; 252, identifier:featuretype; 253, identifier:last_feature; 254, identifier:attributes; 255, identifier:f; 256, identifier:attributes; 257, identifier:self; 258, identifier:dialect; 259, identifier:dialect; 260, None
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 3, 9; 3, 10; 3, 11; 4, 12; 4, 13; 7, 14; 7, 15; 8, 16; 8, 17; 9, 18; 9, 19; 10, 20; 10, 21; 11, 22; 11, 23; 12, 24; 13, 25; 13, 26; 13, 27; 13, 28; 25, 29; 25, 30; 26, 31; 26, 32; 28, 33; 28, 34; 28, 35; 28, 36; 28, 37; 28, 38; 28, 39; 28, 40; 28, 41; 28, 42; 28, 43; 28, 44; 28, 45; 28, 46; 28, 47; 28, 48; 28, 49; 28, 50; 32, 51; 33, 52; 33, 53; 34, 54; 35, 55; 35, 56; 36, 57; 36, 58; 36, 59; 37, 60; 37, 61; 37, 62; 37, 63; 37, 64; 37, 65; 37, 66; 38, 67; 39, 68; 41, 69; 42, 70; 43, 71; 43, 72; 43, 73; 44, 74; 44, 75; 45, 76; 46, 77; 47, 78; 48, 79; 48, 80; 48, 81; 48, 82; 48, 83; 49, 84; 50, 85; 52, 86; 52, 87; 53, 88; 53, 89; 53, 90; 54, 91; 54, 92; 55, 93; 55, 94; 56, 95; 57, 96; 57, 97; 58, 98; 59, 99; 60, 100; 60, 101; 66, 102; 66, 103; 67, 104; 67, 105; 68, 106; 68, 107; 69, 108; 69, 109; 70, 110; 70, 111; 72, 112; 73, 113; 75, 114; 76, 115; 76, 116; 77, 117; 77, 118; 78, 119; 78, 120; 79, 121; 79, 122; 83, 123; 84, 124; 85, 125; 85, 126; 88, 127; 89, 128; 92, 129; 92, 130; 95, 131; 96, 132; 96, 133; 97, 134; 97, 135; 98, 136; 99, 137; 100, 138; 100, 139; 101, 140; 101, 141; 102, 142; 107, 143; 107, 144; 112, 145; 113, 146; 114, 147; 116, 148; 116, 149; 120, 150; 120, 151; 123, 152; 123, 153; 124, 154; 124, 155; 126, 156; 126, 157; 127, 158; 127, 159; 128, 160; 128, 161; 131, 162; 131, 163; 136, 164; 136, 165; 137, 166; 142, 167; 142, 168; 145, 169; 145, 170; 146, 171; 147, 172; 147, 173; 148, 174; 148, 175; 149, 176; 149, 177; 149, 178; 151, 179; 151, 180; 151, 181; 151, 182; 151, 183; 151, 184; 151, 185; 151, 186; 151, 187; 151, 188; 152, 189; 153, 190; 153, 191; 154, 192; 154, 193; 155, 194; 159, 195; 159, 196; 163, 197; 163, 198; 165, 199; 166, 200; 166, 201; 170, 202; 170, 203; 171, 204; 171, 205; 172, 206; 172, 207; 173, 208; 178, 209; 178, 210; 179, 211; 179, 212; 180, 213; 180, 214; 181, 215; 181, 216; 182, 217; 182, 218; 183, 219; 183, 220; 184, 221; 184, 222; 185, 223; 185, 224; 186, 225; 186, 226; 187, 227; 187, 228; 188, 229; 188, 230; 189, 231; 191, 232; 194, 233; 197, 234; 198, 235; 198, 236; 201, 237; 201, 238; 202, 239; 202, 240; 203, 241; 203, 242; 214, 243; 222, 244; 226, 245; 231, 246; 231, 247; 232, 248; 235, 249; 235, 250; 236, 251; 236, 252; 241, 253; 241, 254; 242, 255; 242, 256; 247, 257; 247, 258; 248, 259; 248, 260
def interfeatures(self, features, new_featuretype=None, merge_attributes=True, dialect=None, attribute_func=None, update_attributes=None): """ Construct new features representing the space between features. For example, if `features` is a list of exons, then this method will return the introns. If `features` is a list of genes, then this method will return the intergenic regions. Providing N features will return N - 1 new features. This method purposefully does *not* do any merging or sorting of coordinates, so you may want to use :meth:`FeatureDB.merge` first, or when selecting features use the `order_by` kwarg, e.g., `db.features_of_type('gene', order_by=('seqid', 'start'))`. Parameters ---------- features : iterable of :class:`feature.Feature` instances Sorted, merged iterable new_featuretype : string or None The new features will all be of this type, or, if None (default) then the featuretypes will be constructed from the neighboring features, e.g., `inter_exon_exon`. merge_attributes : bool If True, new features' attributes will be a merge of the neighboring features' attributes. This is useful if you have provided a list of exons; the introns will then retain the transcript and/or gene parents as a single item. Otherwise, if False, the attribute will be a comma-separated list of values, potentially listing the same gene ID twice. attribute_func : callable or None If None, then nothing special is done to the attributes. If callable, then the callable accepts two attribute dictionaries and returns a single attribute dictionary. If `merge_attributes` is True, then `attribute_func` is called before `merge_attributes`. This could be useful for manually managing IDs for the new features. update_attributes : dict After attributes have been modified and merged, this dictionary can be used to replace parts of the attributes dictionary. Returns ------- A generator that yields :class:`Feature` objects """ for i, f in enumerate(features): # no inter-feature for the first one if i == 0: interfeature_start = f.stop last_feature = f continue interfeature_stop = f.start if new_featuretype is None: new_featuretype = 'inter_%s_%s' % ( last_feature.featuretype, f.featuretype) if last_feature.strand != f.strand: new_strand = '.' else: new_strand = f.strand if last_feature.chrom != f.chrom: # We've moved to a new chromosome. For example, if we're # getting intergenic regions from all genes, they will be on # different chromosomes. We still assume sorted features, but # don't complain if they're on different chromosomes -- just # move on. last_feature = f continue strand = new_strand chrom = last_feature.chrom # Shrink interfeature_start += 1 interfeature_stop -= 1 if merge_attributes: new_attributes = helpers.merge_attributes( last_feature.attributes, f.attributes) else: new_attributes = {} if update_attributes: new_attributes.update(update_attributes) new_bin = bins.bins( interfeature_start, interfeature_stop, one=True) _id = None fields = dict( seqid=chrom, source='gffutils_derived', featuretype=new_featuretype, start=interfeature_start, end=interfeature_stop, score='.', strand=strand, frame='.', attributes=new_attributes, bin=new_bin) if dialect is None: # Support for @classmethod -- if calling from the class, then # self.dialect is not defined, so defer to Feature's default # (which will be constants.dialect, or GFF3). try: dialect = self.dialect except AttributeError: dialect = None yield self._feature_returner(**fields) interfeature_start = f.stop
0, module; 1, function_definition; 2, function_name:_reconstruct; 3, parameters; 4, block; 5, identifier:keyvals; 6, identifier:dialect; 7, default_parameter; 8, default_parameter; 9, expression_statement; 10, if_statement; 11, if_statement; 12, expression_statement; 13, comment:# Re-encode when reconstructing attributes; 14, if_statement; 15, comment:# May need to split multiple values into multiple key/val pairs; 16, if_statement; 17, function_definition; 18, if_statement; 19, for_statement; 20, comment:# Typically ";" or "; "; 21, expression_statement; 22, comment:# Sometimes need to add this; 23, if_statement; 24, return_statement; 25, identifier:keep_order; 26, False; 27, identifier:sort_attribute_values; 28, False; 29, comment:""" Reconstructs the original attributes string according to the dialect. Parameters ========== keyvals : dict Attributes from a GFF/GTF feature dialect : dict Dialect containing info on how to reconstruct a string version of the attributes keep_order : bool If True, then perform sorting of attribute keys to ensure they are in the same order as those provided in the original file. Default is False, which saves time especially on large data sets. sort_attribute_values : bool If True, then sort values to ensure they will always be in the same order. Mostly only useful for testing; default is False. """; 30, not_operator; 31, block; 32, not_operator; 33, block; 34, assignment; 35, boolean_operator; 36, block; 37, else_clause; 38, subscript; 39, block; 40, else_clause; 41, function_name:sort_key; 42, parameters; 43, comment:# sort keys by their order in the dialect; anything not in there will; 44, comment:# be in arbitrary order at the end.; 45, block; 46, identifier:keep_order; 47, block; 48, pattern_list; 49, identifier:items; 50, comment:# Multival sep is usually a comma:; 51, block; 52, assignment; 53, subscript; 54, block; 55, identifier:parts_str; 56, identifier:dialect; 57, raise_statement; 58, identifier:keyvals; 59, return_statement; 60, identifier:parts; 61, list; 62, attribute; 63, comparison_operator:dialect['fmt'] != 'gff3'; 64, expression_statement; 65, block; 66, identifier:dialect; 67, string; 68, expression_statement; 69, for_statement; 70, block; 71, identifier:x; 72, try_statement; 73, expression_statement; 74, identifier:key; 75, identifier:val; 76, if_statement; 77, expression_statement; 78, identifier:parts_str; 79, call; 80, identifier:dialect; 81, string; 82, expression_statement; 83, call; 84, string:""; 85, identifier:constants; 86, identifier:ignore_url_escape_characters; 87, subscript; 88, string; 89, assignment; 90, expression_statement; 91, for_statement; 92, string_content:repeated keys; 93, assignment; 94, pattern_list; 95, call; 96, block; 97, expression_statement; 98, block; 99, except_clause; 100, call; 101, identifier:val; 102, block; 103, else_clause; 104, call; 105, attribute; 106, argument_list; 107, string_content:trailing semicolon; 108, augmented_assignment; 109, identifier:AttributeStringError; 110, argument_list; 111, identifier:dialect; 112, string; 113, string_content:gff3; 114, identifier:attributes; 115, identifier:keyvals; 116, assignment; 117, pattern_list; 118, call; 119, block; 120, identifier:items; 121, list; 122, identifier:key; 123, identifier:val; 124, attribute; 125, argument_list; 126, if_statement; 127, assignment; 128, return_statement; 129, identifier:ValueError; 130, block; 131, attribute; 132, argument_list; 133, if_statement; 134, expression_statement; 135, if_statement; 136, block; 137, attribute; 138, argument_list; 139, subscript; 140, identifier:join; 141, identifier:parts; 142, identifier:parts_str; 143, string; 144, string_content:fmt; 145, identifier:attributes; 146, dictionary; 147, identifier:k; 148, identifier:v; 149, attribute; 150, argument_list; 151, expression_statement; 152, for_statement; 153, identifier:attributes; 154, identifier:items; 155, comparison_operator:len(val) > 1; 156, block; 157, else_clause; 158, identifier:items; 159, call; 160, call; 161, return_statement; 162, identifier:items; 163, identifier:sort; 164, keyword_argument; 165, identifier:sort_attribute_values; 166, block; 167, assignment; 168, identifier:val_str; 169, comment:# Surround with quotes if needed; 170, block; 171, if_statement; 172, identifier:parts; 173, identifier:append; 174, identifier:part; 175, identifier:dialect; 176, string; 177, string_content:;; 178, identifier:keyvals; 179, identifier:items; 180, assignment; 181, identifier:i; 182, identifier:v; 183, block; 184, call; 185, integer:1; 186, for_statement; 187, block; 188, identifier:list; 189, argument_list; 190, attribute; 191, argument_list; 192, float:1e6; 193, identifier:key; 194, identifier:sort_key; 195, expression_statement; 196, identifier:val_str; 197, call; 198, if_statement; 199, comment:# Typically "=" for GFF3 or " " otherwise; 200, expression_statement; 201, comparison_operator:dialect['fmt'] == 'gtf'; 202, block; 203, else_clause; 204, string_content:field separator; 205, subscript; 206, list; 207, expression_statement; 208, identifier:len; 209, argument_list; 210, identifier:v; 211, identifier:val; 212, block; 213, expression_statement; 214, call; 215, subscript; 216, identifier:index; 217, subscript; 218, assignment; 219, attribute; 220, argument_list; 221, subscript; 222, block; 223, assignment; 224, subscript; 225, string; 226, expression_statement; 227, block; 228, identifier:attributes; 229, identifier:k; 230, call; 231, identifier:val; 232, expression_statement; 233, call; 234, attribute; 235, argument_list; 236, identifier:dialect; 237, string; 238, identifier:x; 239, integer:0; 240, identifier:val; 241, call; 242, subscript; 243, identifier:join; 244, identifier:val; 245, identifier:dialect; 246, string; 247, expression_statement; 248, identifier:part; 249, call; 250, identifier:dialect; 251, string; 252, string_content:gtf; 253, assignment; 254, expression_statement; 255, attribute; 256, argument_list; 257, call; 258, attribute; 259, argument_list; 260, identifier:attributes; 261, identifier:items; 262, string_content:order; 263, identifier:sorted; 264, argument_list; 265, identifier:dialect; 266, string; 267, string_content:quoted GFF2 values; 268, assignment; 269, attribute; 270, argument_list; 271, string_content:fmt; 272, identifier:part; 273, call; 274, assignment; 275, subscript; 276, identifier:append; 277, call; 278, attribute; 279, argument_list; 280, identifier:items; 281, identifier:append; 282, tuple; 283, identifier:val; 284, string_content:multival separator; 285, identifier:val_str; 286, binary_operator:'"%s"' % val_str; 287, subscript; 288, identifier:join; 289, list; 290, attribute; 291, argument_list; 292, identifier:part; 293, identifier:key; 294, identifier:attributes; 295, identifier:k; 296, attribute; 297, argument_list; 298, identifier:items; 299, identifier:append; 300, tuple; 301, identifier:key; 302, identifier:val; 303, string:'"%s"'; 304, identifier:val_str; 305, identifier:dialect; 306, string; 307, identifier:key; 308, identifier:val_str; 309, subscript; 310, identifier:join; 311, list; 312, string; 313, identifier:join; 314, list_comprehension; 315, identifier:key; 316, list; 317, string_content:keyval separator; 318, identifier:dialect; 319, string; 320, identifier:key; 321, string:'""'; 322, subscript; 323, for_in_clause; 324, identifier:v; 325, string_content:keyval separator; 326, identifier:quoter; 327, identifier:j; 328, identifier:j; 329, identifier:i
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 4, 14; 4, 15; 4, 16; 4, 17; 4, 18; 4, 19; 4, 20; 4, 21; 4, 22; 4, 23; 4, 24; 7, 25; 7, 26; 8, 27; 8, 28; 9, 29; 10, 30; 10, 31; 11, 32; 11, 33; 12, 34; 14, 35; 14, 36; 14, 37; 16, 38; 16, 39; 16, 40; 17, 41; 17, 42; 17, 43; 17, 44; 17, 45; 18, 46; 18, 47; 19, 48; 19, 49; 19, 50; 19, 51; 21, 52; 23, 53; 23, 54; 24, 55; 30, 56; 31, 57; 32, 58; 33, 59; 34, 60; 34, 61; 35, 62; 35, 63; 36, 64; 37, 65; 38, 66; 38, 67; 39, 68; 39, 69; 40, 70; 42, 71; 45, 72; 47, 73; 48, 74; 48, 75; 51, 76; 51, 77; 52, 78; 52, 79; 53, 80; 53, 81; 54, 82; 57, 83; 59, 84; 62, 85; 62, 86; 63, 87; 63, 88; 64, 89; 65, 90; 65, 91; 67, 92; 68, 93; 69, 94; 69, 95; 69, 96; 70, 97; 72, 98; 72, 99; 73, 100; 76, 101; 76, 102; 76, 103; 77, 104; 79, 105; 79, 106; 81, 107; 82, 108; 83, 109; 83, 110; 87, 111; 87, 112; 88, 113; 89, 114; 89, 115; 90, 116; 91, 117; 91, 118; 91, 119; 93, 120; 93, 121; 94, 122; 94, 123; 95, 124; 95, 125; 96, 126; 97, 127; 98, 128; 99, 129; 99, 130; 100, 131; 100, 132; 102, 133; 102, 134; 102, 135; 103, 136; 104, 137; 104, 138; 105, 139; 105, 140; 106, 141; 108, 142; 108, 143; 112, 144; 116, 145; 116, 146; 117, 147; 117, 148; 118, 149; 118, 150; 119, 151; 119, 152; 124, 153; 124, 154; 126, 155; 126, 156; 126, 157; 127, 158; 127, 159; 128, 160; 130, 161; 131, 162; 131, 163; 132, 164; 133, 165; 133, 166; 134, 167; 135, 168; 135, 169; 135, 170; 136, 171; 137, 172; 137, 173; 138, 174; 139, 175; 139, 176; 143, 177; 149, 178; 149, 179; 151, 180; 152, 181; 152, 182; 152, 183; 155, 184; 155, 185; 156, 186; 157, 187; 159, 188; 159, 189; 160, 190; 160, 191; 161, 192; 164, 193; 164, 194; 166, 195; 167, 196; 167, 197; 170, 198; 170, 199; 170, 200; 171, 201; 171, 202; 171, 203; 176, 204; 180, 205; 180, 206; 183, 207; 184, 208; 184, 209; 186, 210; 186, 211; 186, 212; 187, 213; 189, 214; 190, 215; 190, 216; 191, 217; 195, 218; 197, 219; 197, 220; 198, 221; 198, 222; 200, 223; 201, 224; 201, 225; 202, 226; 203, 227; 205, 228; 205, 229; 207, 230; 209, 231; 212, 232; 213, 233; 214, 234; 214, 235; 215, 236; 215, 237; 217, 238; 217, 239; 218, 240; 218, 241; 219, 242; 219, 243; 220, 244; 221, 245; 221, 246; 222, 247; 223, 248; 223, 249; 224, 250; 224, 251; 225, 252; 226, 253; 227, 254; 230, 255; 230, 256; 232, 257; 233, 258; 233, 259; 234, 260; 234, 261; 237, 262; 241, 263; 241, 264; 242, 265; 242, 266; 246, 267; 247, 268; 249, 269; 249, 270; 251, 271; 253, 272; 253, 273; 254, 274; 255, 275; 255, 276; 256, 277; 257, 278; 257, 279; 258, 280; 258, 281; 259, 282; 264, 283; 266, 284; 268, 285; 268, 286; 269, 287; 269, 288; 270, 289; 273, 290; 273, 291; 274, 292; 274, 293; 275, 294; 275, 295; 277, 296; 277, 297; 278, 298; 278, 299; 279, 300; 282, 301; 282, 302; 286, 303; 286, 304; 287, 305; 287, 306; 289, 307; 289, 308; 290, 309; 290, 310; 291, 311; 296, 312; 296, 313; 297, 314; 300, 315; 300, 316; 306, 317; 309, 318; 309, 319; 311, 320; 311, 321; 314, 322; 314, 323; 316, 324; 319, 325; 322, 326; 322, 327; 323, 328; 323, 329
def _reconstruct(keyvals, dialect, keep_order=False, sort_attribute_values=False): """ Reconstructs the original attributes string according to the dialect. Parameters ========== keyvals : dict Attributes from a GFF/GTF feature dialect : dict Dialect containing info on how to reconstruct a string version of the attributes keep_order : bool If True, then perform sorting of attribute keys to ensure they are in the same order as those provided in the original file. Default is False, which saves time especially on large data sets. sort_attribute_values : bool If True, then sort values to ensure they will always be in the same order. Mostly only useful for testing; default is False. """ if not dialect: raise AttributeStringError() if not keyvals: return "" parts = [] # Re-encode when reconstructing attributes if constants.ignore_url_escape_characters or dialect['fmt'] != 'gff3': attributes = keyvals else: attributes = {} for k, v in keyvals.items(): attributes[k] = [] for i in v: attributes[k].append(''.join([quoter[j] for j in i])) # May need to split multiple values into multiple key/val pairs if dialect['repeated keys']: items = [] for key, val in attributes.items(): if len(val) > 1: for v in val: items.append((key, [v])) else: items.append((key, val)) else: items = list(attributes.items()) def sort_key(x): # sort keys by their order in the dialect; anything not in there will # be in arbitrary order at the end. try: return dialect['order'].index(x[0]) except ValueError: return 1e6 if keep_order: items.sort(key=sort_key) for key, val in items: # Multival sep is usually a comma: if val: if sort_attribute_values: val = sorted(val) val_str = dialect['multival separator'].join(val) if val_str: # Surround with quotes if needed if dialect['quoted GFF2 values']: val_str = '"%s"' % val_str # Typically "=" for GFF3 or " " otherwise part = dialect['keyval separator'].join([key, val_str]) else: if dialect['fmt'] == 'gtf': part = dialect['keyval separator'].join([key, '""']) else: part = key parts.append(part) # Typically ";" or "; " parts_str = dialect['field separator'].join(parts) # Sometimes need to add this if dialect['trailing semicolon']: parts_str += ';' return parts_str
0, module; 1, function_definition; 2, function_name:create_db; 3, parameters; 4, block; 5, identifier:data; 6, identifier:dbfn; 7, default_parameter; 8, default_parameter; 9, default_parameter; 10, default_parameter; 11, default_parameter; 12, default_parameter; 13, default_parameter; 14, default_parameter; 15, default_parameter; 16, default_parameter; 17, default_parameter; 18, default_parameter; 19, default_parameter; 20, default_parameter; 21, default_parameter; 22, default_parameter; 23, default_parameter; 24, default_parameter; 25, default_parameter; 26, default_parameter; 27, default_parameter; 28, default_parameter; 29, dictionary_splat_pattern; 30, expression_statement; 31, expression_statement; 32, comment:# Check if any older kwargs made it in; 33, expression_statement; 34, expression_statement; 35, comment:# First construct an iterator so that we can identify the file format.; 36, comment:# DataIterator figures out what kind of data was provided (string of lines,; 37, comment:# filename, or iterable of Features) and checks `checklines` lines to; 38, comment:# identify the dialect.; 39, expression_statement; 40, expression_statement; 41, if_statement; 42, comment:# However, a side-effect of this is that if `data` was a generator, then; 43, comment:# we've just consumed `checklines` items (see; 44, comment:# iterators.BaseIterator.__init__, which calls iterators.peek).; 45, comment:#; 46, comment:# But it also chains those consumed items back onto the beginning, and the; 47, comment:# result is available as as iterator._iter.; 48, comment:#; 49, comment:# That's what we should be using now for `data:; 50, expression_statement; 51, expression_statement; 52, comment:# Since we've already checked lines, we don't want to do it again; 53, expression_statement; 54, if_statement; 55, expression_statement; 56, expression_statement; 57, expression_statement; 58, expression_statement; 59, if_statement; 60, return_statement; 61, identifier:id_spec; 62, None; 63, identifier:force; 64, False; 65, identifier:verbose; 66, False; 67, identifier:checklines; 68, integer:10; 69, identifier:merge_strategy; 70, string; 71, identifier:transform; 72, None; 73, identifier:gtf_transcript_key; 74, string; 75, identifier:gtf_gene_key; 76, string; 77, identifier:gtf_subfeature; 78, string; 79, identifier:force_gff; 80, False; 81, identifier:force_dialect_check; 82, False; 83, identifier:from_string; 84, False; 85, identifier:keep_order; 86, False; 87, identifier:text_factory; 88, attribute; 89, identifier:force_merge_fields; 90, None; 91, identifier:pragmas; 92, attribute; 93, identifier:sort_attribute_values; 94, False; 95, identifier:dialect; 96, None; 97, identifier:_keep_tempfiles; 98, False; 99, identifier:infer_gene_extent; 100, True; 101, identifier:disable_infer_genes; 102, False; 103, identifier:disable_infer_transcripts; 104, False; 105, identifier:kwargs; 106, comment:""" Create a database from a GFF or GTF file. For more details on when and how to use the kwargs below, see the examples in the online documentation (:ref:`examples`). Parameters ---------- data : string or iterable If a string (and `from_string` is False), then `data` is the path to the original GFF or GTF file. If a string and `from_string` is True, then assume `data` is the actual data to use. Otherwise, it's an iterable of Feature objects. dbfn : string Path to the database that will be created. Can be the special string ":memory:" to create an in-memory database. id_spec : string, list, dict, callable, or None This parameter guides what will be used as the primary key for the database, which in turn determines how you will access individual features by name from the database. If `id_spec=None`, then auto-increment primary keys based on the feature type (e.g., "gene_1", "gene_2"). This is also the fallback behavior for the other values below. If `id_spec` is a string, then look for this key in the attributes. If it exists, then use its value as the primary key, otherwise autoincrement based on the feature type. For many GFF3 files, "ID" usually works well. If `id_spec` is a list or tuple of keys, then check for each one in order, using the first one found. For GFF3, this might be ["ID", "Name"], which would use the ID if it exists, otherwise the Name, otherwise autoincrement based on the feature type. If `id_spec` is a dictionary, then it is a mapping of feature types to what should be used as the ID. For example, for GTF files, `{'gene': 'gene_id', 'transcript': 'transcript_id'}` may be useful. The values of this dictionary can also be a list, e.g., `{'gene': ['gene_id', 'geneID']}` If `id_spec` is a callable object, then it accepts a dictionary from the iterator and returns one of the following: * None (in which case the feature type will be auto-incremented) * string (which will be used as the primary key) * special string starting with "autoincrement:X", where "X" is a string that will be used for auto-incrementing. For example, if "autoincrement:chr10", then the first feature will be "chr10_1", the second "chr10_2", and so on. force : bool If `False` (default), then raise an exception if `dbfn` already exists. Use `force=True` to overwrite any existing databases. verbose : bool Report percent complete and other feedback on how the db creation is progressing. In order to report percent complete, the entire file needs to be read once to see how many items there are; for large files you may want to use `verbose=False` to avoid this. checklines : int Number of lines to check the dialect. merge_strategy : str One of {merge, create_unique, error, warning, replace}. This parameter specifies the behavior when two items have an identical primary key. Using `merge_strategy="merge"`, then there will be a single entry in the database, but the attributes of all features with the same primary key will be merged. Using `merge_strategy="create_unique"`, then the first entry will use the original primary key, but the second entry will have a unique, autoincremented primary key assigned to it Using `merge_strategy="error"`, a :class:`gffutils.DuplicateID` exception will be raised. This means you will have to edit the file yourself to fix the duplicated IDs. Using `merge_strategy="warning"`, a warning will be printed to the logger, and the duplicate feature will be skipped. Using `merge_strategy="replace"` will replace the entire existing feature with the new feature. transform : callable Function (or other callable object) that accepts a `Feature` object and returns a (possibly modified) `Feature` object. gtf_transcript_key, gtf_gene_key : string Which attribute to use as the transcript ID and gene ID respectively for GTF files. Default is `transcript_id` and `gene_id` according to the GTF spec. gtf_subfeature : string Feature type to use as a "gene component" when inferring gene and transcript extents for GTF files. Default is `exon` according to the GTF spec. force_gff : bool If True, do not do automatic format detection -- only use GFF. force_dialect_check : bool If True, the dialect will be checkef for every feature (instead of just `checklines` features). This can be slow, but may be necessary for inconsistently-formatted input files. from_string : bool If True, then treat `data` as actual data (rather than the path to a file). keep_order : bool If True, all features returned from this instance will have the order of their attributes maintained. This can be turned on or off database-wide by setting the `keep_order` attribute or with this kwarg, or on a feature-by-feature basis by setting the `keep_order` attribute of an individual feature. Note that a single order of attributes will be used for all features. Specifically, the order will be determined by the order of attribute keys in the first `checklines` of the input data. See helpers._choose_dialect for more information on this. Default is False, since this includes a sorting step that can get time-consuming for many features. infer_gene_extent : bool DEPRECATED in version 0.8.4. See `disable_infer_transcripts` and `disable_infer_genes` for more granular control. disable_infer_transcripts, disable_infer_genes : bool Only used for GTF files. By default -- and according to the GTF spec -- we assume that there are no transcript or gene features in the file. gffutils then infers the extent of each transcript based on its constituent exons and infers the extent of each gene bases on its constituent transcripts. This default behavior is problematic if the input file already contains transcript or gene features (like recent GENCODE GTF files for human), since 1) the work to infer extents is unnecessary, and 2) trying to insert an inferred feature back into the database triggers gffutils' feature-merging routines, which can get time consuming. The solution is to use `disable_infer_transcripts=True` if your GTF already has transcripts in it, and/or `disable_infer_genes=True` if it already has genes in it. This can result in dramatic (100x) speedup. Prior to version 0.8.4, setting `infer_gene_extents=False` would disable both transcript and gene inference simultaneously. As of version 0.8.4, these argument allow more granular control. force_merge_fields : list If merge_strategy="merge", then features will only be merged if their non-attribute values are identical (same chrom, source, start, stop, score, strand, phase). Using `force_merge_fields`, you can override this behavior to allow merges even when fields are different. This list can contain one or more of ['seqid', 'source', 'featuretype', 'score', 'strand', 'frame']. The resulting merged fields will be strings of comma-separated values. Note that 'start' and 'end' are not available, since these fields need to be integers. text_factory : callable Text factory to use for the sqlite3 database. See https://docs.python.org/2/library/\ sqlite3.html#sqlite3.Connection.text_factory for details. The default sqlite3.OptimizedUnicode will return Unicode objects only for non-ASCII data, and bytestrings otherwise. pragmas : dict Dictionary of pragmas used when creating the sqlite3 database. See http://www.sqlite.org/pragma.html for a list of available pragmas. The defaults are stored in constants.default_pragmas, which can be used as a template for supplying a custom dictionary. sort_attribute_values : bool All features returned from the database will have their attribute values sorted. Typically this is only useful for testing, since this can get time-consuming for large numbers of features. _keep_tempfiles : bool or string False by default to clean up intermediate tempfiles created during GTF import. If True, then keep these tempfile for testing or debugging. If string, then keep the tempfile for testing, but also use the string as the suffix fo the tempfile. This can be useful for testing in parallel environments. Returns ------- New :class:`FeatureDB` object. """; 107, assignment; 108, call; 109, assignment; 110, assignment; 111, call; 112, comparison_operator:dialect is None; 113, block; 114, assignment; 115, assignment; 116, assignment; 117, boolean_operator; 118, block; 119, elif_clause; 120, call; 121, assignment; 122, assignment; 123, call; 124, comparison_operator:dbfn == ':memory:'; 125, block; 126, else_clause; 127, identifier:db; 128, string_content:error; 129, string_content:transcript_id; 130, string_content:gene_id; 131, string_content:exon; 132, identifier:sqlite3; 133, identifier:OptimizedUnicode; 134, identifier:constants; 135, identifier:default_pragmas; 136, identifier:_locals; 137, call; 138, identifier:deprecation_handler; 139, argument_list; 140, identifier:kwargs; 141, call; 142, identifier:iterator; 143, call; 144, attribute; 145, argument_list; 146, identifier:dialect; 147, None; 148, expression_statement; 149, subscript; 150, attribute; 151, subscript; 152, attribute; 153, subscript; 154, integer:0; 155, identifier:force_gff; 156, parenthesized_expression; 157, expression_statement; 158, expression_statement; 159, expression_statement; 160, comparison_operator:dialect['fmt'] == 'gtf'; 161, block; 162, attribute; 163, argument_list; 164, subscript; 165, identifier:dialect; 166, identifier:c; 167, call; 168, attribute; 169, argument_list; 170, identifier:dbfn; 171, string; 172, expression_statement; 173, block; 174, identifier:locals; 175, argument_list; 176, identifier:kwargs; 177, identifier:dict; 178, generator_expression; 179, attribute; 180, argument_list; 181, identifier:kwargs; 182, identifier:update; 183, dictionary_splat; 184, assignment; 185, identifier:kwargs; 186, string; 187, identifier:iterator; 188, identifier:_iter; 189, identifier:kwargs; 190, string; 191, identifier:iterator; 192, identifier:directives; 193, identifier:kwargs; 194, string; 195, comparison_operator:dialect['fmt'] == 'gff3'; 196, assignment; 197, assignment; 198, assignment; 199, subscript; 200, string; 201, expression_statement; 202, expression_statement; 203, expression_statement; 204, identifier:kwargs; 205, identifier:update; 206, dictionary_splat; 207, identifier:kwargs; 208, string; 209, identifier:cls; 210, argument_list; 211, identifier:c; 212, identifier:create; 213, string_content::memory:; 214, assignment; 215, expression_statement; 216, tuple; 217, for_in_clause; 218, identifier:iterators; 219, identifier:DataIterator; 220, dictionary_splat; 221, identifier:_locals; 222, identifier:dialect; 223, attribute; 224, string_content:data; 225, string_content:directives; 226, string_content:checklines; 227, subscript; 228, string; 229, identifier:cls; 230, identifier:_GFFDBCreator; 231, identifier:id_spec; 232, boolean_operator; 233, identifier:add_kwargs; 234, call; 235, identifier:dialect; 236, string; 237, string_content:gtf; 238, assignment; 239, assignment; 240, assignment; 241, identifier:add_kwargs; 242, string_content:dialect; 243, dictionary_splat; 244, identifier:db; 245, call; 246, assignment; 247, identifier:i; 248, subscript; 249, identifier:i; 250, attribute; 251, identifier:kwargs; 252, identifier:iterator; 253, identifier:dialect; 254, identifier:dialect; 255, string; 256, string_content:gff3; 257, identifier:id_spec; 258, string; 259, identifier:dict; 260, argument_list; 261, string_content:fmt; 262, identifier:cls; 263, identifier:_GTFDBCreator; 264, identifier:id_spec; 265, boolean_operator; 266, identifier:add_kwargs; 267, call; 268, identifier:kwargs; 269, attribute; 270, argument_list; 271, identifier:db; 272, call; 273, identifier:_locals; 274, identifier:i; 275, identifier:constants; 276, identifier:_iterator_kwargs; 277, string_content:fmt; 278, string_content:ID; 279, keyword_argument; 280, identifier:id_spec; 281, dictionary; 282, identifier:dict; 283, argument_list; 284, identifier:interface; 285, identifier:FeatureDB; 286, attribute; 287, keyword_argument; 288, keyword_argument; 289, keyword_argument; 290, keyword_argument; 291, attribute; 292, argument_list; 293, identifier:id_spec; 294, identifier:id_spec; 295, pair; 296, pair; 297, keyword_argument; 298, keyword_argument; 299, keyword_argument; 300, keyword_argument; 301, identifier:c; 302, identifier:conn; 303, identifier:keep_order; 304, identifier:keep_order; 305, identifier:pragmas; 306, identifier:pragmas; 307, identifier:sort_attribute_values; 308, identifier:sort_attribute_values; 309, identifier:text_factory; 310, identifier:text_factory; 311, identifier:interface; 312, identifier:FeatureDB; 313, identifier:c; 314, keyword_argument; 315, keyword_argument; 316, keyword_argument; 317, keyword_argument; 318, string; 319, string; 320, string; 321, string; 322, identifier:transcript_key; 323, identifier:gtf_transcript_key; 324, identifier:gene_key; 325, identifier:gtf_gene_key; 326, identifier:subfeature; 327, identifier:gtf_subfeature; 328, identifier:id_spec; 329, identifier:id_spec; 330, identifier:keep_order; 331, identifier:keep_order; 332, identifier:pragmas; 333, identifier:pragmas; 334, identifier:sort_attribute_values; 335, identifier:sort_attribute_values; 336, identifier:text_factory; 337, identifier:text_factory; 338, string_content:gene; 339, string_content:gene_id; 340, string_content:transcript; 341, string_content:transcript_id
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 3, 9; 3, 10; 3, 11; 3, 12; 3, 13; 3, 14; 3, 15; 3, 16; 3, 17; 3, 18; 3, 19; 3, 20; 3, 21; 3, 22; 3, 23; 3, 24; 3, 25; 3, 26; 3, 27; 3, 28; 3, 29; 4, 30; 4, 31; 4, 32; 4, 33; 4, 34; 4, 35; 4, 36; 4, 37; 4, 38; 4, 39; 4, 40; 4, 41; 4, 42; 4, 43; 4, 44; 4, 45; 4, 46; 4, 47; 4, 48; 4, 49; 4, 50; 4, 51; 4, 52; 4, 53; 4, 54; 4, 55; 4, 56; 4, 57; 4, 58; 4, 59; 4, 60; 7, 61; 7, 62; 8, 63; 8, 64; 9, 65; 9, 66; 10, 67; 10, 68; 11, 69; 11, 70; 12, 71; 12, 72; 13, 73; 13, 74; 14, 75; 14, 76; 15, 77; 15, 78; 16, 79; 16, 80; 17, 81; 17, 82; 18, 83; 18, 84; 19, 85; 19, 86; 20, 87; 20, 88; 21, 89; 21, 90; 22, 91; 22, 92; 23, 93; 23, 94; 24, 95; 24, 96; 25, 97; 25, 98; 26, 99; 26, 100; 27, 101; 27, 102; 28, 103; 28, 104; 29, 105; 30, 106; 31, 107; 33, 108; 34, 109; 39, 110; 40, 111; 41, 112; 41, 113; 50, 114; 51, 115; 53, 116; 54, 117; 54, 118; 54, 119; 55, 120; 56, 121; 57, 122; 58, 123; 59, 124; 59, 125; 59, 126; 60, 127; 70, 128; 74, 129; 76, 130; 78, 131; 88, 132; 88, 133; 92, 134; 92, 135; 107, 136; 107, 137; 108, 138; 108, 139; 109, 140; 109, 141; 110, 142; 110, 143; 111, 144; 111, 145; 112, 146; 112, 147; 113, 148; 114, 149; 114, 150; 115, 151; 115, 152; 116, 153; 116, 154; 117, 155; 117, 156; 118, 157; 118, 158; 118, 159; 119, 160; 119, 161; 120, 162; 120, 163; 121, 164; 121, 165; 122, 166; 122, 167; 123, 168; 123, 169; 124, 170; 124, 171; 125, 172; 126, 173; 137, 174; 137, 175; 139, 176; 141, 177; 141, 178; 143, 179; 143, 180; 144, 181; 144, 182; 145, 183; 148, 184; 149, 185; 149, 186; 150, 187; 150, 188; 151, 189; 151, 190; 152, 191; 152, 192; 153, 193; 153, 194; 156, 195; 157, 196; 158, 197; 159, 198; 160, 199; 160, 200; 161, 201; 161, 202; 161, 203; 162, 204; 162, 205; 163, 206; 164, 207; 164, 208; 167, 209; 167, 210; 168, 211; 168, 212; 171, 213; 172, 214; 173, 215; 178, 216; 178, 217; 179, 218; 179, 219; 180, 220; 183, 221; 184, 222; 184, 223; 186, 224; 190, 225; 194, 226; 195, 227; 195, 228; 196, 229; 196, 230; 197, 231; 197, 232; 198, 233; 198, 234; 199, 235; 199, 236; 200, 237; 201, 238; 202, 239; 203, 240; 206, 241; 208, 242; 210, 243; 214, 244; 214, 245; 215, 246; 216, 247; 216, 248; 217, 249; 217, 250; 220, 251; 223, 252; 223, 253; 227, 254; 227, 255; 228, 256; 232, 257; 232, 258; 234, 259; 234, 260; 236, 261; 238, 262; 238, 263; 239, 264; 239, 265; 240, 266; 240, 267; 243, 268; 245, 269; 245, 270; 246, 271; 246, 272; 248, 273; 248, 274; 250, 275; 250, 276; 255, 277; 258, 278; 260, 279; 265, 280; 265, 281; 267, 282; 267, 283; 269, 284; 269, 285; 270, 286; 270, 287; 270, 288; 270, 289; 270, 290; 272, 291; 272, 292; 279, 293; 279, 294; 281, 295; 281, 296; 283, 297; 283, 298; 283, 299; 283, 300; 286, 301; 286, 302; 287, 303; 287, 304; 288, 305; 288, 306; 289, 307; 289, 308; 290, 309; 290, 310; 291, 311; 291, 312; 292, 313; 292, 314; 292, 315; 292, 316; 292, 317; 295, 318; 295, 319; 296, 320; 296, 321; 297, 322; 297, 323; 298, 324; 298, 325; 299, 326; 299, 327; 300, 328; 300, 329; 314, 330; 314, 331; 315, 332; 315, 333; 316, 334; 316, 335; 317, 336; 317, 337; 318, 338; 319, 339; 320, 340; 321, 341
def create_db(data, dbfn, id_spec=None, force=False, verbose=False, checklines=10, merge_strategy='error', transform=None, gtf_transcript_key='transcript_id', gtf_gene_key='gene_id', gtf_subfeature='exon', force_gff=False, force_dialect_check=False, from_string=False, keep_order=False, text_factory=sqlite3.OptimizedUnicode, force_merge_fields=None, pragmas=constants.default_pragmas, sort_attribute_values=False, dialect=None, _keep_tempfiles=False, infer_gene_extent=True, disable_infer_genes=False, disable_infer_transcripts=False, **kwargs): """ Create a database from a GFF or GTF file. For more details on when and how to use the kwargs below, see the examples in the online documentation (:ref:`examples`). Parameters ---------- data : string or iterable If a string (and `from_string` is False), then `data` is the path to the original GFF or GTF file. If a string and `from_string` is True, then assume `data` is the actual data to use. Otherwise, it's an iterable of Feature objects. dbfn : string Path to the database that will be created. Can be the special string ":memory:" to create an in-memory database. id_spec : string, list, dict, callable, or None This parameter guides what will be used as the primary key for the database, which in turn determines how you will access individual features by name from the database. If `id_spec=None`, then auto-increment primary keys based on the feature type (e.g., "gene_1", "gene_2"). This is also the fallback behavior for the other values below. If `id_spec` is a string, then look for this key in the attributes. If it exists, then use its value as the primary key, otherwise autoincrement based on the feature type. For many GFF3 files, "ID" usually works well. If `id_spec` is a list or tuple of keys, then check for each one in order, using the first one found. For GFF3, this might be ["ID", "Name"], which would use the ID if it exists, otherwise the Name, otherwise autoincrement based on the feature type. If `id_spec` is a dictionary, then it is a mapping of feature types to what should be used as the ID. For example, for GTF files, `{'gene': 'gene_id', 'transcript': 'transcript_id'}` may be useful. The values of this dictionary can also be a list, e.g., `{'gene': ['gene_id', 'geneID']}` If `id_spec` is a callable object, then it accepts a dictionary from the iterator and returns one of the following: * None (in which case the feature type will be auto-incremented) * string (which will be used as the primary key) * special string starting with "autoincrement:X", where "X" is a string that will be used for auto-incrementing. For example, if "autoincrement:chr10", then the first feature will be "chr10_1", the second "chr10_2", and so on. force : bool If `False` (default), then raise an exception if `dbfn` already exists. Use `force=True` to overwrite any existing databases. verbose : bool Report percent complete and other feedback on how the db creation is progressing. In order to report percent complete, the entire file needs to be read once to see how many items there are; for large files you may want to use `verbose=False` to avoid this. checklines : int Number of lines to check the dialect. merge_strategy : str One of {merge, create_unique, error, warning, replace}. This parameter specifies the behavior when two items have an identical primary key. Using `merge_strategy="merge"`, then there will be a single entry in the database, but the attributes of all features with the same primary key will be merged. Using `merge_strategy="create_unique"`, then the first entry will use the original primary key, but the second entry will have a unique, autoincremented primary key assigned to it Using `merge_strategy="error"`, a :class:`gffutils.DuplicateID` exception will be raised. This means you will have to edit the file yourself to fix the duplicated IDs. Using `merge_strategy="warning"`, a warning will be printed to the logger, and the duplicate feature will be skipped. Using `merge_strategy="replace"` will replace the entire existing feature with the new feature. transform : callable Function (or other callable object) that accepts a `Feature` object and returns a (possibly modified) `Feature` object. gtf_transcript_key, gtf_gene_key : string Which attribute to use as the transcript ID and gene ID respectively for GTF files. Default is `transcript_id` and `gene_id` according to the GTF spec. gtf_subfeature : string Feature type to use as a "gene component" when inferring gene and transcript extents for GTF files. Default is `exon` according to the GTF spec. force_gff : bool If True, do not do automatic format detection -- only use GFF. force_dialect_check : bool If True, the dialect will be checkef for every feature (instead of just `checklines` features). This can be slow, but may be necessary for inconsistently-formatted input files. from_string : bool If True, then treat `data` as actual data (rather than the path to a file). keep_order : bool If True, all features returned from this instance will have the order of their attributes maintained. This can be turned on or off database-wide by setting the `keep_order` attribute or with this kwarg, or on a feature-by-feature basis by setting the `keep_order` attribute of an individual feature. Note that a single order of attributes will be used for all features. Specifically, the order will be determined by the order of attribute keys in the first `checklines` of the input data. See helpers._choose_dialect for more information on this. Default is False, since this includes a sorting step that can get time-consuming for many features. infer_gene_extent : bool DEPRECATED in version 0.8.4. See `disable_infer_transcripts` and `disable_infer_genes` for more granular control. disable_infer_transcripts, disable_infer_genes : bool Only used for GTF files. By default -- and according to the GTF spec -- we assume that there are no transcript or gene features in the file. gffutils then infers the extent of each transcript based on its constituent exons and infers the extent of each gene bases on its constituent transcripts. This default behavior is problematic if the input file already contains transcript or gene features (like recent GENCODE GTF files for human), since 1) the work to infer extents is unnecessary, and 2) trying to insert an inferred feature back into the database triggers gffutils' feature-merging routines, which can get time consuming. The solution is to use `disable_infer_transcripts=True` if your GTF already has transcripts in it, and/or `disable_infer_genes=True` if it already has genes in it. This can result in dramatic (100x) speedup. Prior to version 0.8.4, setting `infer_gene_extents=False` would disable both transcript and gene inference simultaneously. As of version 0.8.4, these argument allow more granular control. force_merge_fields : list If merge_strategy="merge", then features will only be merged if their non-attribute values are identical (same chrom, source, start, stop, score, strand, phase). Using `force_merge_fields`, you can override this behavior to allow merges even when fields are different. This list can contain one or more of ['seqid', 'source', 'featuretype', 'score', 'strand', 'frame']. The resulting merged fields will be strings of comma-separated values. Note that 'start' and 'end' are not available, since these fields need to be integers. text_factory : callable Text factory to use for the sqlite3 database. See https://docs.python.org/2/library/\ sqlite3.html#sqlite3.Connection.text_factory for details. The default sqlite3.OptimizedUnicode will return Unicode objects only for non-ASCII data, and bytestrings otherwise. pragmas : dict Dictionary of pragmas used when creating the sqlite3 database. See http://www.sqlite.org/pragma.html for a list of available pragmas. The defaults are stored in constants.default_pragmas, which can be used as a template for supplying a custom dictionary. sort_attribute_values : bool All features returned from the database will have their attribute values sorted. Typically this is only useful for testing, since this can get time-consuming for large numbers of features. _keep_tempfiles : bool or string False by default to clean up intermediate tempfiles created during GTF import. If True, then keep these tempfile for testing or debugging. If string, then keep the tempfile for testing, but also use the string as the suffix fo the tempfile. This can be useful for testing in parallel environments. Returns ------- New :class:`FeatureDB` object. """ _locals = locals() # Check if any older kwargs made it in deprecation_handler(kwargs) kwargs = dict((i, _locals[i]) for i in constants._iterator_kwargs) # First construct an iterator so that we can identify the file format. # DataIterator figures out what kind of data was provided (string of lines, # filename, or iterable of Features) and checks `checklines` lines to # identify the dialect. iterator = iterators.DataIterator(**kwargs) kwargs.update(**_locals) if dialect is None: dialect = iterator.dialect # However, a side-effect of this is that if `data` was a generator, then # we've just consumed `checklines` items (see # iterators.BaseIterator.__init__, which calls iterators.peek). # # But it also chains those consumed items back onto the beginning, and the # result is available as as iterator._iter. # # That's what we should be using now for `data: kwargs['data'] = iterator._iter kwargs['directives'] = iterator.directives # Since we've already checked lines, we don't want to do it again kwargs['checklines'] = 0 if force_gff or (dialect['fmt'] == 'gff3'): cls = _GFFDBCreator id_spec = id_spec or 'ID' add_kwargs = dict( id_spec=id_spec, ) elif dialect['fmt'] == 'gtf': cls = _GTFDBCreator id_spec = id_spec or {'gene': 'gene_id', 'transcript': 'transcript_id'} add_kwargs = dict( transcript_key=gtf_transcript_key, gene_key=gtf_gene_key, subfeature=gtf_subfeature, id_spec=id_spec, ) kwargs.update(**add_kwargs) kwargs['dialect'] = dialect c = cls(**kwargs) c.create() if dbfn == ':memory:': db = interface.FeatureDB(c.conn, keep_order=keep_order, pragmas=pragmas, sort_attribute_values=sort_attribute_values, text_factory=text_factory) else: db = interface.FeatureDB(c, keep_order=keep_order, pragmas=pragmas, sort_attribute_values=sort_attribute_values, text_factory=text_factory) return db
0, module; 1, function_definition; 2, function_name:order_by_on_list; 3, parameters; 4, block; 5, identifier:objects; 6, identifier:order_field; 7, default_parameter; 8, expression_statement; 9, if_statement; 10, function_definition; 11, expression_statement; 12, identifier:is_desc; 13, False; 14, comment:""" Utility function to sort objects django-style even for non-query set collections :param objects: list of objects to sort :param order_field: field name, follows django conventions, so "foo__bar" means `foo.bar`, can be a callable. :param is_desc: reverse the sorting :return: """; 15, call; 16, block; 17, function_name:order_key; 18, parameters; 19, block; 20, call; 21, identifier:callable; 22, argument_list; 23, expression_statement; 24, return_statement; 25, identifier:x; 26, expression_statement; 27, if_statement; 28, return_statement; 29, attribute; 30, argument_list; 31, identifier:order_field; 32, call; 33, assignment; 34, comparison_operator:v is None; 35, block; 36, identifier:v; 37, identifier:objects; 38, identifier:sort; 39, keyword_argument; 40, keyword_argument; 41, attribute; 42, argument_list; 43, identifier:v; 44, call; 45, identifier:v; 46, None; 47, return_statement; 48, identifier:key; 49, identifier:order_key; 50, identifier:reverse; 51, identifier:is_desc; 52, identifier:objects; 53, identifier:sort; 54, keyword_argument; 55, keyword_argument; 56, identifier:getattr_path; 57, argument_list; 58, identifier:MIN; 59, identifier:key; 60, identifier:order_field; 61, identifier:reverse; 62, identifier:is_desc; 63, identifier:x; 64, identifier:order_field
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 4, 8; 4, 9; 4, 10; 4, 11; 7, 12; 7, 13; 8, 14; 9, 15; 9, 16; 10, 17; 10, 18; 10, 19; 11, 20; 15, 21; 15, 22; 16, 23; 16, 24; 18, 25; 19, 26; 19, 27; 19, 28; 20, 29; 20, 30; 22, 31; 23, 32; 26, 33; 27, 34; 27, 35; 28, 36; 29, 37; 29, 38; 30, 39; 30, 40; 32, 41; 32, 42; 33, 43; 33, 44; 34, 45; 34, 46; 35, 47; 39, 48; 39, 49; 40, 50; 40, 51; 41, 52; 41, 53; 42, 54; 42, 55; 44, 56; 44, 57; 47, 58; 54, 59; 54, 60; 55, 61; 55, 62; 57, 63; 57, 64
def order_by_on_list(objects, order_field, is_desc=False): """ Utility function to sort objects django-style even for non-query set collections :param objects: list of objects to sort :param order_field: field name, follows django conventions, so "foo__bar" means `foo.bar`, can be a callable. :param is_desc: reverse the sorting :return: """ if callable(order_field): objects.sort(key=order_field, reverse=is_desc) return def order_key(x): v = getattr_path(x, order_field) if v is None: return MIN return v objects.sort(key=order_key, reverse=is_desc)
0, module; 1, function_definition; 2, function_name:render_table; 3, parameters; 4, block; 5, identifier:request; 6, identifier:table; 7, default_parameter; 8, default_parameter; 9, default_parameter; 10, default_parameter; 11, default_parameter; 12, comment:# pragma: no mutate; 13, default_parameter; 14, default_parameter; 15, default_parameter; 16, default_parameter; 17, default_parameter; 18, expression_statement; 19, if_statement; 20, if_statement; 21, assert_statement; 22, expression_statement; 23, expression_statement; 24, if_statement; 25, expression_statement; 26, expression_statement; 27, expression_statement; 28, if_statement; 29, expression_statement; 30, if_statement; 31, if_statement; 32, return_statement; 33, identifier:links; 34, None; 35, identifier:context; 36, None; 37, identifier:template; 38, string; 39, identifier:blank_on_empty; 40, False; 41, identifier:paginate_by; 42, integer:40; 43, identifier:page; 44, None; 45, identifier:paginator; 46, None; 47, identifier:show_hits; 48, False; 49, identifier:hit_label; 50, string; 51, identifier:post_bulk_edit; 52, lambda; 53, comment:""" Render a table. This automatically handles pagination, sorting, filtering and bulk operations. :param request: the request object. This is set on the table object so that it is available for lambda expressions. :param table: an instance of Table :param links: a list of instances of Link :param context: dict of extra context parameters :param template: if you need to render the table differently you can override this parameter with either a name of a template to load or a `Template` instance. :param blank_on_empty: turn off the displaying of `{{ empty_message }}` in the template when the list is empty :param show_hits: Display how many items there are total in the paginator. :param hit_label: Label for the show_hits display. :return: a string with the rendered HTML table """; 54, not_operator; 55, block; 56, call; 57, block; 58, call; 59, identifier:table; 60, assignment; 61, assignment; 62, identifier:should_return; 63, block; 64, assignment; 65, assignment; 66, assignment; 67, boolean_operator; 68, block; 69, assignment; 70, boolean_operator; 71, block; 72, boolean_operator; 73, block; 74, call; 75, string_content:tri_table/list.html; 76, string_content:Items; 77, lambda_parameters; 78, None; 79, identifier:context; 80, expression_statement; 81, identifier:isinstance; 82, argument_list; 83, expression_statement; 84, identifier:isinstance; 85, argument_list; 86, attribute; 87, identifier:request; 88, pattern_list; 89, call; 90, return_statement; 91, subscript; 92, attribute; 93, subscript; 94, attribute; 95, subscript; 96, attribute; 97, attribute; 98, comparison_operator:request.method == 'POST'; 99, if_statement; 100, attribute; 101, call; 102, not_operator; 103, identifier:blank_on_empty; 104, return_statement; 105, attribute; 106, not_operator; 107, expression_statement; 108, expression_statement; 109, identifier:render_template; 110, argument_list; 111, identifier:table; 112, identifier:queryset; 113, identifier:updates; 114, assignment; 115, identifier:table; 116, identifier:Namespace; 117, assignment; 118, identifier:table; 119, identifier:Table; 120, identifier:table; 121, identifier:request; 122, identifier:should_return; 123, identifier:dispatch_result; 124, identifier:handle_dispatch; 125, argument_list; 126, identifier:dispatch_result; 127, identifier:context; 128, string; 129, identifier:table; 130, identifier:bulk_form; 131, identifier:context; 132, string; 133, identifier:table; 134, identifier:query_form; 135, identifier:context; 136, string; 137, identifier:table; 138, identifier:query_error; 139, identifier:table; 140, identifier:bulk_form; 141, attribute; 142, string; 143, call; 144, block; 145, identifier:table; 146, identifier:context; 147, identifier:table_context; 148, argument_list; 149, attribute; 150, string; 151, identifier:table; 152, identifier:query_form; 153, call; 154, assignment; 155, assignment; 156, identifier:request; 157, identifier:template; 158, attribute; 159, identifier:context; 160, dictionary; 161, identifier:table; 162, call; 163, keyword_argument; 164, keyword_argument; 165, string_content:bulk_form; 166, string_content:query_form; 167, string_content:tri_query_error; 168, identifier:request; 169, identifier:method; 170, string_content:POST; 171, attribute; 172, argument_list; 173, expression_statement; 174, expression_statement; 175, expression_statement; 176, expression_statement; 177, return_statement; 178, identifier:request; 179, keyword_argument; 180, keyword_argument; 181, keyword_argument; 182, keyword_argument; 183, keyword_argument; 184, keyword_argument; 185, keyword_argument; 186, keyword_argument; 187, identifier:table; 188, identifier:data; 189, attribute; 190, argument_list; 191, attribute; 192, None; 193, subscript; 194, call; 195, identifier:table; 196, identifier:context; 197, identifier:table; 198, argument_list; 199, identifier:request; 200, identifier:request; 201, identifier:obj; 202, identifier:table; 203, attribute; 204, identifier:is_valid; 205, assignment; 206, assignment; 207, call; 208, call; 209, call; 210, identifier:table; 211, identifier:table; 212, identifier:links; 213, identifier:links; 214, identifier:paginate_by; 215, identifier:paginate_by; 216, identifier:page; 217, identifier:page; 218, identifier:extra_context; 219, identifier:context; 220, identifier:paginator; 221, identifier:paginator; 222, identifier:show_hits; 223, identifier:show_hits; 224, identifier:hit_label; 225, identifier:hit_label; 226, attribute; 227, identifier:is_valid; 228, identifier:table; 229, identifier:data; 230, attribute; 231, string; 232, identifier:mark_safe; 233, argument_list; 234, identifier:table; 235, identifier:bulk_form; 236, identifier:queryset; 237, call; 238, identifier:updates; 239, dictionary_comprehension; 240, attribute; 241, argument_list; 242, identifier:post_bulk_edit; 243, argument_list; 244, identifier:HttpResponseRedirect; 245, argument_list; 246, identifier:table; 247, identifier:query_form; 248, identifier:table; 249, identifier:context; 250, string_content:invalid_form_message; 251, string:'<i class="fa fa-meh-o fa-5x" aria-hidden="true"></i>'; 252, attribute; 253, argument_list; 254, pair; 255, for_in_clause; 256, if_clause; 257, identifier:queryset; 258, identifier:update; 259, dictionary_splat; 260, keyword_argument; 261, keyword_argument; 262, keyword_argument; 263, subscript; 264, identifier:table; 265, identifier:bulk_queryset; 266, attribute; 267, attribute; 268, identifier:field; 269, attribute; 270, boolean_operator; 271, identifier:updates; 272, identifier:table; 273, identifier:table; 274, identifier:queryset; 275, identifier:queryset; 276, identifier:updates; 277, identifier:updates; 278, attribute; 279, string; 280, identifier:field; 281, identifier:name; 282, identifier:field; 283, identifier:value; 284, attribute; 285, identifier:fields; 286, boolean_operator; 287, comparison_operator:field.attr is not None; 288, identifier:request; 289, identifier:META; 290, string_content:HTTP_REFERER; 291, identifier:table; 292, identifier:bulk_form; 293, comparison_operator:field.value is not None; 294, comparison_operator:field.value != ''; 295, attribute; 296, None; 297, attribute; 298, None; 299, attribute; 300, string; 301, identifier:field; 302, identifier:attr; 303, identifier:field; 304, identifier:value; 305, identifier:field; 306, identifier:value
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 3, 9; 3, 10; 3, 11; 3, 12; 3, 13; 3, 14; 3, 15; 3, 16; 3, 17; 4, 18; 4, 19; 4, 20; 4, 21; 4, 22; 4, 23; 4, 24; 4, 25; 4, 26; 4, 27; 4, 28; 4, 29; 4, 30; 4, 31; 4, 32; 7, 33; 7, 34; 8, 35; 8, 36; 9, 37; 9, 38; 10, 39; 10, 40; 11, 41; 11, 42; 13, 43; 13, 44; 14, 45; 14, 46; 15, 47; 15, 48; 16, 49; 16, 50; 17, 51; 17, 52; 18, 53; 19, 54; 19, 55; 20, 56; 20, 57; 21, 58; 21, 59; 22, 60; 23, 61; 24, 62; 24, 63; 25, 64; 26, 65; 27, 66; 28, 67; 28, 68; 29, 69; 30, 70; 30, 71; 31, 72; 31, 73; 32, 74; 38, 75; 50, 76; 52, 77; 52, 78; 54, 79; 55, 80; 56, 81; 56, 82; 57, 83; 58, 84; 58, 85; 60, 86; 60, 87; 61, 88; 61, 89; 63, 90; 64, 91; 64, 92; 65, 93; 65, 94; 66, 95; 66, 96; 67, 97; 67, 98; 68, 99; 69, 100; 69, 101; 70, 102; 70, 103; 71, 104; 72, 105; 72, 106; 73, 107; 73, 108; 74, 109; 74, 110; 77, 111; 77, 112; 77, 113; 80, 114; 82, 115; 82, 116; 83, 117; 85, 118; 85, 119; 86, 120; 86, 121; 88, 122; 88, 123; 89, 124; 89, 125; 90, 126; 91, 127; 91, 128; 92, 129; 92, 130; 93, 131; 93, 132; 94, 133; 94, 134; 95, 135; 95, 136; 96, 137; 96, 138; 97, 139; 97, 140; 98, 141; 98, 142; 99, 143; 99, 144; 100, 145; 100, 146; 101, 147; 101, 148; 102, 149; 104, 150; 105, 151; 105, 152; 106, 153; 107, 154; 108, 155; 110, 156; 110, 157; 110, 158; 114, 159; 114, 160; 117, 161; 117, 162; 125, 163; 125, 164; 128, 165; 132, 166; 136, 167; 141, 168; 141, 169; 142, 170; 143, 171; 143, 172; 144, 173; 144, 174; 144, 175; 144, 176; 144, 177; 148, 178; 148, 179; 148, 180; 148, 181; 148, 182; 148, 183; 148, 184; 148, 185; 148, 186; 149, 187; 149, 188; 153, 189; 153, 190; 154, 191; 154, 192; 155, 193; 155, 194; 158, 195; 158, 196; 162, 197; 162, 198; 163, 199; 163, 200; 164, 201; 164, 202; 171, 203; 171, 204; 173, 205; 174, 206; 175, 207; 176, 208; 177, 209; 179, 210; 179, 211; 180, 212; 180, 213; 181, 214; 181, 215; 182, 216; 182, 217; 183, 218; 183, 219; 184, 220; 184, 221; 185, 222; 185, 223; 186, 224; 186, 225; 189, 226; 189, 227; 191, 228; 191, 229; 193, 230; 193, 231; 194, 232; 194, 233; 203, 234; 203, 235; 205, 236; 205, 237; 206, 238; 206, 239; 207, 240; 207, 241; 208, 242; 208, 243; 209, 244; 209, 245; 226, 246; 226, 247; 230, 248; 230, 249; 231, 250; 233, 251; 237, 252; 237, 253; 239, 254; 239, 255; 239, 256; 240, 257; 240, 258; 241, 259; 243, 260; 243, 261; 243, 262; 245, 263; 252, 264; 252, 265; 254, 266; 254, 267; 255, 268; 255, 269; 256, 270; 259, 271; 260, 272; 260, 273; 261, 274; 261, 275; 262, 276; 262, 277; 263, 278; 263, 279; 266, 280; 266, 281; 267, 282; 267, 283; 269, 284; 269, 285; 270, 286; 270, 287; 278, 288; 278, 289; 279, 290; 284, 291; 284, 292; 286, 293; 286, 294; 287, 295; 287, 296; 293, 297; 293, 298; 294, 299; 294, 300; 295, 301; 295, 302; 297, 303; 297, 304; 299, 305; 299, 306
def render_table(request, table, links=None, context=None, template='tri_table/list.html', blank_on_empty=False, paginate_by=40, # pragma: no mutate page=None, paginator=None, show_hits=False, hit_label='Items', post_bulk_edit=lambda table, queryset, updates: None): """ Render a table. This automatically handles pagination, sorting, filtering and bulk operations. :param request: the request object. This is set on the table object so that it is available for lambda expressions. :param table: an instance of Table :param links: a list of instances of Link :param context: dict of extra context parameters :param template: if you need to render the table differently you can override this parameter with either a name of a template to load or a `Template` instance. :param blank_on_empty: turn off the displaying of `{{ empty_message }}` in the template when the list is empty :param show_hits: Display how many items there are total in the paginator. :param hit_label: Label for the show_hits display. :return: a string with the rendered HTML table """ if not context: context = {} if isinstance(table, Namespace): table = table() assert isinstance(table, Table), table table.request = request should_return, dispatch_result = handle_dispatch(request=request, obj=table) if should_return: return dispatch_result context['bulk_form'] = table.bulk_form context['query_form'] = table.query_form context['tri_query_error'] = table.query_error if table.bulk_form and request.method == 'POST': if table.bulk_form.is_valid(): queryset = table.bulk_queryset() updates = { field.name: field.value for field in table.bulk_form.fields if field.value is not None and field.value != '' and field.attr is not None } queryset.update(**updates) post_bulk_edit(table=table, queryset=queryset, updates=updates) return HttpResponseRedirect(request.META['HTTP_REFERER']) table.context = table_context( request, table=table, links=links, paginate_by=paginate_by, page=page, extra_context=context, paginator=paginator, show_hits=show_hits, hit_label=hit_label, ) if not table.data and blank_on_empty: return '' if table.query_form and not table.query_form.is_valid(): table.data = None table.context['invalid_form_message'] = mark_safe('<i class="fa fa-meh-o fa-5x" aria-hidden="true"></i>') return render_template(request, template, table.context)
0, module; 1, function_definition; 2, function_name:assemble_concatenated_meta; 3, parameters; 4, block; 5, identifier:concated_meta_dfs; 6, identifier:remove_all_metadata_fields; 7, expression_statement; 8, comment:# Concatenate the concated_meta_dfs; 9, if_statement; 10, expression_statement; 11, comment:# Sanity check: the number of rows in all_concated_meta_df should correspond; 12, comment:# to the sum of the number of rows in the input dfs; 13, expression_statement; 14, expression_statement; 15, expression_statement; 16, assert_statement; 17, comment:# Sort the index and columns; 18, expression_statement; 19, return_statement; 20, comment:""" Assemble the concatenated metadata dfs together. For example, if horizontally concatenating, the concatenated metadata dfs are the column metadata dfs. Both indices are sorted. Args: concated_meta_dfs (list of pandas dfs) Returns: all_concated_meta_df_sorted (pandas df) """; 21, identifier:remove_all_metadata_fields; 22, block; 23, assignment; 24, assignment; 25, call; 26, assignment; 27, comparison_operator:n_rows == n_rows_cumulative; 28, assignment; 29, identifier:all_concated_meta_df_sorted; 30, for_statement; 31, identifier:all_concated_meta_df; 32, call; 33, identifier:n_rows; 34, subscript; 35, attribute; 36, argument_list; 37, identifier:n_rows_cumulative; 38, call; 39, identifier:n_rows; 40, identifier:n_rows_cumulative; 41, identifier:all_concated_meta_df_sorted; 42, call; 43, identifier:df; 44, identifier:concated_meta_dfs; 45, block; 46, attribute; 47, argument_list; 48, attribute; 49, integer:0; 50, identifier:logger; 51, identifier:debug; 52, call; 53, identifier:sum; 54, argument_list; 55, attribute; 56, argument_list; 57, expression_statement; 58, identifier:pd; 59, identifier:concat; 60, identifier:concated_meta_dfs; 61, keyword_argument; 62, identifier:all_concated_meta_df; 63, identifier:shape; 64, attribute; 65, argument_list; 66, list_comprehension; 67, call; 68, identifier:sort_index; 69, keyword_argument; 70, call; 71, identifier:axis; 72, integer:0; 73, string:"all_concated_meta_df.shape[0]: {}"; 74, identifier:format; 75, identifier:n_rows; 76, subscript; 77, for_in_clause; 78, attribute; 79, argument_list; 80, identifier:axis; 81, integer:1; 82, attribute; 83, argument_list; 84, attribute; 85, integer:0; 86, identifier:df; 87, identifier:concated_meta_dfs; 88, identifier:all_concated_meta_df; 89, identifier:sort_index; 90, keyword_argument; 91, identifier:df; 92, identifier:drop; 93, attribute; 94, keyword_argument; 95, keyword_argument; 96, identifier:df; 97, identifier:shape; 98, identifier:axis; 99, integer:0; 100, identifier:df; 101, identifier:columns; 102, identifier:axis; 103, integer:1; 104, identifier:inplace; 105, True
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 4, 14; 4, 15; 4, 16; 4, 17; 4, 18; 4, 19; 7, 20; 9, 21; 9, 22; 10, 23; 13, 24; 14, 25; 15, 26; 16, 27; 18, 28; 19, 29; 22, 30; 23, 31; 23, 32; 24, 33; 24, 34; 25, 35; 25, 36; 26, 37; 26, 38; 27, 39; 27, 40; 28, 41; 28, 42; 30, 43; 30, 44; 30, 45; 32, 46; 32, 47; 34, 48; 34, 49; 35, 50; 35, 51; 36, 52; 38, 53; 38, 54; 42, 55; 42, 56; 45, 57; 46, 58; 46, 59; 47, 60; 47, 61; 48, 62; 48, 63; 52, 64; 52, 65; 54, 66; 55, 67; 55, 68; 56, 69; 57, 70; 61, 71; 61, 72; 64, 73; 64, 74; 65, 75; 66, 76; 66, 77; 67, 78; 67, 79; 69, 80; 69, 81; 70, 82; 70, 83; 76, 84; 76, 85; 77, 86; 77, 87; 78, 88; 78, 89; 79, 90; 82, 91; 82, 92; 83, 93; 83, 94; 83, 95; 84, 96; 84, 97; 90, 98; 90, 99; 93, 100; 93, 101; 94, 102; 94, 103; 95, 104; 95, 105
def assemble_concatenated_meta(concated_meta_dfs, remove_all_metadata_fields): """ Assemble the concatenated metadata dfs together. For example, if horizontally concatenating, the concatenated metadata dfs are the column metadata dfs. Both indices are sorted. Args: concated_meta_dfs (list of pandas dfs) Returns: all_concated_meta_df_sorted (pandas df) """ # Concatenate the concated_meta_dfs if remove_all_metadata_fields: for df in concated_meta_dfs: df.drop(df.columns, axis=1, inplace=True) all_concated_meta_df = pd.concat(concated_meta_dfs, axis=0) # Sanity check: the number of rows in all_concated_meta_df should correspond # to the sum of the number of rows in the input dfs n_rows = all_concated_meta_df.shape[0] logger.debug("all_concated_meta_df.shape[0]: {}".format(n_rows)) n_rows_cumulative = sum([df.shape[0] for df in concated_meta_dfs]) assert n_rows == n_rows_cumulative # Sort the index and columns all_concated_meta_df_sorted = all_concated_meta_df.sort_index(axis=0).sort_index(axis=1) return all_concated_meta_df_sorted
0, module; 1, function_definition; 2, function_name:assemble_data; 3, parameters; 4, block; 5, identifier:data_dfs; 6, identifier:concat_direction; 7, expression_statement; 8, if_statement; 9, comment:# Sort both indices; 10, expression_statement; 11, return_statement; 12, comment:""" Assemble the data dfs together. Both indices are sorted. Args: data_dfs (list of pandas dfs) concat_direction (string): 'horiz' or 'vert' Returns: all_data_df_sorted (pandas df) """; 13, comparison_operator:concat_direction == "horiz"; 14, comment:# Concatenate the data_dfs horizontally; 15, block; 16, elif_clause; 17, assignment; 18, identifier:all_data_df_sorted; 19, identifier:concat_direction; 20, string:"horiz"; 21, expression_statement; 22, comment:# Sanity check: the number of columns in all_data_df should; 23, comment:# correspond to the sum of the number of columns in the input dfs; 24, expression_statement; 25, expression_statement; 26, expression_statement; 27, assert_statement; 28, comparison_operator:concat_direction == "vert"; 29, comment:# Concatenate the data_dfs vertically; 30, block; 31, identifier:all_data_df_sorted; 32, call; 33, assignment; 34, assignment; 35, call; 36, assignment; 37, comparison_operator:n_cols == n_cols_cumulative; 38, identifier:concat_direction; 39, string:"vert"; 40, expression_statement; 41, comment:# Sanity check: the number of rows in all_data_df should; 42, comment:# correspond to the sum of the number of rows in the input dfs; 43, expression_statement; 44, expression_statement; 45, expression_statement; 46, assert_statement; 47, attribute; 48, argument_list; 49, identifier:all_data_df; 50, call; 51, identifier:n_cols; 52, subscript; 53, attribute; 54, argument_list; 55, identifier:n_cols_cumulative; 56, call; 57, identifier:n_cols; 58, identifier:n_cols_cumulative; 59, assignment; 60, assignment; 61, call; 62, assignment; 63, comparison_operator:n_rows == n_rows_cumulative; 64, call; 65, identifier:sort_index; 66, keyword_argument; 67, attribute; 68, argument_list; 69, attribute; 70, integer:1; 71, identifier:logger; 72, identifier:debug; 73, call; 74, identifier:sum; 75, argument_list; 76, identifier:all_data_df; 77, call; 78, identifier:n_rows; 79, subscript; 80, attribute; 81, argument_list; 82, identifier:n_rows_cumulative; 83, call; 84, identifier:n_rows; 85, identifier:n_rows_cumulative; 86, attribute; 87, argument_list; 88, identifier:axis; 89, integer:1; 90, identifier:pd; 91, identifier:concat; 92, identifier:data_dfs; 93, keyword_argument; 94, identifier:all_data_df; 95, identifier:shape; 96, attribute; 97, argument_list; 98, list_comprehension; 99, attribute; 100, argument_list; 101, attribute; 102, integer:0; 103, identifier:logger; 104, identifier:debug; 105, call; 106, identifier:sum; 107, argument_list; 108, identifier:all_data_df; 109, identifier:sort_index; 110, keyword_argument; 111, identifier:axis; 112, integer:1; 113, string:"all_data_df.shape[1]: {}"; 114, identifier:format; 115, identifier:n_cols; 116, subscript; 117, for_in_clause; 118, identifier:pd; 119, identifier:concat; 120, identifier:data_dfs; 121, keyword_argument; 122, identifier:all_data_df; 123, identifier:shape; 124, attribute; 125, argument_list; 126, list_comprehension; 127, identifier:axis; 128, integer:0; 129, attribute; 130, integer:1; 131, identifier:df; 132, identifier:data_dfs; 133, identifier:axis; 134, integer:0; 135, string:"all_data_df.shape[0]: {}"; 136, identifier:format; 137, identifier:n_rows; 138, subscript; 139, for_in_clause; 140, identifier:df; 141, identifier:shape; 142, attribute; 143, integer:0; 144, identifier:df; 145, identifier:data_dfs; 146, identifier:df; 147, identifier:shape
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 4, 9; 4, 10; 4, 11; 7, 12; 8, 13; 8, 14; 8, 15; 8, 16; 10, 17; 11, 18; 13, 19; 13, 20; 15, 21; 15, 22; 15, 23; 15, 24; 15, 25; 15, 26; 15, 27; 16, 28; 16, 29; 16, 30; 17, 31; 17, 32; 21, 33; 24, 34; 25, 35; 26, 36; 27, 37; 28, 38; 28, 39; 30, 40; 30, 41; 30, 42; 30, 43; 30, 44; 30, 45; 30, 46; 32, 47; 32, 48; 33, 49; 33, 50; 34, 51; 34, 52; 35, 53; 35, 54; 36, 55; 36, 56; 37, 57; 37, 58; 40, 59; 43, 60; 44, 61; 45, 62; 46, 63; 47, 64; 47, 65; 48, 66; 50, 67; 50, 68; 52, 69; 52, 70; 53, 71; 53, 72; 54, 73; 56, 74; 56, 75; 59, 76; 59, 77; 60, 78; 60, 79; 61, 80; 61, 81; 62, 82; 62, 83; 63, 84; 63, 85; 64, 86; 64, 87; 66, 88; 66, 89; 67, 90; 67, 91; 68, 92; 68, 93; 69, 94; 69, 95; 73, 96; 73, 97; 75, 98; 77, 99; 77, 100; 79, 101; 79, 102; 80, 103; 80, 104; 81, 105; 83, 106; 83, 107; 86, 108; 86, 109; 87, 110; 93, 111; 93, 112; 96, 113; 96, 114; 97, 115; 98, 116; 98, 117; 99, 118; 99, 119; 100, 120; 100, 121; 101, 122; 101, 123; 105, 124; 105, 125; 107, 126; 110, 127; 110, 128; 116, 129; 116, 130; 117, 131; 117, 132; 121, 133; 121, 134; 124, 135; 124, 136; 125, 137; 126, 138; 126, 139; 129, 140; 129, 141; 138, 142; 138, 143; 139, 144; 139, 145; 142, 146; 142, 147
def assemble_data(data_dfs, concat_direction): """ Assemble the data dfs together. Both indices are sorted. Args: data_dfs (list of pandas dfs) concat_direction (string): 'horiz' or 'vert' Returns: all_data_df_sorted (pandas df) """ if concat_direction == "horiz": # Concatenate the data_dfs horizontally all_data_df = pd.concat(data_dfs, axis=1) # Sanity check: the number of columns in all_data_df should # correspond to the sum of the number of columns in the input dfs n_cols = all_data_df.shape[1] logger.debug("all_data_df.shape[1]: {}".format(n_cols)) n_cols_cumulative = sum([df.shape[1] for df in data_dfs]) assert n_cols == n_cols_cumulative elif concat_direction == "vert": # Concatenate the data_dfs vertically all_data_df = pd.concat(data_dfs, axis=0) # Sanity check: the number of rows in all_data_df should # correspond to the sum of the number of rows in the input dfs n_rows = all_data_df.shape[0] logger.debug("all_data_df.shape[0]: {}".format(n_rows)) n_rows_cumulative = sum([df.shape[0] for df in data_dfs]) assert n_rows == n_rows_cumulative # Sort both indices all_data_df_sorted = all_data_df.sort_index(axis=0).sort_index(axis=1) return all_data_df_sorted
0, module; 1, function_definition; 2, function_name:sort_run; 3, parameters; 4, block; 5, identifier:run; 6, expression_statement; 7, comment:# Sort errors first, then by name. Also show errors that were manually; 8, comment:# approved, so the paging sort order stays the same even after users; 9, comment:# approve a diff on the run page.; 10, if_statement; 11, return_statement; 12, comment:"""Sort function for runs within a release."""; 13, comparison_operator:run.status in models.Run.DIFF_NEEDED_STATES; 14, block; 15, tuple; 16, attribute; 17, attribute; 18, return_statement; 19, integer:1; 20, attribute; 21, identifier:run; 22, identifier:status; 23, attribute; 24, identifier:DIFF_NEEDED_STATES; 25, tuple; 26, identifier:run; 27, identifier:name; 28, identifier:models; 29, identifier:Run; 30, integer:0; 31, attribute; 32, identifier:run; 33, identifier:name
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 4, 6; 4, 7; 4, 8; 4, 9; 4, 10; 4, 11; 6, 12; 10, 13; 10, 14; 11, 15; 13, 16; 13, 17; 14, 18; 15, 19; 15, 20; 16, 21; 16, 22; 17, 23; 17, 24; 18, 25; 20, 26; 20, 27; 23, 28; 23, 29; 25, 30; 25, 31; 31, 32; 31, 33
def sort_run(run): """Sort function for runs within a release.""" # Sort errors first, then by name. Also show errors that were manually # approved, so the paging sort order stays the same even after users # approve a diff on the run page. if run.status in models.Run.DIFF_NEEDED_STATES: return (0, run.name) return (1, run.name)
0, module; 1, function_definition; 2, function_name:group_and_sort_statements; 3, parameters; 4, block; 5, identifier:stmt_list; 6, default_parameter; 7, expression_statement; 8, function_definition; 9, expression_statement; 10, expression_statement; 11, expression_statement; 12, for_statement; 13, comment:# Sort the rows by count and agent names.; 14, function_definition; 15, expression_statement; 16, return_statement; 17, identifier:ev_totals; 18, None; 19, comment:"""Group statements by type and arguments, and sort by prevalence. Parameters ---------- stmt_list : list[Statement] A list of INDRA statements. ev_totals : dict{int: int} A dictionary, keyed by statement hash (shallow) with counts of total evidence as the values. Including this will allow statements to be better sorted. Returns ------- sorted_groups : list[tuple] A list of tuples containing a sort key, the statement type, and a list of statements, also sorted by evidence count, for that key and type. The sort key contains a count of statements with those argument, the arguments (normalized strings), the count of statements with those arguements and type, and then the statement type. """; 20, function_name:_count; 21, parameters; 22, block; 23, assignment; 24, assignment; 25, assignment; 26, pattern_list; 27, call; 28, comment:# Update the counts, and add key if needed.; 29, block; 30, function_name:process_rows; 31, parameters; 32, block; 33, assignment; 34, identifier:sorted_groups; 35, identifier:stmt; 36, if_statement; 37, identifier:stmt_rows; 38, call; 39, identifier:stmt_counts; 40, call; 41, identifier:arg_counts; 42, call; 43, identifier:key; 44, identifier:s; 45, identifier:_get_keyed_stmts; 46, argument_list; 47, expression_statement; 48, comment:# Keep track of the total evidence counts for this statement and the; 49, comment:# arguments.; 50, expression_statement; 51, comment:# Add up the counts for the arguments, pairwise for Complexes and; 52, comment:# Conversions. This allows, for example, a complex between MEK, ERK,; 53, comment:# and something else to lend weight to the interactions between MEK; 54, comment:# and ERK.; 55, if_statement; 56, identifier:stmt_rows; 57, for_statement; 58, identifier:sorted_groups; 59, call; 60, comparison_operator:ev_totals is None; 61, block; 62, else_clause; 63, identifier:defaultdict; 64, argument_list; 65, identifier:defaultdict; 66, argument_list; 67, identifier:defaultdict; 68, argument_list; 69, identifier:stmt_list; 70, call; 71, augmented_assignment; 72, comparison_operator:key[0] == 'Conversion'; 73, block; 74, else_clause; 75, pattern_list; 76, call; 77, block; 78, identifier:sorted; 79, argument_list; 80, identifier:ev_totals; 81, None; 82, return_statement; 83, block; 84, identifier:list; 85, lambda; 86, lambda; 87, attribute; 88, argument_list; 89, subscript; 90, call; 91, subscript; 92, string; 93, expression_statement; 94, for_statement; 95, block; 96, identifier:key; 97, identifier:stmts; 98, attribute; 99, argument_list; 100, expression_statement; 101, expression_statement; 102, expression_statement; 103, expression_statement; 104, if_statement; 105, expression_statement; 106, expression_statement; 107, expression_statement; 108, call; 109, keyword_argument; 110, keyword_argument; 111, call; 112, return_statement; 113, integer:0; 114, integer:0; 115, subscript; 116, identifier:append; 117, identifier:s; 118, identifier:stmt_counts; 119, identifier:key; 120, identifier:_count; 121, argument_list; 122, identifier:key; 123, integer:0; 124, string_content:Conversion; 125, assignment; 126, identifier:obj; 127, binary_operator:key[2] + key[3]; 128, block; 129, expression_statement; 130, identifier:stmt_rows; 131, identifier:items; 132, assignment; 133, assignment; 134, assignment; 135, assignment; 136, boolean_operator; 137, block; 138, assignment; 139, assignment; 140, yield; 141, identifier:process_rows; 142, argument_list; 143, identifier:key; 144, lambda; 145, identifier:reverse; 146, True; 147, identifier:len; 148, argument_list; 149, subscript; 150, identifier:stmt_rows; 151, identifier:key; 152, identifier:s; 153, identifier:subj; 154, subscript; 155, subscript; 156, subscript; 157, expression_statement; 158, augmented_assignment; 159, identifier:verb; 160, subscript; 161, identifier:inps; 162, subscript; 163, identifier:sub_count; 164, subscript; 165, identifier:arg_count; 166, subscript; 167, boolean_operator; 168, comparison_operator:len(inps) <= 2; 169, if_statement; 170, identifier:new_key; 171, tuple; 172, identifier:stmts; 173, call; 174, expression_list; 175, identifier:stmt_rows; 176, lambda_parameters; 177, subscript; 178, attribute; 179, identifier:ev_totals; 180, call; 181, identifier:key; 182, integer:1; 183, identifier:key; 184, integer:2; 185, identifier:key; 186, integer:3; 187, augmented_assignment; 188, subscript; 189, call; 190, identifier:key; 191, integer:0; 192, identifier:key; 193, slice; 194, identifier:stmt_counts; 195, identifier:key; 196, identifier:arg_counts; 197, identifier:inps; 198, comparison_operator:verb == 'Complex'; 199, comparison_operator:sub_count == arg_count; 200, call; 201, integer:2; 202, call; 203, block; 204, identifier:arg_count; 205, identifier:inps; 206, identifier:sub_count; 207, identifier:verb; 208, identifier:sorted; 209, argument_list; 210, identifier:new_key; 211, identifier:verb; 212, identifier:stmts; 213, identifier:tpl; 214, identifier:tpl; 215, integer:0; 216, identifier:stmt; 217, identifier:evidence; 218, attribute; 219, argument_list; 220, subscript; 221, call; 222, identifier:arg_counts; 223, subscript; 224, identifier:_count; 225, argument_list; 226, integer:1; 227, identifier:verb; 228, string; 229, identifier:sub_count; 230, identifier:arg_count; 231, identifier:len; 232, argument_list; 233, identifier:all; 234, argument_list; 235, continue_statement; 236, identifier:stmts; 237, keyword_argument; 238, keyword_argument; 239, identifier:stmt; 240, identifier:get_hash; 241, identifier:arg_counts; 242, tuple; 243, identifier:_count; 244, argument_list; 245, identifier:key; 246, slice; 247, identifier:s; 248, string_content:Complex; 249, identifier:inps; 250, list_comprehension; 251, identifier:key; 252, lambda; 253, identifier:reverse; 254, True; 255, identifier:subj; 256, identifier:obj; 257, identifier:s; 258, integer:1; 259, comparison_operator:len(set(ag.name for ag in s.agent_list())) > 2; 260, for_in_clause; 261, lambda_parameters; 262, binary_operator:_count(s) + 1/(1+len(s.agent_list())); 263, call; 264, integer:2; 265, identifier:s; 266, identifier:stmts; 267, identifier:s; 268, call; 269, binary_operator:1/(1+len(s.agent_list())); 270, identifier:len; 271, argument_list; 272, identifier:_count; 273, argument_list; 274, integer:1; 275, parenthesized_expression; 276, call; 277, identifier:s; 278, binary_operator:1+len(s.agent_list()); 279, identifier:set; 280, generator_expression; 281, integer:1; 282, call; 283, attribute; 284, for_in_clause; 285, identifier:len; 286, argument_list; 287, identifier:ag; 288, identifier:name; 289, identifier:ag; 290, call; 291, call; 292, attribute; 293, argument_list; 294, attribute; 295, argument_list; 296, identifier:s; 297, identifier:agent_list; 298, identifier:s; 299, identifier:agent_list
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 4, 14; 4, 15; 4, 16; 6, 17; 6, 18; 7, 19; 8, 20; 8, 21; 8, 22; 9, 23; 10, 24; 11, 25; 12, 26; 12, 27; 12, 28; 12, 29; 14, 30; 14, 31; 14, 32; 15, 33; 16, 34; 21, 35; 22, 36; 23, 37; 23, 38; 24, 39; 24, 40; 25, 41; 25, 42; 26, 43; 26, 44; 27, 45; 27, 46; 29, 47; 29, 48; 29, 49; 29, 50; 29, 51; 29, 52; 29, 53; 29, 54; 29, 55; 31, 56; 32, 57; 33, 58; 33, 59; 36, 60; 36, 61; 36, 62; 38, 63; 38, 64; 40, 65; 40, 66; 42, 67; 42, 68; 46, 69; 47, 70; 50, 71; 55, 72; 55, 73; 55, 74; 57, 75; 57, 76; 57, 77; 59, 78; 59, 79; 60, 80; 60, 81; 61, 82; 62, 83; 64, 84; 66, 85; 68, 86; 70, 87; 70, 88; 71, 89; 71, 90; 72, 91; 72, 92; 73, 93; 73, 94; 74, 95; 75, 96; 75, 97; 76, 98; 76, 99; 77, 100; 77, 101; 77, 102; 77, 103; 77, 104; 77, 105; 77, 106; 77, 107; 79, 108; 79, 109; 79, 110; 82, 111; 83, 112; 85, 113; 86, 114; 87, 115; 87, 116; 88, 117; 89, 118; 89, 119; 90, 120; 90, 121; 91, 122; 91, 123; 92, 124; 93, 125; 94, 126; 94, 127; 94, 128; 95, 129; 98, 130; 98, 131; 100, 132; 101, 133; 102, 134; 103, 135; 104, 136; 104, 137; 105, 138; 106, 139; 107, 140; 108, 141; 108, 142; 109, 143; 109, 144; 110, 145; 110, 146; 111, 147; 111, 148; 112, 149; 115, 150; 115, 151; 121, 152; 125, 153; 125, 154; 127, 155; 127, 156; 128, 157; 129, 158; 132, 159; 132, 160; 133, 161; 133, 162; 134, 163; 134, 164; 135, 165; 135, 166; 136, 167; 136, 168; 137, 169; 138, 170; 138, 171; 139, 172; 139, 173; 140, 174; 142, 175; 144, 176; 144, 177; 148, 178; 149, 179; 149, 180; 154, 181; 154, 182; 155, 183; 155, 184; 156, 185; 156, 186; 157, 187; 158, 188; 158, 189; 160, 190; 160, 191; 162, 192; 162, 193; 164, 194; 164, 195; 166, 196; 166, 197; 167, 198; 167, 199; 168, 200; 168, 201; 169, 202; 169, 203; 171, 204; 171, 205; 171, 206; 171, 207; 173, 208; 173, 209; 174, 210; 174, 211; 174, 212; 176, 213; 177, 214; 177, 215; 178, 216; 178, 217; 180, 218; 180, 219; 187, 220; 187, 221; 188, 222; 188, 223; 189, 224; 189, 225; 193, 226; 198, 227; 198, 228; 199, 229; 199, 230; 200, 231; 200, 232; 202, 233; 202, 234; 203, 235; 209, 236; 209, 237; 209, 238; 218, 239; 218, 240; 220, 241; 220, 242; 221, 243; 221, 244; 223, 245; 223, 246; 225, 247; 228, 248; 232, 249; 234, 250; 237, 251; 237, 252; 238, 253; 238, 254; 242, 255; 242, 256; 244, 257; 246, 258; 250, 259; 250, 260; 252, 261; 252, 262; 259, 263; 259, 264; 260, 265; 260, 266; 261, 267; 262, 268; 262, 269; 263, 270; 263, 271; 268, 272; 268, 273; 269, 274; 269, 275; 271, 276; 273, 277; 275, 278; 276, 279; 276, 280; 278, 281; 278, 282; 280, 283; 280, 284; 282, 285; 282, 286; 283, 287; 283, 288; 284, 289; 284, 290; 286, 291; 290, 292; 290, 293; 291, 294; 291, 295; 292, 296; 292, 297; 294, 298; 294, 299
def group_and_sort_statements(stmt_list, ev_totals=None): """Group statements by type and arguments, and sort by prevalence. Parameters ---------- stmt_list : list[Statement] A list of INDRA statements. ev_totals : dict{int: int} A dictionary, keyed by statement hash (shallow) with counts of total evidence as the values. Including this will allow statements to be better sorted. Returns ------- sorted_groups : list[tuple] A list of tuples containing a sort key, the statement type, and a list of statements, also sorted by evidence count, for that key and type. The sort key contains a count of statements with those argument, the arguments (normalized strings), the count of statements with those arguements and type, and then the statement type. """ def _count(stmt): if ev_totals is None: return len(stmt.evidence) else: return ev_totals[stmt.get_hash()] stmt_rows = defaultdict(list) stmt_counts = defaultdict(lambda: 0) arg_counts = defaultdict(lambda: 0) for key, s in _get_keyed_stmts(stmt_list): # Update the counts, and add key if needed. stmt_rows[key].append(s) # Keep track of the total evidence counts for this statement and the # arguments. stmt_counts[key] += _count(s) # Add up the counts for the arguments, pairwise for Complexes and # Conversions. This allows, for example, a complex between MEK, ERK, # and something else to lend weight to the interactions between MEK # and ERK. if key[0] == 'Conversion': subj = key[1] for obj in key[2] + key[3]: arg_counts[(subj, obj)] += _count(s) else: arg_counts[key[1:]] += _count(s) # Sort the rows by count and agent names. def process_rows(stmt_rows): for key, stmts in stmt_rows.items(): verb = key[0] inps = key[1:] sub_count = stmt_counts[key] arg_count = arg_counts[inps] if verb == 'Complex' and sub_count == arg_count and len(inps) <= 2: if all([len(set(ag.name for ag in s.agent_list())) > 2 for s in stmts]): continue new_key = (arg_count, inps, sub_count, verb) stmts = sorted(stmts, key=lambda s: _count(s) + 1/(1+len(s.agent_list())), reverse=True) yield new_key, verb, stmts sorted_groups = sorted(process_rows(stmt_rows), key=lambda tpl: tpl[0], reverse=True) return sorted_groups
0, module; 1, function_definition; 2, function_name:make_stmt_from_sort_key; 3, parameters; 4, block; 5, identifier:key; 6, identifier:verb; 7, expression_statement; 8, function_definition; 9, expression_statement; 10, expression_statement; 11, if_statement; 12, return_statement; 13, comment:"""Make a Statement from the sort key. Specifically, the sort key used by `group_and_sort_statements`. """; 14, function_name:make_agent; 15, parameters; 16, block; 17, assignment; 18, assignment; 19, comparison_operator:verb == 'Complex'; 20, block; 21, elif_clause; 22, elif_clause; 23, else_clause; 24, identifier:stmt; 25, identifier:name; 26, if_statement; 27, return_statement; 28, identifier:StmtClass; 29, call; 30, identifier:inps; 31, call; 32, identifier:verb; 33, string; 34, expression_statement; 35, comparison_operator:verb == 'Conversion'; 36, block; 37, boolean_operator; 38, block; 39, block; 40, boolean_operator; 41, block; 42, call; 43, identifier:get_statement_by_name; 44, argument_list; 45, identifier:list; 46, argument_list; 47, string_content:Complex; 48, assignment; 49, identifier:verb; 50, string; 51, expression_statement; 52, comparison_operator:verb == 'ActiveForm'; 53, comparison_operator:verb == 'HasActivity'; 54, expression_statement; 55, expression_statement; 56, comparison_operator:name == 'None'; 57, comparison_operator:name is None; 58, return_statement; 59, identifier:Agent; 60, argument_list; 61, identifier:verb; 62, subscript; 63, identifier:stmt; 64, call; 65, string_content:Conversion; 66, assignment; 67, identifier:verb; 68, string; 69, identifier:verb; 70, string; 71, assignment; 72, assignment; 73, identifier:name; 74, string; 75, identifier:name; 76, None; 77, None; 78, identifier:name; 79, identifier:key; 80, integer:1; 81, identifier:StmtClass; 82, argument_list; 83, identifier:stmt; 84, call; 85, string_content:ActiveForm; 86, string_content:HasActivity; 87, identifier:stmt; 88, call; 89, identifier:stmt; 90, call; 91, string_content:None; 92, list_comprehension; 93, identifier:StmtClass; 94, argument_list; 95, identifier:StmtClass; 96, argument_list; 97, identifier:StmtClass; 98, argument_list; 99, call; 100, for_in_clause; 101, call; 102, list_comprehension; 103, list_comprehension; 104, call; 105, subscript; 106, subscript; 107, list_splat; 108, identifier:make_agent; 109, argument_list; 110, identifier:name; 111, identifier:inps; 112, identifier:make_agent; 113, argument_list; 114, call; 115, for_in_clause; 116, call; 117, for_in_clause; 118, identifier:make_agent; 119, argument_list; 120, identifier:inps; 121, integer:1; 122, identifier:inps; 123, integer:2; 124, list_comprehension; 125, identifier:name; 126, subscript; 127, identifier:make_agent; 128, argument_list; 129, identifier:name; 130, subscript; 131, identifier:make_agent; 132, argument_list; 133, identifier:name; 134, subscript; 135, subscript; 136, call; 137, for_in_clause; 138, identifier:inps; 139, integer:0; 140, identifier:name; 141, identifier:inps; 142, integer:1; 143, identifier:name; 144, identifier:inps; 145, integer:2; 146, identifier:inps; 147, integer:0; 148, identifier:make_agent; 149, argument_list; 150, identifier:name; 151, identifier:inps; 152, identifier:name
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 7, 13; 8, 14; 8, 15; 8, 16; 9, 17; 10, 18; 11, 19; 11, 20; 11, 21; 11, 22; 11, 23; 12, 24; 15, 25; 16, 26; 16, 27; 17, 28; 17, 29; 18, 30; 18, 31; 19, 32; 19, 33; 20, 34; 21, 35; 21, 36; 22, 37; 22, 38; 23, 39; 26, 40; 26, 41; 27, 42; 29, 43; 29, 44; 31, 45; 31, 46; 33, 47; 34, 48; 35, 49; 35, 50; 36, 51; 37, 52; 37, 53; 38, 54; 39, 55; 40, 56; 40, 57; 41, 58; 42, 59; 42, 60; 44, 61; 46, 62; 48, 63; 48, 64; 50, 65; 51, 66; 52, 67; 52, 68; 53, 69; 53, 70; 54, 71; 55, 72; 56, 73; 56, 74; 57, 75; 57, 76; 58, 77; 60, 78; 62, 79; 62, 80; 64, 81; 64, 82; 66, 83; 66, 84; 68, 85; 70, 86; 71, 87; 71, 88; 72, 89; 72, 90; 74, 91; 82, 92; 84, 93; 84, 94; 88, 95; 88, 96; 90, 97; 90, 98; 92, 99; 92, 100; 94, 101; 94, 102; 94, 103; 96, 104; 96, 105; 96, 106; 98, 107; 99, 108; 99, 109; 100, 110; 100, 111; 101, 112; 101, 113; 102, 114; 102, 115; 103, 116; 103, 117; 104, 118; 104, 119; 105, 120; 105, 121; 106, 122; 106, 123; 107, 124; 109, 125; 113, 126; 114, 127; 114, 128; 115, 129; 115, 130; 116, 131; 116, 132; 117, 133; 117, 134; 119, 135; 124, 136; 124, 137; 126, 138; 126, 139; 128, 140; 130, 141; 130, 142; 132, 143; 134, 144; 134, 145; 135, 146; 135, 147; 136, 148; 136, 149; 137, 150; 137, 151; 149, 152
def make_stmt_from_sort_key(key, verb): """Make a Statement from the sort key. Specifically, the sort key used by `group_and_sort_statements`. """ def make_agent(name): if name == 'None' or name is None: return None return Agent(name) StmtClass = get_statement_by_name(verb) inps = list(key[1]) if verb == 'Complex': stmt = StmtClass([make_agent(name) for name in inps]) elif verb == 'Conversion': stmt = StmtClass(make_agent(inps[0]), [make_agent(name) for name in inps[1]], [make_agent(name) for name in inps[2]]) elif verb == 'ActiveForm' or verb == 'HasActivity': stmt = StmtClass(make_agent(inps[0]), inps[1], inps[2]) else: stmt = StmtClass(*[make_agent(name) for name in inps]) return stmt
0, module; 1, function_definition; 2, function_name:ungrounded_texts; 3, parameters; 4, block; 5, identifier:stmts; 6, expression_statement; 7, expression_statement; 8, expression_statement; 9, expression_statement; 10, expression_statement; 11, return_statement; 12, comment:"""Return a list of all ungrounded entities ordered by number of mentions Parameters ---------- stmts : list of :py:class:`indra.statements.Statement` Returns ------- ungroundc : list of tuple list of tuples of the form (text: str, count: int) sorted in descending order by count. """; 13, assignment; 14, assignment; 15, assignment; 16, assignment; 17, identifier:ungroundc; 18, identifier:ungrounded; 19, list_comprehension; 20, identifier:ungroundc; 21, call; 22, identifier:ungroundc; 23, call; 24, identifier:ungroundc; 25, call; 26, subscript; 27, for_in_clause; 28, for_in_clause; 29, if_clause; 30, identifier:Counter; 31, argument_list; 32, attribute; 33, argument_list; 34, identifier:sorted; 35, argument_list; 36, attribute; 37, string; 38, identifier:s; 39, identifier:stmts; 40, identifier:ag; 41, call; 42, boolean_operator; 43, identifier:ungrounded; 44, identifier:ungroundc; 45, identifier:items; 46, identifier:ungroundc; 47, keyword_argument; 48, keyword_argument; 49, identifier:ag; 50, identifier:db_refs; 51, string_content:TEXT; 52, attribute; 53, argument_list; 54, comparison_operator:ag is not None; 55, comparison_operator:list(ag.db_refs.keys()) == ['TEXT']; 56, identifier:key; 57, lambda; 58, identifier:reverse; 59, True; 60, identifier:s; 61, identifier:agent_list; 62, identifier:ag; 63, None; 64, call; 65, list; 66, lambda_parameters; 67, subscript; 68, identifier:list; 69, argument_list; 70, string; 71, identifier:x; 72, identifier:x; 73, integer:1; 74, call; 75, string_content:TEXT; 76, attribute; 77, argument_list; 78, attribute; 79, identifier:keys; 80, identifier:ag; 81, identifier:db_refs
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 4, 6; 4, 7; 4, 8; 4, 9; 4, 10; 4, 11; 6, 12; 7, 13; 8, 14; 9, 15; 10, 16; 11, 17; 13, 18; 13, 19; 14, 20; 14, 21; 15, 22; 15, 23; 16, 24; 16, 25; 19, 26; 19, 27; 19, 28; 19, 29; 21, 30; 21, 31; 23, 32; 23, 33; 25, 34; 25, 35; 26, 36; 26, 37; 27, 38; 27, 39; 28, 40; 28, 41; 29, 42; 31, 43; 32, 44; 32, 45; 35, 46; 35, 47; 35, 48; 36, 49; 36, 50; 37, 51; 41, 52; 41, 53; 42, 54; 42, 55; 47, 56; 47, 57; 48, 58; 48, 59; 52, 60; 52, 61; 54, 62; 54, 63; 55, 64; 55, 65; 57, 66; 57, 67; 64, 68; 64, 69; 65, 70; 66, 71; 67, 72; 67, 73; 69, 74; 70, 75; 74, 76; 74, 77; 76, 78; 76, 79; 78, 80; 78, 81
def ungrounded_texts(stmts): """Return a list of all ungrounded entities ordered by number of mentions Parameters ---------- stmts : list of :py:class:`indra.statements.Statement` Returns ------- ungroundc : list of tuple list of tuples of the form (text: str, count: int) sorted in descending order by count. """ ungrounded = [ag.db_refs['TEXT'] for s in stmts for ag in s.agent_list() if ag is not None and list(ag.db_refs.keys()) == ['TEXT']] ungroundc = Counter(ungrounded) ungroundc = ungroundc.items() ungroundc = sorted(ungroundc, key=lambda x: x[1], reverse=True) return ungroundc
0, module; 1, function_definition; 2, function_name:save_sentences; 3, parameters; 4, block; 5, identifier:twg; 6, identifier:stmts; 7, identifier:filename; 8, default_parameter; 9, expression_statement; 10, expression_statement; 11, expression_statement; 12, expression_statement; 13, expression_statement; 14, for_statement; 15, comment:# Write sentences to CSV file; 16, expression_statement; 17, identifier:agent_limit; 18, integer:300; 19, comment:"""Write evidence sentences for stmts with ungrounded agents to csv file. Parameters ---------- twg: list of tuple list of tuples of ungrounded agent_texts with counts of the number of times they are mentioned in the list of statements. Should be sorted in descending order by the counts. This is of the form output by the function ungrounded texts. stmts: list of :py:class:`indra.statements.Statement` filename : str Path to output file agent_limit : Optional[int] Number of agents to include in output file. Takes the top agents by count. """; 20, assignment; 21, assignment; 22, assignment; 23, call; 24, identifier:text; 25, identifier:unmapped_texts; 26, block; 27, call; 28, identifier:sentences; 29, list; 30, identifier:unmapped_texts; 31, list_comprehension; 32, identifier:counter; 33, integer:0; 34, attribute; 35, argument_list; 36, expression_statement; 37, expression_statement; 38, expression_statement; 39, if_statement; 40, identifier:write_unicode_csv; 41, argument_list; 42, subscript; 43, for_in_clause; 44, identifier:logger; 45, identifier:info; 46, binary_operator:'Getting sentences for top %d unmapped agent texts.' % agent_limit; 47, assignment; 48, augmented_assignment; 49, augmented_assignment; 50, comparison_operator:counter >= agent_limit; 51, block; 52, identifier:filename; 53, identifier:sentences; 54, keyword_argument; 55, keyword_argument; 56, keyword_argument; 57, keyword_argument; 58, identifier:t; 59, integer:0; 60, identifier:t; 61, identifier:twg; 62, string; 63, identifier:agent_limit; 64, identifier:agent_sentences; 65, call; 66, identifier:sentences; 67, call; 68, identifier:counter; 69, integer:1; 70, identifier:counter; 71, identifier:agent_limit; 72, break_statement; 73, identifier:delimiter; 74, string; 75, identifier:quotechar; 76, string:'"'; 77, identifier:quoting; 78, attribute; 79, identifier:lineterminator; 80, string; 81, string_content:Getting sentences for top %d unmapped agent texts.; 82, identifier:get_sentences_for_agent; 83, argument_list; 84, identifier:map; 85, argument_list; 86, string_content:,; 87, identifier:csv; 88, identifier:QUOTE_MINIMAL; 89, string_content; 90, identifier:text; 91, identifier:stmts; 92, lambda; 93, identifier:agent_sentences; 94, escape_sequence:\r; 95, escape_sequence:\n; 96, lambda_parameters; 97, binary_operator:(text,) + tup; 98, identifier:tup; 99, tuple; 100, identifier:tup; 101, identifier:text
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 4, 14; 4, 15; 4, 16; 8, 17; 8, 18; 9, 19; 10, 20; 11, 21; 12, 22; 13, 23; 14, 24; 14, 25; 14, 26; 16, 27; 20, 28; 20, 29; 21, 30; 21, 31; 22, 32; 22, 33; 23, 34; 23, 35; 26, 36; 26, 37; 26, 38; 26, 39; 27, 40; 27, 41; 31, 42; 31, 43; 34, 44; 34, 45; 35, 46; 36, 47; 37, 48; 38, 49; 39, 50; 39, 51; 41, 52; 41, 53; 41, 54; 41, 55; 41, 56; 41, 57; 42, 58; 42, 59; 43, 60; 43, 61; 46, 62; 46, 63; 47, 64; 47, 65; 48, 66; 48, 67; 49, 68; 49, 69; 50, 70; 50, 71; 51, 72; 54, 73; 54, 74; 55, 75; 55, 76; 56, 77; 56, 78; 57, 79; 57, 80; 62, 81; 65, 82; 65, 83; 67, 84; 67, 85; 74, 86; 78, 87; 78, 88; 80, 89; 83, 90; 83, 91; 85, 92; 85, 93; 89, 94; 89, 95; 92, 96; 92, 97; 96, 98; 97, 99; 97, 100; 99, 101
def save_sentences(twg, stmts, filename, agent_limit=300): """Write evidence sentences for stmts with ungrounded agents to csv file. Parameters ---------- twg: list of tuple list of tuples of ungrounded agent_texts with counts of the number of times they are mentioned in the list of statements. Should be sorted in descending order by the counts. This is of the form output by the function ungrounded texts. stmts: list of :py:class:`indra.statements.Statement` filename : str Path to output file agent_limit : Optional[int] Number of agents to include in output file. Takes the top agents by count. """ sentences = [] unmapped_texts = [t[0] for t in twg] counter = 0 logger.info('Getting sentences for top %d unmapped agent texts.' % agent_limit) for text in unmapped_texts: agent_sentences = get_sentences_for_agent(text, stmts) sentences += map(lambda tup: (text,) + tup, agent_sentences) counter += 1 if counter >= agent_limit: break # Write sentences to CSV file write_unicode_csv(filename, sentences, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL, lineterminator='\r\n')
0, module; 1, function_definition; 2, function_name:_get_node_key; 3, parameters; 4, block; 5, identifier:self; 6, identifier:node_dict_item; 7, expression_statement; 8, expression_statement; 9, expression_statement; 10, return_statement; 11, comment:"""Return a tuple of sorted sources and targets given a node dict."""; 12, assignment; 13, assignment; 14, tuple; 15, identifier:s; 16, call; 17, identifier:t; 18, call; 19, identifier:s; 20, identifier:t; 21, identifier:tuple; 22, argument_list; 23, identifier:tuple; 24, argument_list; 25, call; 26, call; 27, identifier:sorted; 28, argument_list; 29, identifier:sorted; 30, argument_list; 31, subscript; 32, subscript; 33, identifier:node_dict_item; 34, string; 35, identifier:node_dict_item; 36, string; 37, string_content:sources; 38, string_content:targets
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 4, 9; 4, 10; 7, 11; 8, 12; 9, 13; 10, 14; 12, 15; 12, 16; 13, 17; 13, 18; 14, 19; 14, 20; 16, 21; 16, 22; 18, 23; 18, 24; 22, 25; 24, 26; 25, 27; 25, 28; 26, 29; 26, 30; 28, 31; 30, 32; 31, 33; 31, 34; 32, 35; 32, 36; 34, 37; 36, 38
def _get_node_key(self, node_dict_item): """Return a tuple of sorted sources and targets given a node dict.""" s = tuple(sorted(node_dict_item['sources'])) t = tuple(sorted(node_dict_item['targets'])) return (s, t)
0, module; 1, function_definition; 2, function_name:_get_node_groups; 3, parameters; 4, block; 5, identifier:self; 6, expression_statement; 7, expression_statement; 8, for_statement; 9, comment:# Make a dictionary of nodes based on source/target as a key; 10, expression_statement; 11, for_statement; 12, comment:# Constrain the groups to ones that have more than 1 member; 13, expression_statement; 14, return_statement; 15, comment:"""Return a list of node id lists that are topologically identical. First construct a node_dict which is keyed to the node id and has a value which is a dict with keys 'sources' and 'targets'. The 'sources' and 'targets' each contain a list of tuples (i, polarity, source) edge of the node. node_dict is then processed by _get_node_key() which returns a tuple of (s,t) where s,t are sorted tuples of the ids for the source and target nodes. (s,t) is then used as a key in node_key_dict where the values are the node ids. node_groups is restricted to groups greater than 1 node. """; 16, assignment; 17, identifier:edge; 18, attribute; 19, comment:# Add edge as a source for its target node; 20, block; 21, assignment; 22, pattern_list; 23, call; 24, block; 25, assignment; 26, identifier:node_groups; 27, identifier:node_dict; 28, dictionary_comprehension; 29, identifier:self; 30, identifier:_edges; 31, expression_statement; 32, expression_statement; 33, comment:# Add edge as target for its source node; 34, expression_statement; 35, expression_statement; 36, identifier:node_key_dict; 37, call; 38, identifier:node_id; 39, identifier:node_d; 40, attribute; 41, argument_list; 42, expression_statement; 43, expression_statement; 44, identifier:node_groups; 45, list_comprehension; 46, pair; 47, for_in_clause; 48, assignment; 49, call; 50, assignment; 51, call; 52, attribute; 53, argument_list; 54, identifier:node_dict; 55, identifier:items; 56, assignment; 57, call; 58, identifier:g; 59, for_in_clause; 60, if_clause; 61, subscript; 62, dictionary; 63, identifier:node; 64, attribute; 65, identifier:edge_data; 66, tuple; 67, attribute; 68, argument_list; 69, identifier:edge_data; 70, tuple; 71, attribute; 72, argument_list; 73, identifier:collections; 74, identifier:defaultdict; 75, lambda; 76, identifier:key; 77, call; 78, attribute; 79, argument_list; 80, identifier:g; 81, call; 82, parenthesized_expression; 83, subscript; 84, string; 85, pair; 86, pair; 87, identifier:self; 88, identifier:_nodes; 89, subscript; 90, subscript; 91, subscript; 92, subscript; 93, identifier:append; 94, identifier:edge_data; 95, subscript; 96, subscript; 97, subscript; 98, subscript; 99, identifier:append; 100, identifier:edge_data; 101, list; 102, attribute; 103, argument_list; 104, subscript; 105, identifier:append; 106, identifier:node_id; 107, attribute; 108, argument_list; 109, comparison_operator:len(g) > 1; 110, identifier:node; 111, string; 112, string_content:id; 113, string; 114, list; 115, string; 116, list; 117, subscript; 118, string; 119, subscript; 120, string; 121, subscript; 122, string; 123, subscript; 124, string; 125, subscript; 126, string; 127, subscript; 128, string; 129, subscript; 130, string; 131, subscript; 132, string; 133, identifier:self; 134, identifier:_get_node_key; 135, identifier:node_d; 136, identifier:node_key_dict; 137, identifier:key; 138, identifier:node_key_dict; 139, identifier:values; 140, call; 141, integer:1; 142, string_content:data; 143, string_content:sources; 144, string_content:targets; 145, identifier:edge; 146, string; 147, string_content:i; 148, identifier:edge; 149, string; 150, string_content:polarity; 151, identifier:edge; 152, string; 153, string_content:source; 154, identifier:node_dict; 155, subscript; 156, string_content:sources; 157, identifier:edge; 158, string; 159, string_content:i; 160, identifier:edge; 161, string; 162, string_content:polarity; 163, identifier:edge; 164, string; 165, string_content:target; 166, identifier:node_dict; 167, subscript; 168, string_content:targets; 169, identifier:len; 170, argument_list; 171, string_content:data; 172, string_content:data; 173, string_content:data; 174, subscript; 175, string; 176, string_content:data; 177, string_content:data; 178, string_content:data; 179, subscript; 180, string; 181, identifier:g; 182, identifier:edge; 183, string; 184, string_content:target; 185, identifier:edge; 186, string; 187, string_content:source; 188, string_content:data; 189, string_content:data
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 4, 6; 4, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 4, 14; 6, 15; 7, 16; 8, 17; 8, 18; 8, 19; 8, 20; 10, 21; 11, 22; 11, 23; 11, 24; 13, 25; 14, 26; 16, 27; 16, 28; 18, 29; 18, 30; 20, 31; 20, 32; 20, 33; 20, 34; 20, 35; 21, 36; 21, 37; 22, 38; 22, 39; 23, 40; 23, 41; 24, 42; 24, 43; 25, 44; 25, 45; 28, 46; 28, 47; 31, 48; 32, 49; 34, 50; 35, 51; 37, 52; 37, 53; 40, 54; 40, 55; 42, 56; 43, 57; 45, 58; 45, 59; 45, 60; 46, 61; 46, 62; 47, 63; 47, 64; 48, 65; 48, 66; 49, 67; 49, 68; 50, 69; 50, 70; 51, 71; 51, 72; 52, 73; 52, 74; 53, 75; 56, 76; 56, 77; 57, 78; 57, 79; 59, 80; 59, 81; 60, 82; 61, 83; 61, 84; 62, 85; 62, 86; 64, 87; 64, 88; 66, 89; 66, 90; 66, 91; 67, 92; 67, 93; 68, 94; 70, 95; 70, 96; 70, 97; 71, 98; 71, 99; 72, 100; 75, 101; 77, 102; 77, 103; 78, 104; 78, 105; 79, 106; 81, 107; 81, 108; 82, 109; 83, 110; 83, 111; 84, 112; 85, 113; 85, 114; 86, 115; 86, 116; 89, 117; 89, 118; 90, 119; 90, 120; 91, 121; 91, 122; 92, 123; 92, 124; 95, 125; 95, 126; 96, 127; 96, 128; 97, 129; 97, 130; 98, 131; 98, 132; 102, 133; 102, 134; 103, 135; 104, 136; 104, 137; 107, 138; 107, 139; 109, 140; 109, 141; 111, 142; 113, 143; 115, 144; 117, 145; 117, 146; 118, 147; 119, 148; 119, 149; 120, 150; 121, 151; 121, 152; 122, 153; 123, 154; 123, 155; 124, 156; 125, 157; 125, 158; 126, 159; 127, 160; 127, 161; 128, 162; 129, 163; 129, 164; 130, 165; 131, 166; 131, 167; 132, 168; 140, 169; 140, 170; 146, 171; 149, 172; 152, 173; 155, 174; 155, 175; 158, 176; 161, 177; 164, 178; 167, 179; 167, 180; 170, 181; 174, 182; 174, 183; 175, 184; 179, 185; 179, 186; 180, 187; 183, 188; 186, 189
def _get_node_groups(self): """Return a list of node id lists that are topologically identical. First construct a node_dict which is keyed to the node id and has a value which is a dict with keys 'sources' and 'targets'. The 'sources' and 'targets' each contain a list of tuples (i, polarity, source) edge of the node. node_dict is then processed by _get_node_key() which returns a tuple of (s,t) where s,t are sorted tuples of the ids for the source and target nodes. (s,t) is then used as a key in node_key_dict where the values are the node ids. node_groups is restricted to groups greater than 1 node. """ node_dict = {node['data']['id']: {'sources': [], 'targets': []} for node in self._nodes} for edge in self._edges: # Add edge as a source for its target node edge_data = (edge['data']['i'], edge['data']['polarity'], edge['data']['source']) node_dict[edge['data']['target']]['sources'].append(edge_data) # Add edge as target for its source node edge_data = (edge['data']['i'], edge['data']['polarity'], edge['data']['target']) node_dict[edge['data']['source']]['targets'].append(edge_data) # Make a dictionary of nodes based on source/target as a key node_key_dict = collections.defaultdict(lambda: []) for node_id, node_d in node_dict.items(): key = self._get_node_key(node_d) node_key_dict[key].append(node_id) # Constrain the groups to ones that have more than 1 member node_groups = [g for g in node_key_dict.values() if (len(g) > 1)] return node_groups
0, module; 1, function_definition; 2, function_name:get_statements; 3, parameters; 4, block; 5, default_parameter; 6, default_parameter; 7, default_parameter; 8, default_parameter; 9, default_parameter; 10, default_parameter; 11, default_parameter; 12, default_parameter; 13, default_parameter; 14, default_parameter; 15, default_parameter; 16, default_parameter; 17, expression_statement; 18, expression_statement; 19, comment:# Format the result appropriately.; 20, if_statement; 21, return_statement; 22, identifier:subject; 23, None; 24, identifier:object; 25, None; 26, identifier:agents; 27, None; 28, identifier:stmt_type; 29, None; 30, identifier:use_exact_type; 31, False; 32, identifier:persist; 33, True; 34, identifier:timeout; 35, None; 36, identifier:simple_response; 37, False; 38, identifier:ev_limit; 39, integer:10; 40, identifier:best_first; 41, True; 42, identifier:tries; 43, integer:2; 44, identifier:max_stmts; 45, None; 46, comment:"""Get a processor for the INDRA DB web API matching given agents and type. There are two types of responses available. You can just get a list of INDRA Statements, or you can get an IndraDBRestProcessor object, which allow Statements to be loaded in a background thread, providing a sample of the best* content available promptly in the sample_statements attribute, and populates the statements attribute when the paged load is complete. The latter should be used in all new code, and where convenient the prior should be converted to use the processor, as this option may be removed in the future. * In the sense of having the most supporting evidence. Parameters ---------- subject/object : str Optionally specify the subject and/or object of the statements in you wish to get from the database. By default, the namespace is assumed to be HGNC gene names, however you may specify another namespace by including `@<namespace>` at the end of the name string. For example, if you want to specify an agent by chebi, you could use `CHEBI:6801@CHEBI`, or if you wanted to use the HGNC id, you could use `6871@HGNC`. agents : list[str] A list of agents, specified in the same manner as subject and object, but without specifying their grammatical position. stmt_type : str Specify the types of interactions you are interested in, as indicated by the sub-classes of INDRA's Statements. This argument is *not* case sensitive. If the statement class given has sub-classes (e.g. RegulateAmount has IncreaseAmount and DecreaseAmount), then both the class itself, and its subclasses, will be queried, by default. If you do not want this behavior, set use_exact_type=True. Note that if max_stmts is set, it is possible only the exact statement type will be returned, as this is the first searched. The processor then cycles through the types, getting a page of results for each type and adding it to the quota, until the max number of statements is reached. use_exact_type : bool If stmt_type is given, and you only want to search for that specific statement type, set this to True. Default is False. persist : bool Default is True. When False, if a query comes back limited (not all results returned), just give up and pass along what was returned. Otherwise, make further queries to get the rest of the data (which may take some time). timeout : positive int or None If an int, block until the work is done and statements are retrieved, or until the timeout has expired, in which case the results so far will be returned in the response object, and further results will be added in a separate thread as they become available. If simple_response is True, all statements available will be returned. Otherwise (if None), block indefinitely until all statements are retrieved. Default is None. simple_response : bool If True, a simple list of statements is returned (thus block should also be True). If block is False, only the original sample will be returned (as though persist was False), until the statements are done loading, in which case the rest should appear in the list. This behavior is not encouraged. Default is False (which breaks backwards compatibility with usage of INDRA versions from before 1/22/2019). WE ENCOURAGE ALL NEW USE-CASES TO USE THE PROCESSOR, AS THIS FEATURE MAY BE REMOVED AT A LATER DATE. ev_limit : int or None Limit the amount of evidence returned per Statement. Default is 10. best_first : bool If True, the preassembled statements will be sorted by the amount of evidence they have, and those with the most evidence will be prioritized. When using `max_stmts`, this means you will get the "best" statements. If False, statements will be queried in arbitrary order. tries : int > 0 Set the number of times to try the query. The database often caches results, so if a query times out the first time, trying again after a timeout will often succeed fast enough to avoid a timeout. This can also help gracefully handle an unreliable connection, if you're willing to wait. Default is 2. max_stmts : int or None Select the maximum number of statements to return. When set less than 1000 the effect is much the same as setting persist to false, and will guarantee a faster response. Default is None. Returns ------- processor : :py:class:`IndraDBRestProcessor` An instance of the IndraDBRestProcessor, which has an attribute `statements` which will be populated when the query/queries are done. This is the default behavior, and is encouraged in all future cases, however a simple list of statements may be returned using the `simple_response` option described above. """; 47, assignment; 48, identifier:simple_response; 49, block; 50, else_clause; 51, identifier:ret; 52, identifier:processor; 53, call; 54, expression_statement; 55, block; 56, identifier:IndraDBRestProcessor; 57, argument_list; 58, assignment; 59, expression_statement; 60, identifier:subject; 61, identifier:object; 62, identifier:agents; 63, identifier:stmt_type; 64, identifier:use_exact_type; 65, identifier:persist; 66, identifier:timeout; 67, identifier:ev_limit; 68, identifier:best_first; 69, identifier:tries; 70, identifier:max_stmts; 71, identifier:ret; 72, attribute; 73, assignment; 74, identifier:processor; 75, identifier:statements; 76, identifier:ret; 77, identifier:processor
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 3, 9; 3, 10; 3, 11; 3, 12; 3, 13; 3, 14; 3, 15; 3, 16; 4, 17; 4, 18; 4, 19; 4, 20; 4, 21; 5, 22; 5, 23; 6, 24; 6, 25; 7, 26; 7, 27; 8, 28; 8, 29; 9, 30; 9, 31; 10, 32; 10, 33; 11, 34; 11, 35; 12, 36; 12, 37; 13, 38; 13, 39; 14, 40; 14, 41; 15, 42; 15, 43; 16, 44; 16, 45; 17, 46; 18, 47; 20, 48; 20, 49; 20, 50; 21, 51; 47, 52; 47, 53; 49, 54; 50, 55; 53, 56; 53, 57; 54, 58; 55, 59; 57, 60; 57, 61; 57, 62; 57, 63; 57, 64; 57, 65; 57, 66; 57, 67; 57, 68; 57, 69; 57, 70; 58, 71; 58, 72; 59, 73; 72, 74; 72, 75; 73, 76; 73, 77
def get_statements(subject=None, object=None, agents=None, stmt_type=None, use_exact_type=False, persist=True, timeout=None, simple_response=False, ev_limit=10, best_first=True, tries=2, max_stmts=None): """Get a processor for the INDRA DB web API matching given agents and type. There are two types of responses available. You can just get a list of INDRA Statements, or you can get an IndraDBRestProcessor object, which allow Statements to be loaded in a background thread, providing a sample of the best* content available promptly in the sample_statements attribute, and populates the statements attribute when the paged load is complete. The latter should be used in all new code, and where convenient the prior should be converted to use the processor, as this option may be removed in the future. * In the sense of having the most supporting evidence. Parameters ---------- subject/object : str Optionally specify the subject and/or object of the statements in you wish to get from the database. By default, the namespace is assumed to be HGNC gene names, however you may specify another namespace by including `@<namespace>` at the end of the name string. For example, if you want to specify an agent by chebi, you could use `CHEBI:6801@CHEBI`, or if you wanted to use the HGNC id, you could use `6871@HGNC`. agents : list[str] A list of agents, specified in the same manner as subject and object, but without specifying their grammatical position. stmt_type : str Specify the types of interactions you are interested in, as indicated by the sub-classes of INDRA's Statements. This argument is *not* case sensitive. If the statement class given has sub-classes (e.g. RegulateAmount has IncreaseAmount and DecreaseAmount), then both the class itself, and its subclasses, will be queried, by default. If you do not want this behavior, set use_exact_type=True. Note that if max_stmts is set, it is possible only the exact statement type will be returned, as this is the first searched. The processor then cycles through the types, getting a page of results for each type and adding it to the quota, until the max number of statements is reached. use_exact_type : bool If stmt_type is given, and you only want to search for that specific statement type, set this to True. Default is False. persist : bool Default is True. When False, if a query comes back limited (not all results returned), just give up and pass along what was returned. Otherwise, make further queries to get the rest of the data (which may take some time). timeout : positive int or None If an int, block until the work is done and statements are retrieved, or until the timeout has expired, in which case the results so far will be returned in the response object, and further results will be added in a separate thread as they become available. If simple_response is True, all statements available will be returned. Otherwise (if None), block indefinitely until all statements are retrieved. Default is None. simple_response : bool If True, a simple list of statements is returned (thus block should also be True). If block is False, only the original sample will be returned (as though persist was False), until the statements are done loading, in which case the rest should appear in the list. This behavior is not encouraged. Default is False (which breaks backwards compatibility with usage of INDRA versions from before 1/22/2019). WE ENCOURAGE ALL NEW USE-CASES TO USE THE PROCESSOR, AS THIS FEATURE MAY BE REMOVED AT A LATER DATE. ev_limit : int or None Limit the amount of evidence returned per Statement. Default is 10. best_first : bool If True, the preassembled statements will be sorted by the amount of evidence they have, and those with the most evidence will be prioritized. When using `max_stmts`, this means you will get the "best" statements. If False, statements will be queried in arbitrary order. tries : int > 0 Set the number of times to try the query. The database often caches results, so if a query times out the first time, trying again after a timeout will often succeed fast enough to avoid a timeout. This can also help gracefully handle an unreliable connection, if you're willing to wait. Default is 2. max_stmts : int or None Select the maximum number of statements to return. When set less than 1000 the effect is much the same as setting persist to false, and will guarantee a faster response. Default is None. Returns ------- processor : :py:class:`IndraDBRestProcessor` An instance of the IndraDBRestProcessor, which has an attribute `statements` which will be populated when the query/queries are done. This is the default behavior, and is encouraged in all future cases, however a simple list of statements may be returned using the `simple_response` option described above. """ processor = IndraDBRestProcessor(subject, object, agents, stmt_type, use_exact_type, persist, timeout, ev_limit, best_first, tries, max_stmts) # Format the result appropriately. if simple_response: ret = processor.statements else: ret = processor return ret
0, module; 1, function_definition; 2, function_name:get_statements_by_hash; 3, parameters; 4, block; 5, identifier:hash_list; 6, default_parameter; 7, default_parameter; 8, default_parameter; 9, expression_statement; 10, if_statement; 11, if_statement; 12, if_statement; 13, if_statement; 14, expression_statement; 15, return_statement; 16, identifier:ev_limit; 17, integer:100; 18, identifier:best_first; 19, True; 20, identifier:tries; 21, integer:2; 22, comment:"""Get fully formed statements from a list of hashes. Parameters ---------- hash_list : list[int or str] A list of statement hashes. ev_limit : int or None Limit the amount of evidence returned per Statement. Default is 100. best_first : bool If True, the preassembled statements will be sorted by the amount of evidence they have, and those with the most evidence will be prioritized. When using `max_stmts`, this means you will get the "best" statements. If False, statements will be queried in arbitrary order. tries : int > 0 Set the number of times to try the query. The database often caches results, so if a query times out the first time, trying again after a timeout will often succeed fast enough to avoid a timeout. This can also help gracefully handle an unreliable connection, if you're willing to wait. Default is 2. """; 23, not_operator; 24, block; 25, not_operator; 26, block; 27, call; 28, block; 29, not_operator; 30, block; 31, assignment; 32, call; 33, call; 34, raise_statement; 35, identifier:hash_list; 36, return_statement; 37, identifier:isinstance; 38, argument_list; 39, expression_statement; 40, call; 41, raise_statement; 42, identifier:resp; 43, call; 44, identifier:stmts_from_json; 45, argument_list; 46, identifier:isinstance; 47, argument_list; 48, call; 49, list; 50, subscript; 51, identifier:str; 52, assignment; 53, identifier:all; 54, argument_list; 55, call; 56, identifier:submit_statement_request; 57, argument_list; 58, call; 59, identifier:hash_list; 60, identifier:list; 61, identifier:ValueError; 62, argument_list; 63, identifier:hash_list; 64, integer:0; 65, identifier:hash_list; 66, list_comprehension; 67, list_comprehension; 68, identifier:ValueError; 69, argument_list; 70, string; 71, string; 72, keyword_argument; 73, keyword_argument; 74, keyword_argument; 75, keyword_argument; 76, attribute; 77, argument_list; 78, binary_operator:"The `hash_list` input is a list, not %s." % type(hash_list); 79, call; 80, for_in_clause; 81, call; 82, for_in_clause; 83, concatenated_string; 84, string_content:post; 85, string_content:from_hashes; 86, identifier:ev_limit; 87, identifier:ev_limit; 88, identifier:data; 89, dictionary; 90, identifier:best_first; 91, identifier:best_first; 92, identifier:tries; 93, identifier:tries; 94, subscript; 95, identifier:values; 96, string:"The `hash_list` input is a list, not %s."; 97, call; 98, identifier:int; 99, argument_list; 100, identifier:h; 101, identifier:hash_list; 102, identifier:isinstance; 103, argument_list; 104, identifier:h; 105, identifier:hash_list; 106, string:"Hashes must be ints or strings that can be "; 107, string:"converted into ints."; 108, pair; 109, call; 110, string; 111, identifier:type; 112, argument_list; 113, identifier:h; 114, identifier:h; 115, identifier:int; 116, string; 117, identifier:hash_list; 118, attribute; 119, argument_list; 120, string_content:statements; 121, identifier:hash_list; 122, string_content:hashes; 123, identifier:resp; 124, identifier:json
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 4, 14; 4, 15; 6, 16; 6, 17; 7, 18; 7, 19; 8, 20; 8, 21; 9, 22; 10, 23; 10, 24; 11, 25; 11, 26; 12, 27; 12, 28; 13, 29; 13, 30; 14, 31; 15, 32; 23, 33; 24, 34; 25, 35; 26, 36; 27, 37; 27, 38; 28, 39; 29, 40; 30, 41; 31, 42; 31, 43; 32, 44; 32, 45; 33, 46; 33, 47; 34, 48; 36, 49; 38, 50; 38, 51; 39, 52; 40, 53; 40, 54; 41, 55; 43, 56; 43, 57; 45, 58; 47, 59; 47, 60; 48, 61; 48, 62; 50, 63; 50, 64; 52, 65; 52, 66; 54, 67; 55, 68; 55, 69; 57, 70; 57, 71; 57, 72; 57, 73; 57, 74; 57, 75; 58, 76; 58, 77; 62, 78; 66, 79; 66, 80; 67, 81; 67, 82; 69, 83; 70, 84; 71, 85; 72, 86; 72, 87; 73, 88; 73, 89; 74, 90; 74, 91; 75, 92; 75, 93; 76, 94; 76, 95; 78, 96; 78, 97; 79, 98; 79, 99; 80, 100; 80, 101; 81, 102; 81, 103; 82, 104; 82, 105; 83, 106; 83, 107; 89, 108; 94, 109; 94, 110; 97, 111; 97, 112; 99, 113; 103, 114; 103, 115; 108, 116; 108, 117; 109, 118; 109, 119; 110, 120; 112, 121; 116, 122; 118, 123; 118, 124
def get_statements_by_hash(hash_list, ev_limit=100, best_first=True, tries=2): """Get fully formed statements from a list of hashes. Parameters ---------- hash_list : list[int or str] A list of statement hashes. ev_limit : int or None Limit the amount of evidence returned per Statement. Default is 100. best_first : bool If True, the preassembled statements will be sorted by the amount of evidence they have, and those with the most evidence will be prioritized. When using `max_stmts`, this means you will get the "best" statements. If False, statements will be queried in arbitrary order. tries : int > 0 Set the number of times to try the query. The database often caches results, so if a query times out the first time, trying again after a timeout will often succeed fast enough to avoid a timeout. This can also help gracefully handle an unreliable connection, if you're willing to wait. Default is 2. """ if not isinstance(hash_list, list): raise ValueError("The `hash_list` input is a list, not %s." % type(hash_list)) if not hash_list: return [] if isinstance(hash_list[0], str): hash_list = [int(h) for h in hash_list] if not all([isinstance(h, int) for h in hash_list]): raise ValueError("Hashes must be ints or strings that can be " "converted into ints.") resp = submit_statement_request('post', 'from_hashes', ev_limit=ev_limit, data={'hashes': hash_list}, best_first=best_first, tries=tries) return stmts_from_json(resp.json()['statements'].values())
0, module; 1, function_definition; 2, function_name:get_statements_for_paper; 3, parameters; 4, block; 5, identifier:ids; 6, default_parameter; 7, default_parameter; 8, default_parameter; 9, default_parameter; 10, expression_statement; 11, expression_statement; 12, expression_statement; 13, expression_statement; 14, return_statement; 15, identifier:ev_limit; 16, integer:10; 17, identifier:best_first; 18, True; 19, identifier:tries; 20, integer:2; 21, identifier:max_stmts; 22, None; 23, comment:"""Get the set of raw Statements extracted from a paper given by the id. Parameters ---------- ids : list[(<id type>, <id value>)] A list of tuples with ids and their type. The type can be any one of 'pmid', 'pmcid', 'doi', 'pii', 'manuscript id', or 'trid', which is the primary key id of the text references in the database. ev_limit : int or None Limit the amount of evidence returned per Statement. Default is 10. best_first : bool If True, the preassembled statements will be sorted by the amount of evidence they have, and those with the most evidence will be prioritized. When using `max_stmts`, this means you will get the "best" statements. If False, statements will be queried in arbitrary order. tries : int > 0 Set the number of times to try the query. The database often caches results, so if a query times out the first time, trying again after a timeout will often succeed fast enough to avoid a timeout. This can also help gracefully handle an unreliable connection, if you're willing to wait. Default is 2. max_stmts : int or None Select a maximum number of statements to be returned. Default is None. Returns ------- stmts : list[:py:class:`indra.statements.Statement`] A list of INDRA Statement instances. """; 24, assignment; 25, assignment; 26, assignment; 27, call; 28, identifier:id_l; 29, list_comprehension; 30, identifier:resp; 31, call; 32, identifier:stmts_json; 33, subscript; 34, identifier:stmts_from_json; 35, argument_list; 36, dictionary; 37, for_in_clause; 38, identifier:submit_statement_request; 39, argument_list; 40, call; 41, string; 42, call; 43, pair; 44, pair; 45, pattern_list; 46, identifier:ids; 47, string; 48, string; 49, keyword_argument; 50, keyword_argument; 51, keyword_argument; 52, keyword_argument; 53, keyword_argument; 54, attribute; 55, argument_list; 56, string_content:statements; 57, attribute; 58, argument_list; 59, string; 60, identifier:id_val; 61, string; 62, identifier:id_type; 63, identifier:id_type; 64, identifier:id_val; 65, string_content:post; 66, string_content:from_papers; 67, identifier:data; 68, dictionary; 69, identifier:ev_limit; 70, identifier:ev_limit; 71, identifier:best_first; 72, identifier:best_first; 73, identifier:tries; 74, identifier:tries; 75, identifier:max_stmts; 76, identifier:max_stmts; 77, identifier:resp; 78, identifier:json; 79, identifier:stmts_json; 80, identifier:values; 81, string_content:id; 82, string_content:type; 83, pair; 84, string; 85, identifier:id_l; 86, string_content:ids
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 3, 9; 4, 10; 4, 11; 4, 12; 4, 13; 4, 14; 6, 15; 6, 16; 7, 17; 7, 18; 8, 19; 8, 20; 9, 21; 9, 22; 10, 23; 11, 24; 12, 25; 13, 26; 14, 27; 24, 28; 24, 29; 25, 30; 25, 31; 26, 32; 26, 33; 27, 34; 27, 35; 29, 36; 29, 37; 31, 38; 31, 39; 33, 40; 33, 41; 35, 42; 36, 43; 36, 44; 37, 45; 37, 46; 39, 47; 39, 48; 39, 49; 39, 50; 39, 51; 39, 52; 39, 53; 40, 54; 40, 55; 41, 56; 42, 57; 42, 58; 43, 59; 43, 60; 44, 61; 44, 62; 45, 63; 45, 64; 47, 65; 48, 66; 49, 67; 49, 68; 50, 69; 50, 70; 51, 71; 51, 72; 52, 73; 52, 74; 53, 75; 53, 76; 54, 77; 54, 78; 57, 79; 57, 80; 59, 81; 61, 82; 68, 83; 83, 84; 83, 85; 84, 86
def get_statements_for_paper(ids, ev_limit=10, best_first=True, tries=2, max_stmts=None): """Get the set of raw Statements extracted from a paper given by the id. Parameters ---------- ids : list[(<id type>, <id value>)] A list of tuples with ids and their type. The type can be any one of 'pmid', 'pmcid', 'doi', 'pii', 'manuscript id', or 'trid', which is the primary key id of the text references in the database. ev_limit : int or None Limit the amount of evidence returned per Statement. Default is 10. best_first : bool If True, the preassembled statements will be sorted by the amount of evidence they have, and those with the most evidence will be prioritized. When using `max_stmts`, this means you will get the "best" statements. If False, statements will be queried in arbitrary order. tries : int > 0 Set the number of times to try the query. The database often caches results, so if a query times out the first time, trying again after a timeout will often succeed fast enough to avoid a timeout. This can also help gracefully handle an unreliable connection, if you're willing to wait. Default is 2. max_stmts : int or None Select a maximum number of statements to be returned. Default is None. Returns ------- stmts : list[:py:class:`indra.statements.Statement`] A list of INDRA Statement instances. """ id_l = [{'id': id_val, 'type': id_type} for id_type, id_val in ids] resp = submit_statement_request('post', 'from_papers', data={'ids': id_l}, ev_limit=ev_limit, best_first=best_first, tries=tries, max_stmts=max_stmts) stmts_json = resp.json()['statements'] return stmts_from_json(stmts_json.values())
0, module; 1, function_definition; 2, function_name:process_directory_statements_sorted_by_pmid; 3, parameters; 4, block; 5, identifier:directory_name; 6, expression_statement; 7, expression_statement; 8, expression_statement; 9, for_statement; 10, return_statement; 11, comment:"""Processes a directory filled with CSXML files, first normalizing the character encoding to utf-8, and then processing into INDRA statements sorted by pmid. Parameters ---------- directory_name : str The name of a directory filled with csxml files to process Returns ------- pmid_dict : dict A dictionary mapping pmids to a list of statements corresponding to that pmid """; 12, assignment; 13, assignment; 14, identifier:statement; 15, call; 16, block; 17, identifier:s_dict; 18, identifier:s_dict; 19, call; 20, identifier:mp; 21, call; 22, attribute; 23, argument_list; 24, expression_statement; 25, identifier:defaultdict; 26, argument_list; 27, identifier:process_directory; 28, argument_list; 29, identifier:mp; 30, identifier:iter_statements; 31, call; 32, identifier:list; 33, identifier:directory_name; 34, keyword_argument; 35, attribute; 36, argument_list; 37, identifier:lazy; 38, True; 39, subscript; 40, identifier:append; 41, identifier:statement; 42, identifier:s_dict; 43, attribute; 44, subscript; 45, identifier:pmid; 46, attribute; 47, integer:0; 48, identifier:statement; 49, identifier:evidence
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 4, 6; 4, 7; 4, 8; 4, 9; 4, 10; 6, 11; 7, 12; 8, 13; 9, 14; 9, 15; 9, 16; 10, 17; 12, 18; 12, 19; 13, 20; 13, 21; 15, 22; 15, 23; 16, 24; 19, 25; 19, 26; 21, 27; 21, 28; 22, 29; 22, 30; 24, 31; 26, 32; 28, 33; 28, 34; 31, 35; 31, 36; 34, 37; 34, 38; 35, 39; 35, 40; 36, 41; 39, 42; 39, 43; 43, 44; 43, 45; 44, 46; 44, 47; 46, 48; 46, 49
def process_directory_statements_sorted_by_pmid(directory_name): """Processes a directory filled with CSXML files, first normalizing the character encoding to utf-8, and then processing into INDRA statements sorted by pmid. Parameters ---------- directory_name : str The name of a directory filled with csxml files to process Returns ------- pmid_dict : dict A dictionary mapping pmids to a list of statements corresponding to that pmid """ s_dict = defaultdict(list) mp = process_directory(directory_name, lazy=True) for statement in mp.iter_statements(): s_dict[statement.evidence[0].pmid].append(statement) return s_dict
0, module; 1, function_definition; 2, function_name:process_file_sorted_by_pmid; 3, parameters; 4, block; 5, identifier:file_name; 6, expression_statement; 7, expression_statement; 8, expression_statement; 9, for_statement; 10, return_statement; 11, comment:"""Processes a file and returns a dictionary mapping pmids to a list of statements corresponding to that pmid. Parameters ---------- file_name : str A csxml file to process Returns ------- s_dict : dict Dictionary mapping pmids to a list of statements corresponding to that pmid """; 12, assignment; 13, assignment; 14, identifier:statement; 15, call; 16, block; 17, identifier:s_dict; 18, identifier:s_dict; 19, call; 20, identifier:mp; 21, call; 22, attribute; 23, argument_list; 24, expression_statement; 25, identifier:defaultdict; 26, argument_list; 27, identifier:process_file; 28, argument_list; 29, identifier:mp; 30, identifier:iter_statements; 31, call; 32, identifier:list; 33, identifier:file_name; 34, keyword_argument; 35, attribute; 36, argument_list; 37, identifier:lazy; 38, True; 39, subscript; 40, identifier:append; 41, identifier:statement; 42, identifier:s_dict; 43, attribute; 44, subscript; 45, identifier:pmid; 46, attribute; 47, integer:0; 48, identifier:statement; 49, identifier:evidence
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 4, 6; 4, 7; 4, 8; 4, 9; 4, 10; 6, 11; 7, 12; 8, 13; 9, 14; 9, 15; 9, 16; 10, 17; 12, 18; 12, 19; 13, 20; 13, 21; 15, 22; 15, 23; 16, 24; 19, 25; 19, 26; 21, 27; 21, 28; 22, 29; 22, 30; 24, 31; 26, 32; 28, 33; 28, 34; 31, 35; 31, 36; 34, 37; 34, 38; 35, 39; 35, 40; 36, 41; 39, 42; 39, 43; 43, 44; 43, 45; 44, 46; 44, 47; 46, 48; 46, 49
def process_file_sorted_by_pmid(file_name): """Processes a file and returns a dictionary mapping pmids to a list of statements corresponding to that pmid. Parameters ---------- file_name : str A csxml file to process Returns ------- s_dict : dict Dictionary mapping pmids to a list of statements corresponding to that pmid """ s_dict = defaultdict(list) mp = process_file(file_name, lazy=True) for statement in mp.iter_statements(): s_dict[statement.evidence[0].pmid].append(statement) return s_dict
0, module; 1, function_definition; 2, function_name:flatten_stmts; 3, parameters; 4, block; 5, identifier:stmts; 6, expression_statement; 7, expression_statement; 8, for_statement; 9, return_statement; 10, comment:"""Return the full set of unique stms in a pre-assembled stmt graph. The flattened list of statements returned by this function can be compared to the original set of unique statements to make sure no statements have been lost during the preassembly process. Parameters ---------- stmts : list of :py:class:`indra.statements.Statement` A list of top-level statements with associated supporting statements resulting from building a statement hierarchy with :py:meth:`combine_related`. Returns ------- stmts : list of :py:class:`indra.statements.Statement` List of all statements contained in the hierarchical statement graph. Examples -------- Calling :py:meth:`combine_related` on two statements results in one top-level statement; calling :py:func:`flatten_stmts` recovers both: >>> from indra.preassembler.hierarchy_manager import hierarchies >>> braf = Agent('BRAF') >>> map2k1 = Agent('MAP2K1') >>> st1 = Phosphorylation(braf, map2k1) >>> st2 = Phosphorylation(braf, map2k1, residue='S') >>> pa = Preassembler(hierarchies, [st1, st2]) >>> pa.combine_related() # doctest:+ELLIPSIS [Phosphorylation(BRAF(), MAP2K1(), S)] >>> flattened = flatten_stmts(pa.related_stmts) >>> flattened.sort(key=lambda x: x.matches_key()) >>> flattened [Phosphorylation(BRAF(), MAP2K1()), Phosphorylation(BRAF(), MAP2K1(), S)] """; 11, assignment; 12, identifier:stmt; 13, identifier:stmts; 14, block; 15, call; 16, identifier:total_stmts; 17, call; 18, if_statement; 19, identifier:list; 20, argument_list; 21, identifier:set; 22, argument_list; 23, attribute; 24, block; 25, identifier:total_stmts; 26, identifier:stmts; 27, identifier:stmt; 28, identifier:supported_by; 29, expression_statement; 30, expression_statement; 31, assignment; 32, assignment; 33, identifier:children; 34, call; 35, identifier:total_stmts; 36, call; 37, identifier:flatten_stmts; 38, argument_list; 39, attribute; 40, argument_list; 41, attribute; 42, identifier:total_stmts; 43, identifier:union; 44, identifier:children; 45, identifier:stmt; 46, identifier:supported_by
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 4, 6; 4, 7; 4, 8; 4, 9; 6, 10; 7, 11; 8, 12; 8, 13; 8, 14; 9, 15; 11, 16; 11, 17; 14, 18; 15, 19; 15, 20; 17, 21; 17, 22; 18, 23; 18, 24; 20, 25; 22, 26; 23, 27; 23, 28; 24, 29; 24, 30; 29, 31; 30, 32; 31, 33; 31, 34; 32, 35; 32, 36; 34, 37; 34, 38; 36, 39; 36, 40; 38, 41; 39, 42; 39, 43; 40, 44; 41, 45; 41, 46
def flatten_stmts(stmts): """Return the full set of unique stms in a pre-assembled stmt graph. The flattened list of statements returned by this function can be compared to the original set of unique statements to make sure no statements have been lost during the preassembly process. Parameters ---------- stmts : list of :py:class:`indra.statements.Statement` A list of top-level statements with associated supporting statements resulting from building a statement hierarchy with :py:meth:`combine_related`. Returns ------- stmts : list of :py:class:`indra.statements.Statement` List of all statements contained in the hierarchical statement graph. Examples -------- Calling :py:meth:`combine_related` on two statements results in one top-level statement; calling :py:func:`flatten_stmts` recovers both: >>> from indra.preassembler.hierarchy_manager import hierarchies >>> braf = Agent('BRAF') >>> map2k1 = Agent('MAP2K1') >>> st1 = Phosphorylation(braf, map2k1) >>> st2 = Phosphorylation(braf, map2k1, residue='S') >>> pa = Preassembler(hierarchies, [st1, st2]) >>> pa.combine_related() # doctest:+ELLIPSIS [Phosphorylation(BRAF(), MAP2K1(), S)] >>> flattened = flatten_stmts(pa.related_stmts) >>> flattened.sort(key=lambda x: x.matches_key()) >>> flattened [Phosphorylation(BRAF(), MAP2K1()), Phosphorylation(BRAF(), MAP2K1(), S)] """ total_stmts = set(stmts) for stmt in stmts: if stmt.supported_by: children = flatten_stmts(stmt.supported_by) total_stmts = total_stmts.union(children) return list(total_stmts)
0, module; 1, function_definition; 2, function_name:combine_duplicate_stmts; 3, parameters; 4, block; 5, identifier:stmts; 6, expression_statement; 7, comment:# Helper function to get a list of evidence matches keys; 8, function_definition; 9, comment:# Iterate over groups of duplicate statements; 10, expression_statement; 11, for_statement; 12, return_statement; 13, comment:"""Combine evidence from duplicate Statements. Statements are deemed to be duplicates if they have the same key returned by the `matches_key()` method of the Statement class. This generally means that statements must be identical in terms of their arguments and can differ only in their associated `Evidence` objects. This function keeps the first instance of each set of duplicate statements and merges the lists of Evidence from all of the other statements. Parameters ---------- stmts : list of :py:class:`indra.statements.Statement` Set of statements to de-duplicate. Returns ------- list of :py:class:`indra.statements.Statement` Unique statements with accumulated evidence across duplicates. Examples -------- De-duplicate and combine evidence for two statements differing only in their evidence lists: >>> map2k1 = Agent('MAP2K1') >>> mapk1 = Agent('MAPK1') >>> stmt1 = Phosphorylation(map2k1, mapk1, 'T', '185', ... evidence=[Evidence(text='evidence 1')]) >>> stmt2 = Phosphorylation(map2k1, mapk1, 'T', '185', ... evidence=[Evidence(text='evidence 2')]) >>> uniq_stmts = Preassembler.combine_duplicate_stmts([stmt1, stmt2]) >>> uniq_stmts [Phosphorylation(MAP2K1(), MAPK1(), T, 185)] >>> sorted([e.text for e in uniq_stmts[0].evidence]) # doctest:+IGNORE_UNICODE ['evidence 1', 'evidence 2'] """; 14, function_name:_ev_keys; 15, parameters; 16, block; 17, assignment; 18, pattern_list; 19, call; 20, block; 21, identifier:unique_stmts; 22, identifier:sts; 23, expression_statement; 24, for_statement; 25, return_statement; 26, identifier:unique_stmts; 27, list; 28, identifier:_; 29, identifier:duplicates; 30, attribute; 31, argument_list; 32, expression_statement; 33, comment:# Get the first statement and add the evidence of all subsequent; 34, comment:# Statements to it; 35, expression_statement; 36, expression_statement; 37, for_statement; 38, expression_statement; 39, if_statement; 40, comment:# This should never be None or anything else; 41, assert_statement; 42, expression_statement; 43, assignment; 44, identifier:stmt; 45, identifier:sts; 46, block; 47, identifier:ev_keys; 48, identifier:Preassembler; 49, identifier:_get_stmt_matching_groups; 50, identifier:stmts; 51, assignment; 52, assignment; 53, assignment; 54, pattern_list; 55, call; 56, block; 57, assignment; 58, comparison_operator:len(end_ev_keys) != len(start_ev_keys); 59, block; 60, call; 61, call; 62, identifier:ev_keys; 63, list; 64, for_statement; 65, identifier:ev_keys; 66, call; 67, identifier:duplicates; 68, call; 69, identifier:start_ev_keys; 70, call; 71, identifier:stmt_ix; 72, identifier:stmt; 73, identifier:enumerate; 74, argument_list; 75, if_statement; 76, if_statement; 77, expression_statement; 78, expression_statement; 79, for_statement; 80, identifier:end_ev_keys; 81, call; 82, call; 83, call; 84, expression_statement; 85, identifier:isinstance; 86, argument_list; 87, attribute; 88, argument_list; 89, identifier:ev; 90, attribute; 91, block; 92, identifier:set; 93, argument_list; 94, identifier:list; 95, argument_list; 96, identifier:_ev_keys; 97, argument_list; 98, identifier:duplicates; 99, comparison_operator:stmt_ix is 0; 100, block; 101, comparison_operator:len(duplicates) == 1; 102, block; 103, assignment; 104, assignment; 105, identifier:ev; 106, attribute; 107, block; 108, identifier:_ev_keys; 109, argument_list; 110, identifier:len; 111, argument_list; 112, identifier:len; 113, argument_list; 114, call; 115, identifier:new_stmt; 116, identifier:Statement; 117, identifier:unique_stmts; 118, identifier:append; 119, identifier:new_stmt; 120, identifier:stmt; 121, identifier:evidence; 122, expression_statement; 123, identifier:duplicates; 124, identifier:duplicates; 125, identifier:stmt_ix; 126, integer:0; 127, expression_statement; 128, call; 129, integer:1; 130, expression_statement; 131, identifier:raw_text; 132, list_comprehension; 133, identifier:raw_grounding; 134, list_comprehension; 135, identifier:stmt; 136, identifier:evidence; 137, expression_statement; 138, if_statement; 139, list; 140, identifier:end_ev_keys; 141, identifier:start_ev_keys; 142, attribute; 143, argument_list; 144, call; 145, assignment; 146, identifier:len; 147, argument_list; 148, assignment; 149, conditional_expression:None if ag is None else ag.db_refs.get('TEXT'); 150, for_in_clause; 151, conditional_expression:None if ag is None else ag.db_refs; 152, for_in_clause; 153, assignment; 154, comparison_operator:ev_key not in ev_keys; 155, comment:# In case there are already agents annotations, we; 156, comment:# just add a new key for raw_text, otherwise create; 157, comment:# a new key; 158, block; 159, identifier:new_stmt; 160, identifier:logger; 161, identifier:debug; 162, binary_operator:'%d redundant evidences eliminated.' % (len(start_ev_keys) - len(end_ev_keys)); 163, attribute; 164, argument_list; 165, identifier:new_stmt; 166, call; 167, identifier:duplicates; 168, attribute; 169, attribute; 170, None; 171, comparison_operator:ag is None; 172, call; 173, identifier:ag; 174, call; 175, None; 176, comparison_operator:ag is None; 177, attribute; 178, identifier:ag; 179, call; 180, identifier:ev_key; 181, binary_operator:ev.matches_key() + str(raw_text) + \ str(raw_grounding); 182, identifier:ev_key; 183, identifier:ev_keys; 184, if_statement; 185, if_statement; 186, expression_statement; 187, expression_statement; 188, expression_statement; 189, string; 190, parenthesized_expression; 191, identifier:ev_keys; 192, identifier:append; 193, call; 194, attribute; 195, argument_list; 196, identifier:new_stmt; 197, identifier:uuid; 198, identifier:stmt; 199, identifier:uuid; 200, identifier:ag; 201, None; 202, attribute; 203, argument_list; 204, attribute; 205, argument_list; 206, identifier:ag; 207, None; 208, identifier:ag; 209, identifier:db_refs; 210, attribute; 211, argument_list; 212, binary_operator:ev.matches_key() + str(raw_text); 213, line_continuation:\; 214, call; 215, comparison_operator:'agents' in ev.annotations; 216, block; 217, else_clause; 218, comparison_operator:'prior_uuids' not in ev.annotations; 219, block; 220, call; 221, call; 222, call; 223, string_content:%d redundant evidences eliminated.; 224, binary_operator:len(start_ev_keys) - len(end_ev_keys); 225, attribute; 226, argument_list; 227, identifier:stmt; 228, identifier:make_generic_copy; 229, attribute; 230, identifier:get; 231, string; 232, identifier:stmt; 233, identifier:agent_list; 234, keyword_argument; 235, identifier:stmt; 236, identifier:agent_list; 237, keyword_argument; 238, call; 239, call; 240, identifier:str; 241, argument_list; 242, string; 243, attribute; 244, expression_statement; 245, expression_statement; 246, block; 247, string; 248, attribute; 249, expression_statement; 250, attribute; 251, argument_list; 252, attribute; 253, argument_list; 254, attribute; 255, argument_list; 256, call; 257, call; 258, identifier:ev; 259, identifier:matches_key; 260, identifier:ag; 261, identifier:db_refs; 262, string_content:TEXT; 263, identifier:deep_sorted; 264, True; 265, identifier:deep_sorted; 266, True; 267, attribute; 268, argument_list; 269, identifier:str; 270, argument_list; 271, identifier:raw_grounding; 272, string_content:agents; 273, identifier:ev; 274, identifier:annotations; 275, assignment; 276, assignment; 277, expression_statement; 278, string_content:prior_uuids; 279, identifier:ev; 280, identifier:annotations; 281, assignment; 282, subscript; 283, identifier:append; 284, attribute; 285, attribute; 286, identifier:append; 287, identifier:ev; 288, identifier:ev_keys; 289, identifier:add; 290, identifier:ev_key; 291, identifier:len; 292, argument_list; 293, identifier:len; 294, argument_list; 295, identifier:ev; 296, identifier:matches_key; 297, identifier:raw_text; 298, subscript; 299, identifier:raw_text; 300, subscript; 301, line_continuation:\; 302, identifier:raw_grounding; 303, assignment; 304, subscript; 305, list; 306, attribute; 307, string; 308, identifier:stmt; 309, identifier:uuid; 310, identifier:new_stmt; 311, identifier:evidence; 312, identifier:start_ev_keys; 313, identifier:end_ev_keys; 314, subscript; 315, string; 316, subscript; 317, string; 318, subscript; 319, line_continuation:\; 320, dictionary; 321, attribute; 322, string; 323, identifier:ev; 324, identifier:annotations; 325, string_content:prior_uuids; 326, attribute; 327, string; 328, string_content:raw_text; 329, attribute; 330, string; 331, string_content:raw_grounding; 332, attribute; 333, string; 334, pair; 335, pair; 336, identifier:ev; 337, identifier:annotations; 338, string_content:prior_uuids; 339, identifier:ev; 340, identifier:annotations; 341, string_content:agents; 342, identifier:ev; 343, identifier:annotations; 344, string_content:agents; 345, identifier:ev; 346, identifier:annotations; 347, string_content:agents; 348, string; 349, identifier:raw_text; 350, string; 351, identifier:raw_grounding; 352, string_content:raw_text; 353, string_content:raw_grounding
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 4, 6; 4, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 6, 13; 8, 14; 8, 15; 8, 16; 10, 17; 11, 18; 11, 19; 11, 20; 12, 21; 15, 22; 16, 23; 16, 24; 16, 25; 17, 26; 17, 27; 18, 28; 18, 29; 19, 30; 19, 31; 20, 32; 20, 33; 20, 34; 20, 35; 20, 36; 20, 37; 20, 38; 20, 39; 20, 40; 20, 41; 20, 42; 23, 43; 24, 44; 24, 45; 24, 46; 25, 47; 30, 48; 30, 49; 31, 50; 32, 51; 35, 52; 36, 53; 37, 54; 37, 55; 37, 56; 38, 57; 39, 58; 39, 59; 41, 60; 42, 61; 43, 62; 43, 63; 46, 64; 51, 65; 51, 66; 52, 67; 52, 68; 53, 69; 53, 70; 54, 71; 54, 72; 55, 73; 55, 74; 56, 75; 56, 76; 56, 77; 56, 78; 56, 79; 57, 80; 57, 81; 58, 82; 58, 83; 59, 84; 60, 85; 60, 86; 61, 87; 61, 88; 64, 89; 64, 90; 64, 91; 66, 92; 66, 93; 68, 94; 68, 95; 70, 96; 70, 97; 74, 98; 75, 99; 75, 100; 76, 101; 76, 102; 77, 103; 78, 104; 79, 105; 79, 106; 79, 107; 81, 108; 81, 109; 82, 110; 82, 111; 83, 112; 83, 113; 84, 114; 86, 115; 86, 116; 87, 117; 87, 118; 88, 119; 90, 120; 90, 121; 91, 122; 95, 123; 97, 124; 99, 125; 99, 126; 100, 127; 101, 128; 101, 129; 102, 130; 103, 131; 103, 132; 104, 133; 104, 134; 106, 135; 106, 136; 107, 137; 107, 138; 109, 139; 111, 140; 113, 141; 114, 142; 114, 143; 122, 144; 127, 145; 128, 146; 128, 147; 130, 148; 132, 149; 132, 150; 134, 151; 134, 152; 137, 153; 138, 154; 138, 155; 138, 156; 138, 157; 138, 158; 139, 159; 142, 160; 142, 161; 143, 162; 144, 163; 144, 164; 145, 165; 145, 166; 147, 167; 148, 168; 148, 169; 149, 170; 149, 171; 149, 172; 150, 173; 150, 174; 151, 175; 151, 176; 151, 177; 152, 178; 152, 179; 153, 180; 153, 181; 154, 182; 154, 183; 158, 184; 158, 185; 158, 186; 158, 187; 158, 188; 162, 189; 162, 190; 163, 191; 163, 192; 164, 193; 166, 194; 166, 195; 168, 196; 168, 197; 169, 198; 169, 199; 171, 200; 171, 201; 172, 202; 172, 203; 174, 204; 174, 205; 176, 206; 176, 207; 177, 208; 177, 209; 179, 210; 179, 211; 181, 212; 181, 213; 181, 214; 184, 215; 184, 216; 184, 217; 185, 218; 185, 219; 186, 220; 187, 221; 188, 222; 189, 223; 190, 224; 193, 225; 193, 226; 194, 227; 194, 228; 202, 229; 202, 230; 203, 231; 204, 232; 204, 233; 205, 234; 210, 235; 210, 236; 211, 237; 212, 238; 212, 239; 214, 240; 214, 241; 215, 242; 215, 243; 216, 244; 216, 245; 217, 246; 218, 247; 218, 248; 219, 249; 220, 250; 220, 251; 221, 252; 221, 253; 222, 254; 222, 255; 224, 256; 224, 257; 225, 258; 225, 259; 229, 260; 229, 261; 231, 262; 234, 263; 234, 264; 237, 265; 237, 266; 238, 267; 238, 268; 239, 269; 239, 270; 241, 271; 242, 272; 243, 273; 243, 274; 244, 275; 245, 276; 246, 277; 247, 278; 248, 279; 248, 280; 249, 281; 250, 282; 250, 283; 251, 284; 252, 285; 252, 286; 253, 287; 254, 288; 254, 289; 255, 290; 256, 291; 256, 292; 257, 293; 257, 294; 267, 295; 267, 296; 270, 297; 275, 298; 275, 299; 276, 300; 276, 301; 276, 302; 277, 303; 281, 304; 281, 305; 282, 306; 282, 307; 284, 308; 284, 309; 285, 310; 285, 311; 292, 312; 294, 313; 298, 314; 298, 315; 300, 316; 300, 317; 303, 318; 303, 319; 303, 320; 304, 321; 304, 322; 306, 323; 306, 324; 307, 325; 314, 326; 314, 327; 315, 328; 316, 329; 316, 330; 317, 331; 318, 332; 318, 333; 320, 334; 320, 335; 321, 336; 321, 337; 322, 338; 326, 339; 326, 340; 327, 341; 329, 342; 329, 343; 330, 344; 332, 345; 332, 346; 333, 347; 334, 348; 334, 349; 335, 350; 335, 351; 348, 352; 350, 353
def combine_duplicate_stmts(stmts): """Combine evidence from duplicate Statements. Statements are deemed to be duplicates if they have the same key returned by the `matches_key()` method of the Statement class. This generally means that statements must be identical in terms of their arguments and can differ only in their associated `Evidence` objects. This function keeps the first instance of each set of duplicate statements and merges the lists of Evidence from all of the other statements. Parameters ---------- stmts : list of :py:class:`indra.statements.Statement` Set of statements to de-duplicate. Returns ------- list of :py:class:`indra.statements.Statement` Unique statements with accumulated evidence across duplicates. Examples -------- De-duplicate and combine evidence for two statements differing only in their evidence lists: >>> map2k1 = Agent('MAP2K1') >>> mapk1 = Agent('MAPK1') >>> stmt1 = Phosphorylation(map2k1, mapk1, 'T', '185', ... evidence=[Evidence(text='evidence 1')]) >>> stmt2 = Phosphorylation(map2k1, mapk1, 'T', '185', ... evidence=[Evidence(text='evidence 2')]) >>> uniq_stmts = Preassembler.combine_duplicate_stmts([stmt1, stmt2]) >>> uniq_stmts [Phosphorylation(MAP2K1(), MAPK1(), T, 185)] >>> sorted([e.text for e in uniq_stmts[0].evidence]) # doctest:+IGNORE_UNICODE ['evidence 1', 'evidence 2'] """ # Helper function to get a list of evidence matches keys def _ev_keys(sts): ev_keys = [] for stmt in sts: for ev in stmt.evidence: ev_keys.append(ev.matches_key()) return ev_keys # Iterate over groups of duplicate statements unique_stmts = [] for _, duplicates in Preassembler._get_stmt_matching_groups(stmts): ev_keys = set() # Get the first statement and add the evidence of all subsequent # Statements to it duplicates = list(duplicates) start_ev_keys = _ev_keys(duplicates) for stmt_ix, stmt in enumerate(duplicates): if stmt_ix is 0: new_stmt = stmt.make_generic_copy() if len(duplicates) == 1: new_stmt.uuid = stmt.uuid raw_text = [None if ag is None else ag.db_refs.get('TEXT') for ag in stmt.agent_list(deep_sorted=True)] raw_grounding = [None if ag is None else ag.db_refs for ag in stmt.agent_list(deep_sorted=True)] for ev in stmt.evidence: ev_key = ev.matches_key() + str(raw_text) + \ str(raw_grounding) if ev_key not in ev_keys: # In case there are already agents annotations, we # just add a new key for raw_text, otherwise create # a new key if 'agents' in ev.annotations: ev.annotations['agents']['raw_text'] = raw_text ev.annotations['agents']['raw_grounding'] = \ raw_grounding else: ev.annotations['agents'] = \ {'raw_text': raw_text, 'raw_grounding': raw_grounding} if 'prior_uuids' not in ev.annotations: ev.annotations['prior_uuids'] = [] ev.annotations['prior_uuids'].append(stmt.uuid) new_stmt.evidence.append(ev) ev_keys.add(ev_key) end_ev_keys = _ev_keys([new_stmt]) if len(end_ev_keys) != len(start_ev_keys): logger.debug('%d redundant evidences eliminated.' % (len(start_ev_keys) - len(end_ev_keys))) # This should never be None or anything else assert isinstance(new_stmt, Statement) unique_stmts.append(new_stmt) return unique_stmts
0, module; 1, function_definition; 2, function_name:sort_data; 3, parameters; 4, block; 5, identifier:data; 6, identifier:cols; 7, expression_statement; 8, return_statement; 9, comment:"""Sort `data` rows and order columns"""; 10, call; 11, attribute; 12, argument_list; 13, subscript; 14, identifier:reset_index; 15, keyword_argument; 16, call; 17, binary_operator:cols + ['value']; 18, identifier:drop; 19, True; 20, attribute; 21, argument_list; 22, identifier:cols; 23, list; 24, identifier:data; 25, identifier:sort_values; 26, identifier:cols; 27, string; 28, string_content:value
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 7, 9; 8, 10; 10, 11; 10, 12; 11, 13; 11, 14; 12, 15; 13, 16; 13, 17; 15, 18; 15, 19; 16, 20; 16, 21; 17, 22; 17, 23; 20, 24; 20, 25; 21, 26; 23, 27; 27, 28
def sort_data(data, cols): """Sort `data` rows and order columns""" return data.sort_values(cols)[cols + ['value']].reset_index(drop=True)
0, module; 1, function_definition; 2, function_name:check_internal_consistency; 3, parameters; 4, block; 5, identifier:self; 6, dictionary_splat_pattern; 7, expression_statement; 8, expression_statement; 9, for_statement; 10, return_statement; 11, identifier:kwargs; 12, comment:"""Check whether the database is internally consistent We check that all variables are equal to the sum of their sectoral components and that all the regions add up to the World total. If the check is passed, None is returned, otherwise a dictionary of inconsistent variables is returned. Note: at the moment, this method's regional checking is limited to checking that all the regions sum to the World region. We cannot make this more automatic unless we start to store how the regions relate, see [this issue](https://github.com/IAMconsortium/pyam/issues/106). Parameters ---------- kwargs: passed to `np.isclose()` """; 13, assignment; 14, identifier:variable; 15, call; 16, block; 17, conditional_expression:inconsistent_vars if inconsistent_vars else None; 18, identifier:inconsistent_vars; 19, dictionary; 20, attribute; 21, argument_list; 22, expression_statement; 23, if_statement; 24, expression_statement; 25, if_statement; 26, identifier:inconsistent_vars; 27, identifier:inconsistent_vars; 28, None; 29, identifier:self; 30, identifier:variables; 31, assignment; 32, comparison_operator:diff_agg is not None; 33, block; 34, assignment; 35, comparison_operator:diff_regional is not None; 36, block; 37, identifier:diff_agg; 38, call; 39, identifier:diff_agg; 40, None; 41, expression_statement; 42, identifier:diff_regional; 43, call; 44, identifier:diff_regional; 45, None; 46, expression_statement; 47, attribute; 48, argument_list; 49, assignment; 50, attribute; 51, argument_list; 52, assignment; 53, identifier:self; 54, identifier:check_aggregate; 55, identifier:variable; 56, dictionary_splat; 57, subscript; 58, identifier:diff_agg; 59, identifier:self; 60, identifier:check_aggregate_region; 61, identifier:variable; 62, dictionary_splat; 63, subscript; 64, identifier:diff_regional; 65, identifier:kwargs; 66, identifier:inconsistent_vars; 67, binary_operator:variable + "-aggregate"; 68, identifier:kwargs; 69, identifier:inconsistent_vars; 70, binary_operator:variable + "-regional"; 71, identifier:variable; 72, string:"-aggregate"; 73, identifier:variable; 74, string:"-regional"
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 4, 9; 4, 10; 6, 11; 7, 12; 8, 13; 9, 14; 9, 15; 9, 16; 10, 17; 13, 18; 13, 19; 15, 20; 15, 21; 16, 22; 16, 23; 16, 24; 16, 25; 17, 26; 17, 27; 17, 28; 20, 29; 20, 30; 22, 31; 23, 32; 23, 33; 24, 34; 25, 35; 25, 36; 31, 37; 31, 38; 32, 39; 32, 40; 33, 41; 34, 42; 34, 43; 35, 44; 35, 45; 36, 46; 38, 47; 38, 48; 41, 49; 43, 50; 43, 51; 46, 52; 47, 53; 47, 54; 48, 55; 48, 56; 49, 57; 49, 58; 50, 59; 50, 60; 51, 61; 51, 62; 52, 63; 52, 64; 56, 65; 57, 66; 57, 67; 62, 68; 63, 69; 63, 70; 67, 71; 67, 72; 70, 73; 70, 74
def check_internal_consistency(self, **kwargs): """Check whether the database is internally consistent We check that all variables are equal to the sum of their sectoral components and that all the regions add up to the World total. If the check is passed, None is returned, otherwise a dictionary of inconsistent variables is returned. Note: at the moment, this method's regional checking is limited to checking that all the regions sum to the World region. We cannot make this more automatic unless we start to store how the regions relate, see [this issue](https://github.com/IAMconsortium/pyam/issues/106). Parameters ---------- kwargs: passed to `np.isclose()` """ inconsistent_vars = {} for variable in self.variables(): diff_agg = self.check_aggregate(variable, **kwargs) if diff_agg is not None: inconsistent_vars[variable + "-aggregate"] = diff_agg diff_regional = self.check_aggregate_region(variable, **kwargs) if diff_regional is not None: inconsistent_vars[variable + "-regional"] = diff_regional return inconsistent_vars if inconsistent_vars else None
0, module; 1, function_definition; 2, function_name:tag_create; 3, parameters; 4, block; 5, identifier:self; 6, identifier:label; 7, default_parameter; 8, default_parameter; 9, default_parameter; 10, default_parameter; 11, default_parameter; 12, expression_statement; 13, expression_statement; 14, comment:# filter input into lists of ids; 15, expression_statement; 16, for_statement; 17, comment:# filter entities into id lists too; 18, expression_statement; 19, for_statement; 20, comment:# finally, omit all id lists that are empty; 21, expression_statement; 22, expression_statement; 23, if_statement; 24, expression_statement; 25, return_statement; 26, identifier:instances; 27, None; 28, identifier:domains; 29, None; 30, identifier:nodebalancers; 31, None; 32, identifier:volumes; 33, None; 34, identifier:entities; 35, list; 36, comment:""" Creates a new Tag and optionally applies it to the given entities. :param label: The label for the new Tag :type label: str :param entities: A list of objects to apply this Tag to upon creation. May only be taggable types (Linode Instances, Domains, NodeBalancers, or Volumes). These are applied *in addition to* any IDs specified with ``instances``, ``domains``, ``nodebalancers``, or ``volumes``, and is a convenience for sending multiple entity types without sorting them yourself. :type entities: list of Instance, Domain, NodeBalancer, and/or Volume :param instances: A list of Linode Instances to apply this Tag to upon creation :type instances: list of Instance or list of int :param domains: A list of Domains to apply this Tag to upon creation :type domains: list of Domain or list of int :param nodebalancers: A list of NodeBalancers to apply this Tag to upon creation :type nodebalancers: list of NodeBalancer or list of int :param volumes: A list of Volumes to apply this Tag to upon creation :type volumes: list of Volumes or list of int :returns: The new Tag :rtype: Tag """; 37, assignment; 38, assignment; 39, pattern_list; 40, identifier:sorter; 41, comment:# if we got something, we need to find its ID; 42, block; 43, assignment; 44, identifier:e; 45, identifier:entities; 46, block; 47, assignment; 48, assignment; 49, not_operator; 50, block; 51, assignment; 52, identifier:t; 53, pattern_list; 54, expression_list; 55, identifier:sorter; 56, call; 57, identifier:id_list; 58, identifier:input_list; 59, if_statement; 60, identifier:type_map; 61, dictionary; 62, if_statement; 63, identifier:params; 64, dictionary; 65, identifier:result; 66, call; 67, comparison_operator:'label' in result; 68, raise_statement; 69, identifier:t; 70, call; 71, identifier:linode_ids; 72, identifier:nodebalancer_ids; 73, identifier:domain_ids; 74, identifier:volume_ids; 75, list; 76, list; 77, list; 78, list; 79, identifier:zip; 80, argument_list; 81, comparison_operator:input_list is not None; 82, block; 83, pair; 84, pair; 85, pair; 86, pair; 87, comparison_operator:type(e) in type_map; 88, block; 89, else_clause; 90, pair; 91, pair; 92, pair; 93, pair; 94, pair; 95, attribute; 96, argument_list; 97, string; 98, identifier:result; 99, call; 100, identifier:Tag; 101, argument_list; 102, tuple; 103, tuple; 104, identifier:input_list; 105, None; 106, for_statement; 107, identifier:Instance; 108, identifier:linode_ids; 109, identifier:NodeBalancer; 110, identifier:nodebalancer_ids; 111, identifier:Domain; 112, identifier:domain_ids; 113, identifier:Volume; 114, identifier:volume_ids; 115, call; 116, identifier:type_map; 117, expression_statement; 118, block; 119, string; 120, identifier:label; 121, string; 122, boolean_operator; 123, string; 124, boolean_operator; 125, string; 126, boolean_operator; 127, string; 128, boolean_operator; 129, identifier:self; 130, identifier:post; 131, string; 132, keyword_argument; 133, string_content:label; 134, identifier:UnexpectedResponseError; 135, argument_list; 136, identifier:self; 137, subscript; 138, identifier:result; 139, identifier:linode_ids; 140, identifier:nodebalancer_ids; 141, identifier:domain_ids; 142, identifier:volume_ids; 143, identifier:instances; 144, identifier:nodebalancers; 145, identifier:domains; 146, identifier:volumes; 147, identifier:cur; 148, identifier:input_list; 149, block; 150, identifier:type; 151, argument_list; 152, call; 153, raise_statement; 154, string_content:label; 155, string_content:linodes; 156, identifier:linode_ids; 157, None; 158, string_content:nodebalancers; 159, identifier:nodebalancer_ids; 160, None; 161, string_content:domains; 162, identifier:domain_ids; 163, None; 164, string_content:volumes; 165, identifier:volume_ids; 166, None; 167, string_content:/tags; 168, identifier:data; 169, identifier:params; 170, string; 171, keyword_argument; 172, identifier:result; 173, string; 174, if_statement; 175, identifier:e; 176, attribute; 177, argument_list; 178, call; 179, string_content:Unexpected response when creating Tag!; 180, identifier:json; 181, identifier:result; 182, string_content:label; 183, call; 184, block; 185, else_clause; 186, subscript; 187, identifier:append; 188, attribute; 189, identifier:ValueError; 190, argument_list; 191, identifier:isinstance; 192, argument_list; 193, expression_statement; 194, block; 195, identifier:type_map; 196, call; 197, identifier:e; 198, identifier:id; 199, call; 200, identifier:cur; 201, identifier:int; 202, call; 203, expression_statement; 204, identifier:type; 205, argument_list; 206, attribute; 207, argument_list; 208, attribute; 209, argument_list; 210, call; 211, identifier:e; 212, string; 213, identifier:format; 214, call; 215, identifier:id_list; 216, identifier:append; 217, identifier:cur; 218, attribute; 219, argument_list; 220, string_content:Unsupported entity type {}; 221, identifier:type; 222, argument_list; 223, identifier:id_list; 224, identifier:append; 225, attribute; 226, identifier:e; 227, identifier:cur; 228, identifier:id
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 3, 9; 3, 10; 3, 11; 4, 12; 4, 13; 4, 14; 4, 15; 4, 16; 4, 17; 4, 18; 4, 19; 4, 20; 4, 21; 4, 22; 4, 23; 4, 24; 4, 25; 7, 26; 7, 27; 8, 28; 8, 29; 9, 30; 9, 31; 10, 32; 10, 33; 11, 34; 11, 35; 12, 36; 13, 37; 15, 38; 16, 39; 16, 40; 16, 41; 16, 42; 18, 43; 19, 44; 19, 45; 19, 46; 21, 47; 22, 48; 23, 49; 23, 50; 24, 51; 25, 52; 37, 53; 37, 54; 38, 55; 38, 56; 39, 57; 39, 58; 42, 59; 43, 60; 43, 61; 46, 62; 47, 63; 47, 64; 48, 65; 48, 66; 49, 67; 50, 68; 51, 69; 51, 70; 53, 71; 53, 72; 53, 73; 53, 74; 54, 75; 54, 76; 54, 77; 54, 78; 56, 79; 56, 80; 59, 81; 59, 82; 61, 83; 61, 84; 61, 85; 61, 86; 62, 87; 62, 88; 62, 89; 64, 90; 64, 91; 64, 92; 64, 93; 64, 94; 66, 95; 66, 96; 67, 97; 67, 98; 68, 99; 70, 100; 70, 101; 80, 102; 80, 103; 81, 104; 81, 105; 82, 106; 83, 107; 83, 108; 84, 109; 84, 110; 85, 111; 85, 112; 86, 113; 86, 114; 87, 115; 87, 116; 88, 117; 89, 118; 90, 119; 90, 120; 91, 121; 91, 122; 92, 123; 92, 124; 93, 125; 93, 126; 94, 127; 94, 128; 95, 129; 95, 130; 96, 131; 96, 132; 97, 133; 99, 134; 99, 135; 101, 136; 101, 137; 101, 138; 102, 139; 102, 140; 102, 141; 102, 142; 103, 143; 103, 144; 103, 145; 103, 146; 106, 147; 106, 148; 106, 149; 115, 150; 115, 151; 117, 152; 118, 153; 119, 154; 121, 155; 122, 156; 122, 157; 123, 158; 124, 159; 124, 160; 125, 161; 126, 162; 126, 163; 127, 164; 128, 165; 128, 166; 131, 167; 132, 168; 132, 169; 135, 170; 135, 171; 137, 172; 137, 173; 149, 174; 151, 175; 152, 176; 152, 177; 153, 178; 170, 179; 171, 180; 171, 181; 173, 182; 174, 183; 174, 184; 174, 185; 176, 186; 176, 187; 177, 188; 178, 189; 178, 190; 183, 191; 183, 192; 184, 193; 185, 194; 186, 195; 186, 196; 188, 197; 188, 198; 190, 199; 192, 200; 192, 201; 193, 202; 194, 203; 196, 204; 196, 205; 199, 206; 199, 207; 202, 208; 202, 209; 203, 210; 205, 211; 206, 212; 206, 213; 207, 214; 208, 215; 208, 216; 209, 217; 210, 218; 210, 219; 212, 220; 214, 221; 214, 222; 218, 223; 218, 224; 219, 225; 222, 226; 225, 227; 225, 228
def tag_create(self, label, instances=None, domains=None, nodebalancers=None, volumes=None, entities=[]): """ Creates a new Tag and optionally applies it to the given entities. :param label: The label for the new Tag :type label: str :param entities: A list of objects to apply this Tag to upon creation. May only be taggable types (Linode Instances, Domains, NodeBalancers, or Volumes). These are applied *in addition to* any IDs specified with ``instances``, ``domains``, ``nodebalancers``, or ``volumes``, and is a convenience for sending multiple entity types without sorting them yourself. :type entities: list of Instance, Domain, NodeBalancer, and/or Volume :param instances: A list of Linode Instances to apply this Tag to upon creation :type instances: list of Instance or list of int :param domains: A list of Domains to apply this Tag to upon creation :type domains: list of Domain or list of int :param nodebalancers: A list of NodeBalancers to apply this Tag to upon creation :type nodebalancers: list of NodeBalancer or list of int :param volumes: A list of Volumes to apply this Tag to upon creation :type volumes: list of Volumes or list of int :returns: The new Tag :rtype: Tag """ linode_ids, nodebalancer_ids, domain_ids, volume_ids = [], [], [], [] # filter input into lists of ids sorter = zip((linode_ids, nodebalancer_ids, domain_ids, volume_ids), (instances, nodebalancers, domains, volumes)) for id_list, input_list in sorter: # if we got something, we need to find its ID if input_list is not None: for cur in input_list: if isinstance(cur, int): id_list.append(cur) else: id_list.append(cur.id) # filter entities into id lists too type_map = { Instance: linode_ids, NodeBalancer: nodebalancer_ids, Domain: domain_ids, Volume: volume_ids, } for e in entities: if type(e) in type_map: type_map[type(e)].append(e.id) else: raise ValueError('Unsupported entity type {}'.format(type(e))) # finally, omit all id lists that are empty params = { 'label': label, 'linodes': linode_ids or None, 'nodebalancers': nodebalancer_ids or None, 'domains': domain_ids or None, 'volumes': volume_ids or None, } result = self.post('/tags', data=params) if not 'label' in result: raise UnexpectedResponseError('Unexpected response when creating Tag!', json=result) t = Tag(self, result['label'], result) return t
0, module; 1, function_definition; 2, function_name:insert_graph; 3, parameters; 4, block; 5, identifier:cur; 6, identifier:nodelist; 7, identifier:edgelist; 8, default_parameter; 9, expression_statement; 10, if_statement; 11, if_statement; 12, if_statement; 13, if_statement; 14, expression_statement; 15, expression_statement; 16, identifier:encoded_data; 17, None; 18, comment:"""Insert a graph into the cache. A graph is stored by number of nodes, number of edges and a json-encoded list of edges. Args: cur (:class:`sqlite3.Cursor`): An sqlite3 cursor. This function is meant to be run within a :obj:`with` statement. nodelist (list): The nodes in the graph. edgelist (list): The edges in the graph. encoded_data (dict, optional): If a dictionary is provided, it will be populated with the serialized data. This is useful for preventing encoding the same information many times. Notes: This function assumes that the nodes are index-labeled and range from 0 to num_nodes - 1. In order to minimize the total size of the cache, it is a good idea to sort the nodelist and edgelist before inserting. Examples: >>> nodelist = [0, 1, 2] >>> edgelist = [(0, 1), (1, 2)] >>> with pmc.cache_connect(':memory:') as cur: ... pmc.insert_graph(cur, nodelist, edgelist) >>> nodelist = [0, 1, 2] >>> edgelist = [(0, 1), (1, 2)] >>> encoded_data = {} >>> with pmc.cache_connect(':memory:') as cur: ... pmc.insert_graph(cur, nodelist, edgelist, encoded_data) >>> encoded_data['num_nodes'] 3 >>> encoded_data['num_edges'] 2 >>> encoded_data['edges'] '[[0,1],[1,2]]' """; 19, comparison_operator:encoded_data is None; 20, block; 21, comparison_operator:'num_nodes' not in encoded_data; 22, block; 23, comparison_operator:'num_edges' not in encoded_data; 24, block; 25, comparison_operator:'edges' not in encoded_data; 26, block; 27, assignment; 28, call; 29, identifier:encoded_data; 30, None; 31, expression_statement; 32, string; 33, identifier:encoded_data; 34, expression_statement; 35, string; 36, identifier:encoded_data; 37, expression_statement; 38, string; 39, identifier:encoded_data; 40, expression_statement; 41, identifier:insert; 42, comment:""" INSERT OR IGNORE INTO graph(num_nodes, num_edges, edges) VALUES (:num_nodes, :num_edges, :edges); """; 43, attribute; 44, argument_list; 45, assignment; 46, string_content:num_nodes; 47, assignment; 48, string_content:num_edges; 49, assignment; 50, string_content:edges; 51, assignment; 52, identifier:cur; 53, identifier:execute; 54, identifier:insert; 55, identifier:encoded_data; 56, identifier:encoded_data; 57, dictionary; 58, subscript; 59, call; 60, subscript; 61, call; 62, subscript; 63, call; 64, identifier:encoded_data; 65, string; 66, identifier:len; 67, argument_list; 68, identifier:encoded_data; 69, string; 70, identifier:len; 71, argument_list; 72, identifier:encoded_data; 73, string; 74, attribute; 75, argument_list; 76, string_content:num_nodes; 77, identifier:nodelist; 78, string_content:num_edges; 79, identifier:edgelist; 80, string_content:edges; 81, identifier:json; 82, identifier:dumps; 83, identifier:edgelist; 84, keyword_argument; 85, identifier:separators; 86, tuple; 87, string; 88, string; 89, string_content:,; 90, string_content::
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 4, 14; 4, 15; 8, 16; 8, 17; 9, 18; 10, 19; 10, 20; 11, 21; 11, 22; 12, 23; 12, 24; 13, 25; 13, 26; 14, 27; 15, 28; 19, 29; 19, 30; 20, 31; 21, 32; 21, 33; 22, 34; 23, 35; 23, 36; 24, 37; 25, 38; 25, 39; 26, 40; 27, 41; 27, 42; 28, 43; 28, 44; 31, 45; 32, 46; 34, 47; 35, 48; 37, 49; 38, 50; 40, 51; 43, 52; 43, 53; 44, 54; 44, 55; 45, 56; 45, 57; 47, 58; 47, 59; 49, 60; 49, 61; 51, 62; 51, 63; 58, 64; 58, 65; 59, 66; 59, 67; 60, 68; 60, 69; 61, 70; 61, 71; 62, 72; 62, 73; 63, 74; 63, 75; 65, 76; 67, 77; 69, 78; 71, 79; 73, 80; 74, 81; 74, 82; 75, 83; 75, 84; 84, 85; 84, 86; 86, 87; 86, 88; 87, 89; 88, 90
def insert_graph(cur, nodelist, edgelist, encoded_data=None): """Insert a graph into the cache. A graph is stored by number of nodes, number of edges and a json-encoded list of edges. Args: cur (:class:`sqlite3.Cursor`): An sqlite3 cursor. This function is meant to be run within a :obj:`with` statement. nodelist (list): The nodes in the graph. edgelist (list): The edges in the graph. encoded_data (dict, optional): If a dictionary is provided, it will be populated with the serialized data. This is useful for preventing encoding the same information many times. Notes: This function assumes that the nodes are index-labeled and range from 0 to num_nodes - 1. In order to minimize the total size of the cache, it is a good idea to sort the nodelist and edgelist before inserting. Examples: >>> nodelist = [0, 1, 2] >>> edgelist = [(0, 1), (1, 2)] >>> with pmc.cache_connect(':memory:') as cur: ... pmc.insert_graph(cur, nodelist, edgelist) >>> nodelist = [0, 1, 2] >>> edgelist = [(0, 1), (1, 2)] >>> encoded_data = {} >>> with pmc.cache_connect(':memory:') as cur: ... pmc.insert_graph(cur, nodelist, edgelist, encoded_data) >>> encoded_data['num_nodes'] 3 >>> encoded_data['num_edges'] 2 >>> encoded_data['edges'] '[[0,1],[1,2]]' """ if encoded_data is None: encoded_data = {} if 'num_nodes' not in encoded_data: encoded_data['num_nodes'] = len(nodelist) if 'num_edges' not in encoded_data: encoded_data['num_edges'] = len(edgelist) if 'edges' not in encoded_data: encoded_data['edges'] = json.dumps(edgelist, separators=(',', ':')) insert = \ """ INSERT OR IGNORE INTO graph(num_nodes, num_edges, edges) VALUES (:num_nodes, :num_edges, :edges); """ cur.execute(insert, encoded_data)
0, module; 1, function_definition; 2, function_name:embed_ising; 3, parameters; 4, block; 5, identifier:source_h; 6, identifier:source_J; 7, identifier:embedding; 8, identifier:target_adjacency; 9, default_parameter; 10, expression_statement; 11, expression_statement; 12, expression_statement; 13, expression_statement; 14, return_statement; 15, identifier:chain_strength; 16, float:1.0; 17, comment:"""Embed an Ising problem onto a target graph. Args: source_h (dict[variable, bias]/list[bias]): Linear biases of the Ising problem. If a list, the list's indices are used as variable labels. source_J (dict[(variable, variable), bias]): Quadratic biases of the Ising problem. embedding (dict): Mapping from source graph to target graph as a dict of form {s: {t, ...}, ...}, where s is a source-model variable and t is a target-model variable. target_adjacency (dict/:class:`networkx.Graph`): Adjacency of the target graph as a dict of form {t: Nt, ...}, where t is a target-graph variable and Nt is its set of neighbours. chain_strength (float, optional): Magnitude of the quadratic bias (in SPIN-space) applied between variables to form a chain. Note that the energy penalty of chain breaks is 2 * `chain_strength`. Returns: tuple: A 2-tuple: dict[variable, bias]: Linear biases of the target Ising problem. dict[(variable, variable), bias]: Quadratic biases of the target Ising problem. Examples: This example embeds a fully connected :math:`K_3` graph onto a square target graph. Embedding is accomplished by an edge contraction operation on the target graph: target-nodes 2 and 3 are chained to represent source-node c. >>> import dimod >>> import networkx as nx >>> # Ising problem for a triangular source graph >>> h = {} >>> J = {('a', 'b'): 1, ('b', 'c'): 1, ('a', 'c'): 1} >>> # Target graph is a square graph >>> target = nx.cycle_graph(4) >>> # Embedding from source to target graph >>> embedding = {'a': {0}, 'b': {1}, 'c': {2, 3}} >>> # Embed the Ising problem >>> target_h, target_J = dimod.embed_ising(h, J, embedding, target) >>> target_J[(0, 1)] == J[('a', 'b')] True >>> target_J # doctest: +SKIP {(0, 1): 1.0, (0, 3): 1.0, (1, 2): 1.0, (2, 3): -1.0} This example embeds a fully connected :math:`K_3` graph onto the target graph of a dimod reference structured sampler, `StructureComposite`, using the dimod reference `ExactSolver` sampler with a square graph specified. Target-nodes 2 and 3 are chained to represent source-node c. >>> import dimod >>> # Ising problem for a triangular source graph >>> h = {} >>> J = {('a', 'b'): 1, ('b', 'c'): 1, ('a', 'c'): 1} >>> # Structured dimod sampler with a structure defined by a square graph >>> sampler = dimod.StructureComposite(dimod.ExactSolver(), [0, 1, 2, 3], [(0, 1), (1, 2), (2, 3), (0, 3)]) >>> # Embedding from source to target graph >>> embedding = {'a': {0}, 'b': {1}, 'c': {2, 3}} >>> # Embed the Ising problem >>> target_h, target_J = dimod.embed_ising(h, J, embedding, sampler.adjacency) >>> # Sample >>> samples = sampler.sample_ising(target_h, target_J) >>> for sample in samples.samples(n=3, sorted_by='energy'): # doctest: +SKIP ... print(sample) ... {0: 1, 1: -1, 2: -1, 3: -1} {0: 1, 1: 1, 2: -1, 3: -1} {0: -1, 1: 1, 2: -1, 3: -1} """; 18, assignment; 19, assignment; 20, assignment; 21, expression_list; 22, identifier:source_bqm; 23, call; 24, identifier:target_bqm; 25, call; 26, pattern_list; 27, call; 28, identifier:target_h; 29, identifier:target_J; 30, attribute; 31, argument_list; 32, identifier:embed_bqm; 33, argument_list; 34, identifier:target_h; 35, identifier:target_J; 36, identifier:__; 37, attribute; 38, argument_list; 39, attribute; 40, identifier:from_ising; 41, identifier:source_h; 42, identifier:source_J; 43, identifier:source_bqm; 44, identifier:embedding; 45, identifier:target_adjacency; 46, keyword_argument; 47, identifier:target_bqm; 48, identifier:to_ising; 49, identifier:dimod; 50, identifier:BinaryQuadraticModel; 51, identifier:chain_strength; 52, identifier:chain_strength
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 3, 9; 4, 10; 4, 11; 4, 12; 4, 13; 4, 14; 9, 15; 9, 16; 10, 17; 11, 18; 12, 19; 13, 20; 14, 21; 18, 22; 18, 23; 19, 24; 19, 25; 20, 26; 20, 27; 21, 28; 21, 29; 23, 30; 23, 31; 25, 32; 25, 33; 26, 34; 26, 35; 26, 36; 27, 37; 27, 38; 30, 39; 30, 40; 31, 41; 31, 42; 33, 43; 33, 44; 33, 45; 33, 46; 37, 47; 37, 48; 39, 49; 39, 50; 46, 51; 46, 52
def embed_ising(source_h, source_J, embedding, target_adjacency, chain_strength=1.0): """Embed an Ising problem onto a target graph. Args: source_h (dict[variable, bias]/list[bias]): Linear biases of the Ising problem. If a list, the list's indices are used as variable labels. source_J (dict[(variable, variable), bias]): Quadratic biases of the Ising problem. embedding (dict): Mapping from source graph to target graph as a dict of form {s: {t, ...}, ...}, where s is a source-model variable and t is a target-model variable. target_adjacency (dict/:class:`networkx.Graph`): Adjacency of the target graph as a dict of form {t: Nt, ...}, where t is a target-graph variable and Nt is its set of neighbours. chain_strength (float, optional): Magnitude of the quadratic bias (in SPIN-space) applied between variables to form a chain. Note that the energy penalty of chain breaks is 2 * `chain_strength`. Returns: tuple: A 2-tuple: dict[variable, bias]: Linear biases of the target Ising problem. dict[(variable, variable), bias]: Quadratic biases of the target Ising problem. Examples: This example embeds a fully connected :math:`K_3` graph onto a square target graph. Embedding is accomplished by an edge contraction operation on the target graph: target-nodes 2 and 3 are chained to represent source-node c. >>> import dimod >>> import networkx as nx >>> # Ising problem for a triangular source graph >>> h = {} >>> J = {('a', 'b'): 1, ('b', 'c'): 1, ('a', 'c'): 1} >>> # Target graph is a square graph >>> target = nx.cycle_graph(4) >>> # Embedding from source to target graph >>> embedding = {'a': {0}, 'b': {1}, 'c': {2, 3}} >>> # Embed the Ising problem >>> target_h, target_J = dimod.embed_ising(h, J, embedding, target) >>> target_J[(0, 1)] == J[('a', 'b')] True >>> target_J # doctest: +SKIP {(0, 1): 1.0, (0, 3): 1.0, (1, 2): 1.0, (2, 3): -1.0} This example embeds a fully connected :math:`K_3` graph onto the target graph of a dimod reference structured sampler, `StructureComposite`, using the dimod reference `ExactSolver` sampler with a square graph specified. Target-nodes 2 and 3 are chained to represent source-node c. >>> import dimod >>> # Ising problem for a triangular source graph >>> h = {} >>> J = {('a', 'b'): 1, ('b', 'c'): 1, ('a', 'c'): 1} >>> # Structured dimod sampler with a structure defined by a square graph >>> sampler = dimod.StructureComposite(dimod.ExactSolver(), [0, 1, 2, 3], [(0, 1), (1, 2), (2, 3), (0, 3)]) >>> # Embedding from source to target graph >>> embedding = {'a': {0}, 'b': {1}, 'c': {2, 3}} >>> # Embed the Ising problem >>> target_h, target_J = dimod.embed_ising(h, J, embedding, sampler.adjacency) >>> # Sample >>> samples = sampler.sample_ising(target_h, target_J) >>> for sample in samples.samples(n=3, sorted_by='energy'): # doctest: +SKIP ... print(sample) ... {0: 1, 1: -1, 2: -1, 3: -1} {0: 1, 1: 1, 2: -1, 3: -1} {0: -1, 1: 1, 2: -1, 3: -1} """ source_bqm = dimod.BinaryQuadraticModel.from_ising(source_h, source_J) target_bqm = embed_bqm(source_bqm, embedding, target_adjacency, chain_strength=chain_strength) target_h, target_J, __ = target_bqm.to_ising() return target_h, target_J
0, module; 1, function_definition; 2, function_name:add_identity; 3, parameters; 4, block; 5, identifier:cls; 6, identifier:db; 7, identifier:identity; 8, identifier:backend; 9, expression_statement; 10, expression_statement; 11, try_statement; 12, if_statement; 13, return_statement; 14, comment:""" Load and identity list from backend in Sorting Hat """; 15, assignment; 16, block; 17, except_clause; 18, except_clause; 19, except_clause; 20, except_clause; 21, boolean_operator; 22, block; 23, identifier:uuid; 24, identifier:uuid; 25, None; 26, expression_statement; 27, expression_statement; 28, expression_statement; 29, expression_statement; 30, as_pattern; 31, block; 32, as_pattern; 33, block; 34, as_pattern; 35, block; 36, as_pattern; 37, block; 38, comparison_operator:'company' in identity; 39, comparison_operator:identity['company'] is not None; 40, try_statement; 41, assignment; 42, call; 43, assignment; 44, call; 45, identifier:AlreadyExistsError; 46, as_pattern_target; 47, expression_statement; 48, identifier:InvalidValueError; 49, as_pattern_target; 50, expression_statement; 51, identifier:UnicodeEncodeError; 52, as_pattern_target; 53, expression_statement; 54, identifier:Exception; 55, as_pattern_target; 56, expression_statement; 57, string; 58, identifier:identity; 59, subscript; 60, None; 61, block; 62, except_clause; 63, identifier:uuid; 64, call; 65, attribute; 66, argument_list; 67, identifier:profile; 68, dictionary; 69, attribute; 70, argument_list; 71, identifier:ex; 72, assignment; 73, identifier:ex; 74, call; 75, identifier:ex; 76, call; 77, identifier:ex; 78, call; 79, string_content:company; 80, identifier:identity; 81, string; 82, expression_statement; 83, expression_statement; 84, identifier:AlreadyExistsError; 85, block; 86, attribute; 87, argument_list; 88, identifier:logger; 89, identifier:debug; 90, string:"New sortinghat identity %s %s,%s,%s "; 91, identifier:uuid; 92, subscript; 93, subscript; 94, subscript; 95, pair; 96, pair; 97, identifier:api; 98, identifier:edit_profile; 99, identifier:db; 100, identifier:uuid; 101, dictionary_splat; 102, identifier:uuid; 103, attribute; 104, attribute; 105, argument_list; 106, attribute; 107, argument_list; 108, attribute; 109, argument_list; 110, string_content:company; 111, call; 112, call; 113, pass_statement; 114, identifier:api; 115, identifier:add_identity; 116, identifier:db; 117, identifier:backend; 118, subscript; 119, subscript; 120, subscript; 121, identifier:identity; 122, string; 123, identifier:identity; 124, string; 125, identifier:identity; 126, string; 127, string:"name"; 128, conditional_expression:identity['name'] if identity['name'] else identity['username']; 129, string:"email"; 130, subscript; 131, identifier:profile; 132, identifier:ex; 133, identifier:eid; 134, identifier:logger; 135, identifier:warning; 136, string:"Trying to add a None identity. Ignoring it."; 137, identifier:logger; 138, identifier:warning; 139, string:"UnicodeEncodeError. Ignoring it. %s %s %s"; 140, subscript; 141, subscript; 142, subscript; 143, identifier:logger; 144, identifier:warning; 145, string:"Unknown exception adding identity. Ignoring it. %s %s %s"; 146, subscript; 147, subscript; 148, subscript; 149, keyword_argument; 150, attribute; 151, argument_list; 152, attribute; 153, argument_list; 154, identifier:identity; 155, string; 156, identifier:identity; 157, string; 158, identifier:identity; 159, string; 160, string_content:username; 161, string_content:name; 162, string_content:email; 163, subscript; 164, subscript; 165, subscript; 166, identifier:identity; 167, string; 168, identifier:identity; 169, string; 170, identifier:identity; 171, string; 172, identifier:identity; 173, string; 174, identifier:identity; 175, string; 176, identifier:identity; 177, string; 178, identifier:identity; 179, string; 180, identifier:exc_info; 181, True; 182, identifier:api; 183, identifier:add_organization; 184, identifier:db; 185, subscript; 186, identifier:api; 187, identifier:add_enrollment; 188, identifier:db; 189, identifier:uuid; 190, subscript; 191, call; 192, call; 193, string_content:email; 194, string_content:name; 195, string_content:username; 196, identifier:identity; 197, string; 198, identifier:identity; 199, string; 200, identifier:identity; 201, string; 202, string_content:email; 203, string_content:email; 204, string_content:name; 205, string_content:username; 206, string_content:email; 207, string_content:name; 208, string_content:username; 209, identifier:identity; 210, string; 211, identifier:identity; 212, string; 213, identifier:datetime; 214, argument_list; 215, identifier:datetime; 216, argument_list; 217, string_content:name; 218, string_content:name; 219, string_content:username; 220, string_content:company; 221, string_content:company; 222, integer:1900; 223, integer:1; 224, integer:1; 225, integer:2100; 226, integer:1; 227, integer:1
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 9, 14; 10, 15; 11, 16; 11, 17; 11, 18; 11, 19; 11, 20; 12, 21; 12, 22; 13, 23; 15, 24; 15, 25; 16, 26; 16, 27; 16, 28; 16, 29; 17, 30; 17, 31; 18, 32; 18, 33; 19, 34; 19, 35; 20, 36; 20, 37; 21, 38; 21, 39; 22, 40; 26, 41; 27, 42; 28, 43; 29, 44; 30, 45; 30, 46; 31, 47; 32, 48; 32, 49; 33, 50; 34, 51; 34, 52; 35, 53; 36, 54; 36, 55; 37, 56; 38, 57; 38, 58; 39, 59; 39, 60; 40, 61; 40, 62; 41, 63; 41, 64; 42, 65; 42, 66; 43, 67; 43, 68; 44, 69; 44, 70; 46, 71; 47, 72; 49, 73; 50, 74; 52, 75; 53, 76; 55, 77; 56, 78; 57, 79; 59, 80; 59, 81; 61, 82; 61, 83; 62, 84; 62, 85; 64, 86; 64, 87; 65, 88; 65, 89; 66, 90; 66, 91; 66, 92; 66, 93; 66, 94; 68, 95; 68, 96; 69, 97; 69, 98; 70, 99; 70, 100; 70, 101; 72, 102; 72, 103; 74, 104; 74, 105; 76, 106; 76, 107; 78, 108; 78, 109; 81, 110; 82, 111; 83, 112; 85, 113; 86, 114; 86, 115; 87, 116; 87, 117; 87, 118; 87, 119; 87, 120; 92, 121; 92, 122; 93, 123; 93, 124; 94, 125; 94, 126; 95, 127; 95, 128; 96, 129; 96, 130; 101, 131; 103, 132; 103, 133; 104, 134; 104, 135; 105, 136; 106, 137; 106, 138; 107, 139; 107, 140; 107, 141; 107, 142; 108, 143; 108, 144; 109, 145; 109, 146; 109, 147; 109, 148; 109, 149; 111, 150; 111, 151; 112, 152; 112, 153; 118, 154; 118, 155; 119, 156; 119, 157; 120, 158; 120, 159; 122, 160; 124, 161; 126, 162; 128, 163; 128, 164; 128, 165; 130, 166; 130, 167; 140, 168; 140, 169; 141, 170; 141, 171; 142, 172; 142, 173; 146, 174; 146, 175; 147, 176; 147, 177; 148, 178; 148, 179; 149, 180; 149, 181; 150, 182; 150, 183; 151, 184; 151, 185; 152, 186; 152, 187; 153, 188; 153, 189; 153, 190; 153, 191; 153, 192; 155, 193; 157, 194; 159, 195; 163, 196; 163, 197; 164, 198; 164, 199; 165, 200; 165, 201; 167, 202; 169, 203; 171, 204; 173, 205; 175, 206; 177, 207; 179, 208; 185, 209; 185, 210; 190, 211; 190, 212; 191, 213; 191, 214; 192, 215; 192, 216; 197, 217; 199, 218; 201, 219; 210, 220; 212, 221; 214, 222; 214, 223; 214, 224; 216, 225; 216, 226; 216, 227
def add_identity(cls, db, identity, backend): """ Load and identity list from backend in Sorting Hat """ uuid = None try: uuid = api.add_identity(db, backend, identity['email'], identity['name'], identity['username']) logger.debug("New sortinghat identity %s %s,%s,%s ", uuid, identity['username'], identity['name'], identity['email']) profile = {"name": identity['name'] if identity['name'] else identity['username'], "email": identity['email']} api.edit_profile(db, uuid, **profile) except AlreadyExistsError as ex: uuid = ex.eid except InvalidValueError as ex: logger.warning("Trying to add a None identity. Ignoring it.") except UnicodeEncodeError as ex: logger.warning("UnicodeEncodeError. Ignoring it. %s %s %s", identity['email'], identity['name'], identity['username']) except Exception as ex: logger.warning("Unknown exception adding identity. Ignoring it. %s %s %s", identity['email'], identity['name'], identity['username'], exc_info=True) if 'company' in identity and identity['company'] is not None: try: api.add_organization(db, identity['company']) api.add_enrollment(db, uuid, identity['company'], datetime(1900, 1, 1), datetime(2100, 1, 1)) except AlreadyExistsError: pass return uuid
0, module; 1, function_definition; 2, function_name:add_identities; 3, parameters; 4, block; 5, identifier:cls; 6, identifier:db; 7, identifier:identities; 8, identifier:backend; 9, expression_statement; 10, expression_statement; 11, expression_statement; 12, for_statement; 13, expression_statement; 14, comment:""" Load identities list from backend in Sorting Hat """; 15, call; 16, assignment; 17, identifier:identity; 18, identifier:identities; 19, block; 20, call; 21, attribute; 22, argument_list; 23, identifier:total; 24, integer:0; 25, try_statement; 26, attribute; 27, argument_list; 28, identifier:logger; 29, identifier:info; 30, string:"Adding the identities to SortingHat"; 31, block; 32, except_clause; 33, identifier:logger; 34, identifier:info; 35, string:"Total identities added to SH: %i"; 36, identifier:total; 37, expression_statement; 38, expression_statement; 39, as_pattern; 40, block; 41, call; 42, augmented_assignment; 43, identifier:Exception; 44, as_pattern_target; 45, expression_statement; 46, continue_statement; 47, attribute; 48, argument_list; 49, identifier:total; 50, integer:1; 51, identifier:e; 52, call; 53, identifier:cls; 54, identifier:add_identity; 55, identifier:db; 56, identifier:identity; 57, identifier:backend; 58, attribute; 59, argument_list; 60, identifier:logger; 61, identifier:error; 62, binary_operator:"Unexcepted error when adding identities: %s" % e; 63, string:"Unexcepted error when adding identities: %s"; 64, identifier:e
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 9, 14; 10, 15; 11, 16; 12, 17; 12, 18; 12, 19; 13, 20; 15, 21; 15, 22; 16, 23; 16, 24; 19, 25; 20, 26; 20, 27; 21, 28; 21, 29; 22, 30; 25, 31; 25, 32; 26, 33; 26, 34; 27, 35; 27, 36; 31, 37; 31, 38; 32, 39; 32, 40; 37, 41; 38, 42; 39, 43; 39, 44; 40, 45; 40, 46; 41, 47; 41, 48; 42, 49; 42, 50; 44, 51; 45, 52; 47, 53; 47, 54; 48, 55; 48, 56; 48, 57; 52, 58; 52, 59; 58, 60; 58, 61; 59, 62; 62, 63; 62, 64
def add_identities(cls, db, identities, backend): """ Load identities list from backend in Sorting Hat """ logger.info("Adding the identities to SortingHat") total = 0 for identity in identities: try: cls.add_identity(db, identity, backend) total += 1 except Exception as e: logger.error("Unexcepted error when adding identities: %s" % e) continue logger.info("Total identities added to SH: %i", total)
0, module; 1, function_definition; 2, function_name:remove_identity; 3, parameters; 4, block; 5, identifier:cls; 6, identifier:sh_db; 7, identifier:ident_id; 8, expression_statement; 9, expression_statement; 10, try_statement; 11, return_statement; 12, comment:"""Delete an identity from SortingHat. :param sh_db: SortingHat database :param ident_id: identity identifier """; 13, assignment; 14, block; 15, except_clause; 16, identifier:success; 17, identifier:success; 18, False; 19, expression_statement; 20, expression_statement; 21, expression_statement; 22, as_pattern; 23, block; 24, call; 25, call; 26, assignment; 27, identifier:Exception; 28, as_pattern_target; 29, expression_statement; 30, attribute; 31, argument_list; 32, attribute; 33, argument_list; 34, identifier:success; 35, True; 36, identifier:e; 37, call; 38, identifier:api; 39, identifier:delete_identity; 40, identifier:sh_db; 41, identifier:ident_id; 42, identifier:logger; 43, identifier:debug; 44, string:"Identity %s deleted"; 45, identifier:ident_id; 46, attribute; 47, argument_list; 48, identifier:logger; 49, identifier:debug; 50, string:"Identity not deleted due to %s"; 51, call; 52, identifier:str; 53, argument_list; 54, identifier:e
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 4, 8; 4, 9; 4, 10; 4, 11; 8, 12; 9, 13; 10, 14; 10, 15; 11, 16; 13, 17; 13, 18; 14, 19; 14, 20; 14, 21; 15, 22; 15, 23; 19, 24; 20, 25; 21, 26; 22, 27; 22, 28; 23, 29; 24, 30; 24, 31; 25, 32; 25, 33; 26, 34; 26, 35; 28, 36; 29, 37; 30, 38; 30, 39; 31, 40; 31, 41; 32, 42; 32, 43; 33, 44; 33, 45; 37, 46; 37, 47; 46, 48; 46, 49; 47, 50; 47, 51; 51, 52; 51, 53; 53, 54
def remove_identity(cls, sh_db, ident_id): """Delete an identity from SortingHat. :param sh_db: SortingHat database :param ident_id: identity identifier """ success = False try: api.delete_identity(sh_db, ident_id) logger.debug("Identity %s deleted", ident_id) success = True except Exception as e: logger.debug("Identity not deleted due to %s", str(e)) return success
0, module; 1, function_definition; 2, function_name:remove_unique_identity; 3, parameters; 4, block; 5, identifier:cls; 6, identifier:sh_db; 7, identifier:uuid; 8, expression_statement; 9, expression_statement; 10, try_statement; 11, return_statement; 12, comment:"""Delete a unique identity from SortingHat. :param sh_db: SortingHat database :param uuid: Unique identity identifier """; 13, assignment; 14, block; 15, except_clause; 16, identifier:success; 17, identifier:success; 18, False; 19, expression_statement; 20, expression_statement; 21, expression_statement; 22, as_pattern; 23, block; 24, call; 25, call; 26, assignment; 27, identifier:Exception; 28, as_pattern_target; 29, expression_statement; 30, attribute; 31, argument_list; 32, attribute; 33, argument_list; 34, identifier:success; 35, True; 36, identifier:e; 37, call; 38, identifier:api; 39, identifier:delete_unique_identity; 40, identifier:sh_db; 41, identifier:uuid; 42, identifier:logger; 43, identifier:debug; 44, string:"Unique identity %s deleted"; 45, identifier:uuid; 46, attribute; 47, argument_list; 48, identifier:logger; 49, identifier:debug; 50, string:"Unique identity not deleted due to %s"; 51, call; 52, identifier:str; 53, argument_list; 54, identifier:e
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 4, 8; 4, 9; 4, 10; 4, 11; 8, 12; 9, 13; 10, 14; 10, 15; 11, 16; 13, 17; 13, 18; 14, 19; 14, 20; 14, 21; 15, 22; 15, 23; 19, 24; 20, 25; 21, 26; 22, 27; 22, 28; 23, 29; 24, 30; 24, 31; 25, 32; 25, 33; 26, 34; 26, 35; 28, 36; 29, 37; 30, 38; 30, 39; 31, 40; 31, 41; 32, 42; 32, 43; 33, 44; 33, 45; 37, 46; 37, 47; 46, 48; 46, 49; 47, 50; 47, 51; 51, 52; 51, 53; 53, 54
def remove_unique_identity(cls, sh_db, uuid): """Delete a unique identity from SortingHat. :param sh_db: SortingHat database :param uuid: Unique identity identifier """ success = False try: api.delete_unique_identity(sh_db, uuid) logger.debug("Unique identity %s deleted", uuid) success = True except Exception as e: logger.debug("Unique identity not deleted due to %s", str(e)) return success
0, module; 1, function_definition; 2, function_name:unique_identities; 3, parameters; 4, block; 5, identifier:cls; 6, identifier:sh_db; 7, expression_statement; 8, try_statement; 9, comment:"""List the unique identities available in SortingHat. :param sh_db: SortingHat database """; 10, block; 11, except_clause; 12, for_statement; 13, as_pattern; 14, block; 15, identifier:unique_identity; 16, call; 17, block; 18, identifier:Exception; 19, as_pattern_target; 20, expression_statement; 21, attribute; 22, argument_list; 23, expression_statement; 24, identifier:e; 25, call; 26, identifier:api; 27, identifier:unique_identities; 28, identifier:sh_db; 29, yield; 30, attribute; 31, argument_list; 32, identifier:unique_identity; 33, identifier:logger; 34, identifier:debug; 35, string:"Unique identities not returned from SortingHat due to %s"; 36, call; 37, identifier:str; 38, argument_list; 39, identifier:e
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 7, 9; 8, 10; 8, 11; 10, 12; 11, 13; 11, 14; 12, 15; 12, 16; 12, 17; 13, 18; 13, 19; 14, 20; 16, 21; 16, 22; 17, 23; 19, 24; 20, 25; 21, 26; 21, 27; 22, 28; 23, 29; 25, 30; 25, 31; 29, 32; 30, 33; 30, 34; 31, 35; 31, 36; 36, 37; 36, 38; 38, 39
def unique_identities(cls, sh_db): """List the unique identities available in SortingHat. :param sh_db: SortingHat database """ try: for unique_identity in api.unique_identities(sh_db): yield unique_identity except Exception as e: logger.debug("Unique identities not returned from SortingHat due to %s", str(e))
0, module; 1, function_definition; 2, function_name:refresh_identities; 3, parameters; 4, block; 5, identifier:enrich_backend; 6, default_parameter; 7, default_parameter; 8, expression_statement; 9, function_definition; 10, expression_statement; 11, expression_statement; 12, expression_statement; 13, expression_statement; 14, if_statement; 15, expression_statement; 16, identifier:author_field; 17, None; 18, identifier:author_values; 19, None; 20, comment:"""Refresh identities in enriched index. Retrieve items from the enriched index corresponding to enrich_backend, and update their identities information, with fresh data from the SortingHat database. Instead of the whole index, only items matching the filter_author filter are fitered, if that parameters is not None. :param enrich_backend: enriched backend to update :param author_field: field to match items authored by a user :param author_values: values of the authored field to match items """; 21, function_name:update_items; 22, parameters; 23, block; 24, call; 25, assignment; 26, assignment; 27, call; 28, comparison_operator:author_field is None; 29, comment:# No filter, update all items; 30, block; 31, else_clause; 32, call; 33, identifier:new_filter_author; 34, for_statement; 35, attribute; 36, argument_list; 37, identifier:total; 38, integer:0; 39, identifier:max_ids; 40, attribute; 41, attribute; 42, argument_list; 43, identifier:author_field; 44, None; 45, for_statement; 46, block; 47, attribute; 48, argument_list; 49, identifier:eitem; 50, call; 51, block; 52, identifier:logger; 53, identifier:debug; 54, string:"Refreshing identities fields from %s"; 55, call; 56, attribute; 57, identifier:max_items_clause; 58, identifier:logger; 59, identifier:debug; 60, string; 61, identifier:item; 62, call; 63, block; 64, expression_statement; 65, for_statement; 66, if_statement; 67, identifier:logger; 68, identifier:info; 69, string:"Total eitems refreshed for identities fields %i"; 70, identifier:total; 71, attribute; 72, argument_list; 73, expression_statement; 74, try_statement; 75, expression_statement; 76, expression_statement; 77, expression_statement; 78, attribute; 79, argument_list; 80, identifier:enrich_backend; 81, identifier:elastic; 82, string_content:Refreshing identities; 83, identifier:update_items; 84, argument_list; 85, expression_statement; 86, expression_statement; 87, assignment; 88, identifier:author_value; 89, identifier:author_values; 90, block; 91, comparison_operator:len(to_refresh) > 0; 92, block; 93, identifier:enrich_backend; 94, identifier:fetch; 95, identifier:new_filter_author; 96, assignment; 97, block; 98, except_clause; 99, assignment; 100, call; 101, yield; 102, attribute; 103, identifier:anonymize_url; 104, attribute; 105, None; 106, yield; 107, augmented_assignment; 108, identifier:to_refresh; 109, list; 110, expression_statement; 111, if_statement; 112, call; 113, integer:0; 114, expression_statement; 115, for_statement; 116, identifier:roles; 117, None; 118, expression_statement; 119, identifier:AttributeError; 120, block; 121, identifier:new_identities; 122, call; 123, attribute; 124, argument_list; 125, identifier:eitem; 126, identifier:enrich_backend; 127, identifier:elastic; 128, attribute; 129, identifier:index_url; 130, identifier:item; 131, identifier:total; 132, integer:1; 133, call; 134, comparison_operator:len(to_refresh) > max_ids; 135, block; 136, identifier:len; 137, argument_list; 138, assignment; 139, identifier:item; 140, call; 141, block; 142, assignment; 143, pass_statement; 144, attribute; 145, argument_list; 146, identifier:eitem; 147, identifier:update; 148, identifier:new_identities; 149, identifier:enrich_backend; 150, identifier:elastic; 151, attribute; 152, argument_list; 153, call; 154, identifier:max_ids; 155, expression_statement; 156, for_statement; 157, expression_statement; 158, identifier:to_refresh; 159, identifier:filter_author; 160, dictionary; 161, identifier:update_items; 162, argument_list; 163, expression_statement; 164, expression_statement; 165, identifier:roles; 166, attribute; 167, identifier:enrich_backend; 168, identifier:get_item_sh_from_id; 169, identifier:eitem; 170, identifier:roles; 171, identifier:to_refresh; 172, identifier:append; 173, identifier:author_value; 174, identifier:len; 175, argument_list; 176, assignment; 177, identifier:item; 178, call; 179, block; 180, assignment; 181, pair; 182, pair; 183, identifier:filter_author; 184, yield; 185, augmented_assignment; 186, identifier:enrich_backend; 187, identifier:roles; 188, identifier:to_refresh; 189, identifier:filter_author; 190, dictionary; 191, identifier:update_items; 192, argument_list; 193, expression_statement; 194, expression_statement; 195, identifier:to_refresh; 196, list; 197, string:"name"; 198, identifier:author_field; 199, string:"value"; 200, identifier:to_refresh; 201, identifier:item; 202, identifier:total; 203, integer:1; 204, pair; 205, pair; 206, identifier:filter_author; 207, yield; 208, augmented_assignment; 209, string:"name"; 210, identifier:author_field; 211, string:"value"; 212, identifier:to_refresh; 213, identifier:item; 214, identifier:total; 215, integer:1
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 4, 14; 4, 15; 6, 16; 6, 17; 7, 18; 7, 19; 8, 20; 9, 21; 9, 22; 9, 23; 10, 24; 11, 25; 12, 26; 13, 27; 14, 28; 14, 29; 14, 30; 14, 31; 15, 32; 22, 33; 23, 34; 24, 35; 24, 36; 25, 37; 25, 38; 26, 39; 26, 40; 27, 41; 27, 42; 28, 43; 28, 44; 30, 45; 31, 46; 32, 47; 32, 48; 34, 49; 34, 50; 34, 51; 35, 52; 35, 53; 36, 54; 36, 55; 40, 56; 40, 57; 41, 58; 41, 59; 42, 60; 45, 61; 45, 62; 45, 63; 46, 64; 46, 65; 46, 66; 47, 67; 47, 68; 48, 69; 48, 70; 50, 71; 50, 72; 51, 73; 51, 74; 51, 75; 51, 76; 51, 77; 55, 78; 55, 79; 56, 80; 56, 81; 60, 82; 62, 83; 62, 84; 63, 85; 63, 86; 64, 87; 65, 88; 65, 89; 65, 90; 66, 91; 66, 92; 71, 93; 71, 94; 72, 95; 73, 96; 74, 97; 74, 98; 75, 99; 76, 100; 77, 101; 78, 102; 78, 103; 79, 104; 84, 105; 85, 106; 86, 107; 87, 108; 87, 109; 90, 110; 90, 111; 91, 112; 91, 113; 92, 114; 92, 115; 96, 116; 96, 117; 97, 118; 98, 119; 98, 120; 99, 121; 99, 122; 100, 123; 100, 124; 101, 125; 102, 126; 102, 127; 104, 128; 104, 129; 106, 130; 107, 131; 107, 132; 110, 133; 111, 134; 111, 135; 112, 136; 112, 137; 114, 138; 115, 139; 115, 140; 115, 141; 118, 142; 120, 143; 122, 144; 122, 145; 123, 146; 123, 147; 124, 148; 128, 149; 128, 150; 133, 151; 133, 152; 134, 153; 134, 154; 135, 155; 135, 156; 135, 157; 137, 158; 138, 159; 138, 160; 140, 161; 140, 162; 141, 163; 141, 164; 142, 165; 142, 166; 144, 167; 144, 168; 145, 169; 145, 170; 151, 171; 151, 172; 152, 173; 153, 174; 153, 175; 155, 176; 156, 177; 156, 178; 156, 179; 157, 180; 160, 181; 160, 182; 162, 183; 163, 184; 164, 185; 166, 186; 166, 187; 175, 188; 176, 189; 176, 190; 178, 191; 178, 192; 179, 193; 179, 194; 180, 195; 180, 196; 181, 197; 181, 198; 182, 199; 182, 200; 184, 201; 185, 202; 185, 203; 190, 204; 190, 205; 192, 206; 193, 207; 194, 208; 204, 209; 204, 210; 205, 211; 205, 212; 207, 213; 208, 214; 208, 215
def refresh_identities(enrich_backend, author_field=None, author_values=None): """Refresh identities in enriched index. Retrieve items from the enriched index corresponding to enrich_backend, and update their identities information, with fresh data from the SortingHat database. Instead of the whole index, only items matching the filter_author filter are fitered, if that parameters is not None. :param enrich_backend: enriched backend to update :param author_field: field to match items authored by a user :param author_values: values of the authored field to match items """ def update_items(new_filter_author): for eitem in enrich_backend.fetch(new_filter_author): roles = None try: roles = enrich_backend.roles except AttributeError: pass new_identities = enrich_backend.get_item_sh_from_id(eitem, roles) eitem.update(new_identities) yield eitem logger.debug("Refreshing identities fields from %s", enrich_backend.elastic.anonymize_url(enrich_backend.elastic.index_url)) total = 0 max_ids = enrich_backend.elastic.max_items_clause logger.debug('Refreshing identities') if author_field is None: # No filter, update all items for item in update_items(None): yield item total += 1 else: to_refresh = [] for author_value in author_values: to_refresh.append(author_value) if len(to_refresh) > max_ids: filter_author = {"name": author_field, "value": to_refresh} for item in update_items(filter_author): yield item total += 1 to_refresh = [] if len(to_refresh) > 0: filter_author = {"name": author_field, "value": to_refresh} for item in update_items(filter_author): yield item total += 1 logger.info("Total eitems refreshed for identities fields %i", total)
0, module; 1, function_definition; 2, function_name:delete_orphan_unique_identities; 3, parameters; 4, block; 5, identifier:es; 6, identifier:sortinghat_db; 7, identifier:current_data_source; 8, identifier:active_data_sources; 9, expression_statement; 10, function_definition; 11, function_definition; 12, function_definition; 13, function_definition; 14, expression_statement; 15, expression_statement; 16, expression_statement; 17, comment:# Collect all unique identities; 18, for_statement; 19, comment:# Check that no uuids have been left to process; 20, if_statement; 21, expression_statement; 22, expression_statement; 23, comment:"""Delete all unique identities which appear in SortingHat, but not in the IDENTITIES_INDEX. :param es: ElasticSearchDSL object :param sortinghat_db: instance of the SortingHat database :param current_data_source: current data source :param active_data_sources: list of active data sources """; 24, function_name:get_uuids_in_index; 25, parameters; 26, block; 27, function_name:delete_unique_identities; 28, parameters; 29, block; 30, function_name:delete_identities; 31, parameters; 32, block; 33, function_name:has_identities_in_data_sources; 34, parameters; 35, block; 36, assignment; 37, assignment; 38, assignment; 39, identifier:unique_identity; 40, call; 41, comment:# Remove a unique identity if all its identities are in non active data source; 42, block; 43, identifier:uuids_to_process; 44, comment:# Find which uuids to be processed exist in IDENTITIES_INDEX; 45, block; 46, call; 47, call; 48, identifier:target_uuids; 49, expression_statement; 50, expression_statement; 51, expression_statement; 52, if_statement; 53, return_statement; 54, identifier:target_uuids; 55, expression_statement; 56, expression_statement; 57, for_statement; 58, return_statement; 59, identifier:unique_ident; 60, identifier:data_sources; 61, expression_statement; 62, expression_statement; 63, for_statement; 64, return_statement; 65, identifier:unique_ident; 66, identifier:data_sources; 67, expression_statement; 68, expression_statement; 69, for_statement; 70, return_statement; 71, identifier:deleted_unique_identities; 72, integer:0; 73, identifier:deleted_identities; 74, integer:0; 75, identifier:uuids_to_process; 76, list; 77, attribute; 78, argument_list; 79, if_statement; 80, comment:# Remove the identities of non active data source for a given unique identity; 81, expression_statement; 82, comment:# Process only the unique identities that include the current data source, since; 83, comment:# it may be that unique identities in other data source have not been; 84, comment:# added yet to IDENTITIES_INDEX; 85, if_statement; 86, comment:# Add the uuid to the list to check its existence in the IDENTITIES_INDEX; 87, expression_statement; 88, comment:# Process the uuids in block of SIZE_SCROLL_IDENTITIES_INDEX; 89, if_statement; 90, comment:# Find which uuids to be processed exist in IDENTITIES_INDEX; 91, expression_statement; 92, expression_statement; 93, comment:# Find the uuids which exist in SortingHat but not in IDENTITIES_INDEX; 94, expression_statement; 95, comment:# Delete the orphan uuids from SortingHat; 96, expression_statement; 97, comment:# Reset the list; 98, expression_statement; 99, expression_statement; 100, expression_statement; 101, comment:# Find the uuids which exist in SortingHat but not in IDENTITIES_INDEX; 102, expression_statement; 103, comment:# Delete the orphan uuids from SortingHat; 104, expression_statement; 105, attribute; 106, argument_list; 107, attribute; 108, argument_list; 109, comment:"""Find a set of uuids in IDENTITIES_INDEX and return them if exist. :param target_uuids: target uuids """; 110, assignment; 111, assignment; 112, comparison_operator:page['hits']['total'] != 0; 113, block; 114, identifier:hits; 115, comment:"""Delete a list of uuids from SortingHat. :param target_uuids: uuids to be deleted """; 116, assignment; 117, identifier:uuid; 118, identifier:target_uuids; 119, block; 120, identifier:count; 121, comment:"""Remove the identities in non active data sources. :param unique_ident: unique identity object :param data_sources: target data sources """; 122, assignment; 123, identifier:ident; 124, attribute; 125, block; 126, identifier:count; 127, comment:"""Check if a unique identity has identities in a set of data sources. :param unique_ident: unique identity object :param data_sources: target data sources """; 128, assignment; 129, identifier:ident; 130, attribute; 131, block; 132, identifier:in_active; 133, identifier:SortingHat; 134, identifier:unique_identities; 135, identifier:sortinghat_db; 136, not_operator; 137, block; 138, augmented_assignment; 139, not_operator; 140, block; 141, call; 142, comparison_operator:len(uuids_to_process) != SIZE_SCROLL_IDENTITIES_INDEX; 143, block; 144, assignment; 145, assignment; 146, assignment; 147, augmented_assignment; 148, assignment; 149, assignment; 150, assignment; 151, assignment; 152, augmented_assignment; 153, identifier:logger; 154, identifier:debug; 155, string:"[identities retention] Total orphan unique identities deleted from SH: %i"; 156, identifier:deleted_unique_identities; 157, identifier:logger; 158, identifier:debug; 159, string:"[identities retention] Total identities in non-active data sources deleted from SH: %i"; 160, identifier:deleted_identities; 161, identifier:page; 162, call; 163, identifier:hits; 164, list; 165, subscript; 166, integer:0; 167, expression_statement; 168, identifier:count; 169, integer:0; 170, expression_statement; 171, expression_statement; 172, identifier:count; 173, integer:0; 174, identifier:unique_ident; 175, identifier:identities; 176, if_statement; 177, identifier:in_active; 178, False; 179, identifier:unique_ident; 180, identifier:identities; 181, if_statement; 182, call; 183, expression_statement; 184, continue_statement; 185, identifier:deleted_identities; 186, call; 187, call; 188, continue_statement; 189, attribute; 190, argument_list; 191, call; 192, identifier:SIZE_SCROLL_IDENTITIES_INDEX; 193, continue_statement; 194, identifier:results; 195, call; 196, identifier:uuids_found; 197, list_comprehension; 198, identifier:orphan_uuids; 199, binary_operator:set(uuids_to_process) - set(uuids_found); 200, identifier:deleted_unique_identities; 201, call; 202, identifier:uuids_to_process; 203, list; 204, identifier:results; 205, call; 206, identifier:uuids_found; 207, list_comprehension; 208, identifier:orphan_uuids; 209, binary_operator:set(uuids_to_process) - set(uuids_found); 210, identifier:deleted_unique_identities; 211, call; 212, attribute; 213, argument_list; 214, subscript; 215, string; 216, assignment; 217, assignment; 218, assignment; 219, comparison_operator:ident.source not in data_sources; 220, block; 221, comparison_operator:ident.source in data_sources; 222, block; 223, identifier:has_identities_in_data_sources; 224, argument_list; 225, augmented_assignment; 226, identifier:delete_identities; 227, argument_list; 228, identifier:has_identities_in_data_sources; 229, argument_list; 230, identifier:uuids_to_process; 231, identifier:append; 232, attribute; 233, identifier:len; 234, argument_list; 235, identifier:get_uuids_in_index; 236, argument_list; 237, subscript; 238, for_in_clause; 239, call; 240, call; 241, identifier:delete_unique_identities; 242, argument_list; 243, identifier:get_uuids_in_index; 244, argument_list; 245, subscript; 246, for_in_clause; 247, call; 248, call; 249, identifier:delete_unique_identities; 250, argument_list; 251, identifier:es; 252, identifier:search; 253, keyword_argument; 254, keyword_argument; 255, keyword_argument; 256, keyword_argument; 257, identifier:page; 258, string; 259, string_content:total; 260, identifier:hits; 261, subscript; 262, identifier:success; 263, call; 264, identifier:count; 265, conditional_expression:count + 1 if success else count; 266, attribute; 267, identifier:data_sources; 268, expression_statement; 269, expression_statement; 270, attribute; 271, identifier:data_sources; 272, expression_statement; 273, break_statement; 274, identifier:unique_identity; 275, identifier:active_data_sources; 276, identifier:deleted_unique_identities; 277, call; 278, identifier:unique_identity; 279, identifier:active_data_sources; 280, identifier:unique_identity; 281, list; 282, identifier:unique_identity; 283, identifier:uuid; 284, identifier:uuids_to_process; 285, identifier:uuids_to_process; 286, subscript; 287, string; 288, identifier:item; 289, identifier:results; 290, identifier:set; 291, argument_list; 292, identifier:set; 293, argument_list; 294, identifier:orphan_uuids; 295, identifier:uuids_to_process; 296, subscript; 297, string; 298, identifier:item; 299, identifier:results; 300, identifier:set; 301, argument_list; 302, identifier:set; 303, argument_list; 304, identifier:orphan_uuids; 305, identifier:index; 306, identifier:IDENTITIES_INDEX; 307, identifier:scroll; 308, string:"360m"; 309, identifier:size; 310, identifier:SIZE_SCROLL_IDENTITIES_INDEX; 311, identifier:body; 312, dictionary; 313, string_content:hits; 314, subscript; 315, string; 316, attribute; 317, argument_list; 318, binary_operator:count + 1; 319, identifier:success; 320, identifier:count; 321, identifier:ident; 322, identifier:source; 323, assignment; 324, assignment; 325, identifier:ident; 326, identifier:source; 327, assignment; 328, identifier:delete_unique_identities; 329, argument_list; 330, identifier:current_data_source; 331, identifier:item; 332, string; 333, string_content:sh_uuid; 334, identifier:uuids_to_process; 335, identifier:uuids_found; 336, identifier:item; 337, string; 338, string_content:sh_uuid; 339, identifier:uuids_to_process; 340, identifier:uuids_found; 341, pair; 342, identifier:page; 343, string; 344, string_content:hits; 345, identifier:SortingHat; 346, identifier:remove_unique_identity; 347, identifier:sortinghat_db; 348, identifier:uuid; 349, identifier:count; 350, integer:1; 351, identifier:success; 352, call; 353, identifier:count; 354, conditional_expression:count + 1 if success else count; 355, identifier:in_active; 356, True; 357, list; 358, string_content:_source; 359, string_content:_source; 360, string:"query"; 361, dictionary; 362, string_content:hits; 363, attribute; 364, argument_list; 365, binary_operator:count + 1; 366, identifier:success; 367, identifier:count; 368, attribute; 369, pair; 370, identifier:SortingHat; 371, identifier:remove_identity; 372, identifier:sortinghat_db; 373, attribute; 374, identifier:count; 375, integer:1; 376, identifier:unique_identity; 377, identifier:uuid; 378, string:"bool"; 379, dictionary; 380, identifier:ident; 381, identifier:id; 382, pair; 383, string:"filter"; 384, list; 385, dictionary; 386, pair; 387, string:"terms"; 388, dictionary; 389, pair; 390, string:"sh_uuid"; 391, identifier:target_uuids
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 4, 14; 4, 15; 4, 16; 4, 17; 4, 18; 4, 19; 4, 20; 4, 21; 4, 22; 9, 23; 10, 24; 10, 25; 10, 26; 11, 27; 11, 28; 11, 29; 12, 30; 12, 31; 12, 32; 13, 33; 13, 34; 13, 35; 14, 36; 15, 37; 16, 38; 18, 39; 18, 40; 18, 41; 18, 42; 20, 43; 20, 44; 20, 45; 21, 46; 22, 47; 25, 48; 26, 49; 26, 50; 26, 51; 26, 52; 26, 53; 28, 54; 29, 55; 29, 56; 29, 57; 29, 58; 31, 59; 31, 60; 32, 61; 32, 62; 32, 63; 32, 64; 34, 65; 34, 66; 35, 67; 35, 68; 35, 69; 35, 70; 36, 71; 36, 72; 37, 73; 37, 74; 38, 75; 38, 76; 40, 77; 40, 78; 42, 79; 42, 80; 42, 81; 42, 82; 42, 83; 42, 84; 42, 85; 42, 86; 42, 87; 42, 88; 42, 89; 42, 90; 42, 91; 42, 92; 42, 93; 42, 94; 42, 95; 42, 96; 42, 97; 42, 98; 45, 99; 45, 100; 45, 101; 45, 102; 45, 103; 45, 104; 46, 105; 46, 106; 47, 107; 47, 108; 49, 109; 50, 110; 51, 111; 52, 112; 52, 113; 53, 114; 55, 115; 56, 116; 57, 117; 57, 118; 57, 119; 58, 120; 61, 121; 62, 122; 63, 123; 63, 124; 63, 125; 64, 126; 67, 127; 68, 128; 69, 129; 69, 130; 69, 131; 70, 132; 77, 133; 77, 134; 78, 135; 79, 136; 79, 137; 81, 138; 85, 139; 85, 140; 87, 141; 89, 142; 89, 143; 91, 144; 92, 145; 94, 146; 96, 147; 98, 148; 99, 149; 100, 150; 102, 151; 104, 152; 105, 153; 105, 154; 106, 155; 106, 156; 107, 157; 107, 158; 108, 159; 108, 160; 110, 161; 110, 162; 111, 163; 111, 164; 112, 165; 112, 166; 113, 167; 116, 168; 116, 169; 119, 170; 119, 171; 122, 172; 122, 173; 124, 174; 124, 175; 125, 176; 128, 177; 128, 178; 130, 179; 130, 180; 131, 181; 136, 182; 137, 183; 137, 184; 138, 185; 138, 186; 139, 187; 140, 188; 141, 189; 141, 190; 142, 191; 142, 192; 143, 193; 144, 194; 144, 195; 145, 196; 145, 197; 146, 198; 146, 199; 147, 200; 147, 201; 148, 202; 148, 203; 149, 204; 149, 205; 150, 206; 150, 207; 151, 208; 151, 209; 152, 210; 152, 211; 162, 212; 162, 213; 165, 214; 165, 215; 167, 216; 170, 217; 171, 218; 176, 219; 176, 220; 181, 221; 181, 222; 182, 223; 182, 224; 183, 225; 186, 226; 186, 227; 187, 228; 187, 229; 189, 230; 189, 231; 190, 232; 191, 233; 191, 234; 195, 235; 195, 236; 197, 237; 197, 238; 199, 239; 199, 240; 201, 241; 201, 242; 205, 243; 205, 244; 207, 245; 207, 246; 209, 247; 209, 248; 211, 249; 211, 250; 212, 251; 212, 252; 213, 253; 213, 254; 213, 255; 213, 256; 214, 257; 214, 258; 215, 259; 216, 260; 216, 261; 217, 262; 217, 263; 218, 264; 218, 265; 219, 266; 219, 267; 220, 268; 220, 269; 221, 270; 221, 271; 222, 272; 222, 273; 224, 274; 224, 275; 225, 276; 225, 277; 227, 278; 227, 279; 229, 280; 229, 281; 232, 282; 232, 283; 234, 284; 236, 285; 237, 286; 237, 287; 238, 288; 238, 289; 239, 290; 239, 291; 240, 292; 240, 293; 242, 294; 244, 295; 245, 296; 245, 297; 246, 298; 246, 299; 247, 300; 247, 301; 248, 302; 248, 303; 250, 304; 253, 305; 253, 306; 254, 307; 254, 308; 255, 309; 255, 310; 256, 311; 256, 312; 258, 313; 261, 314; 261, 315; 263, 316; 263, 317; 265, 318; 265, 319; 265, 320; 266, 321; 266, 322; 268, 323; 269, 324; 270, 325; 270, 326; 272, 327; 277, 328; 277, 329; 281, 330; 286, 331; 286, 332; 287, 333; 291, 334; 293, 335; 296, 336; 296, 337; 297, 338; 301, 339; 303, 340; 312, 341; 314, 342; 314, 343; 315, 344; 316, 345; 316, 346; 317, 347; 317, 348; 318, 349; 318, 350; 323, 351; 323, 352; 324, 353; 324, 354; 327, 355; 327, 356; 329, 357; 332, 358; 337, 359; 341, 360; 341, 361; 343, 362; 352, 363; 352, 364; 354, 365; 354, 366; 354, 367; 357, 368; 361, 369; 363, 370; 363, 371; 364, 372; 364, 373; 365, 374; 365, 375; 368, 376; 368, 377; 369, 378; 369, 379; 373, 380; 373, 381; 379, 382; 382, 383; 382, 384; 384, 385; 385, 386; 386, 387; 386, 388; 388, 389; 389, 390; 389, 391
def delete_orphan_unique_identities(es, sortinghat_db, current_data_source, active_data_sources): """Delete all unique identities which appear in SortingHat, but not in the IDENTITIES_INDEX. :param es: ElasticSearchDSL object :param sortinghat_db: instance of the SortingHat database :param current_data_source: current data source :param active_data_sources: list of active data sources """ def get_uuids_in_index(target_uuids): """Find a set of uuids in IDENTITIES_INDEX and return them if exist. :param target_uuids: target uuids """ page = es.search( index=IDENTITIES_INDEX, scroll="360m", size=SIZE_SCROLL_IDENTITIES_INDEX, body={ "query": { "bool": { "filter": [ { "terms": { "sh_uuid": target_uuids } } ] } } } ) hits = [] if page['hits']['total'] != 0: hits = page['hits']['hits'] return hits def delete_unique_identities(target_uuids): """Delete a list of uuids from SortingHat. :param target_uuids: uuids to be deleted """ count = 0 for uuid in target_uuids: success = SortingHat.remove_unique_identity(sortinghat_db, uuid) count = count + 1 if success else count return count def delete_identities(unique_ident, data_sources): """Remove the identities in non active data sources. :param unique_ident: unique identity object :param data_sources: target data sources """ count = 0 for ident in unique_ident.identities: if ident.source not in data_sources: success = SortingHat.remove_identity(sortinghat_db, ident.id) count = count + 1 if success else count return count def has_identities_in_data_sources(unique_ident, data_sources): """Check if a unique identity has identities in a set of data sources. :param unique_ident: unique identity object :param data_sources: target data sources """ in_active = False for ident in unique_ident.identities: if ident.source in data_sources: in_active = True break return in_active deleted_unique_identities = 0 deleted_identities = 0 uuids_to_process = [] # Collect all unique identities for unique_identity in SortingHat.unique_identities(sortinghat_db): # Remove a unique identity if all its identities are in non active data source if not has_identities_in_data_sources(unique_identity, active_data_sources): deleted_unique_identities += delete_unique_identities([unique_identity.uuid]) continue # Remove the identities of non active data source for a given unique identity deleted_identities += delete_identities(unique_identity, active_data_sources) # Process only the unique identities that include the current data source, since # it may be that unique identities in other data source have not been # added yet to IDENTITIES_INDEX if not has_identities_in_data_sources(unique_identity, [current_data_source]): continue # Add the uuid to the list to check its existence in the IDENTITIES_INDEX uuids_to_process.append(unique_identity.uuid) # Process the uuids in block of SIZE_SCROLL_IDENTITIES_INDEX if len(uuids_to_process) != SIZE_SCROLL_IDENTITIES_INDEX: continue # Find which uuids to be processed exist in IDENTITIES_INDEX results = get_uuids_in_index(uuids_to_process) uuids_found = [item['_source']['sh_uuid'] for item in results] # Find the uuids which exist in SortingHat but not in IDENTITIES_INDEX orphan_uuids = set(uuids_to_process) - set(uuids_found) # Delete the orphan uuids from SortingHat deleted_unique_identities += delete_unique_identities(orphan_uuids) # Reset the list uuids_to_process = [] # Check that no uuids have been left to process if uuids_to_process: # Find which uuids to be processed exist in IDENTITIES_INDEX results = get_uuids_in_index(uuids_to_process) uuids_found = [item['_source']['sh_uuid'] for item in results] # Find the uuids which exist in SortingHat but not in IDENTITIES_INDEX orphan_uuids = set(uuids_to_process) - set(uuids_found) # Delete the orphan uuids from SortingHat deleted_unique_identities += delete_unique_identities(orphan_uuids) logger.debug("[identities retention] Total orphan unique identities deleted from SH: %i", deleted_unique_identities) logger.debug("[identities retention] Total identities in non-active data sources deleted from SH: %i", deleted_identities)
0, module; 1, function_definition; 2, function_name:delete_inactive_unique_identities; 3, parameters; 4, block; 5, identifier:es; 6, identifier:sortinghat_db; 7, identifier:before_date; 8, expression_statement; 9, expression_statement; 10, expression_statement; 11, expression_statement; 12, if_statement; 13, expression_statement; 14, while_statement; 15, expression_statement; 16, comment:"""Select the unique identities not seen before `before_date` and delete them from SortingHat. :param es: ElasticSearchDSL object :param sortinghat_db: instance of the SortingHat database :param before_date: datetime str to filter the identities """; 17, assignment; 18, assignment; 19, assignment; 20, comparison_operator:scroll_size == 0; 21, block; 22, assignment; 23, comparison_operator:scroll_size > 0; 24, block; 25, call; 26, identifier:page; 27, call; 28, identifier:sid; 29, subscript; 30, identifier:scroll_size; 31, subscript; 32, identifier:scroll_size; 33, integer:0; 34, expression_statement; 35, return_statement; 36, identifier:count; 37, integer:0; 38, identifier:scroll_size; 39, integer:0; 40, for_statement; 41, expression_statement; 42, expression_statement; 43, expression_statement; 44, attribute; 45, argument_list; 46, attribute; 47, argument_list; 48, identifier:page; 49, string; 50, subscript; 51, string; 52, call; 53, identifier:item; 54, subscript; 55, block; 56, assignment; 57, assignment; 58, assignment; 59, identifier:logger; 60, identifier:debug; 61, string:"[identities retention] Total inactive identities deleted from SH: %i"; 62, identifier:count; 63, identifier:es; 64, identifier:search; 65, keyword_argument; 66, keyword_argument; 67, keyword_argument; 68, keyword_argument; 69, string_content:_scroll_id; 70, identifier:page; 71, string; 72, string_content:total; 73, attribute; 74, argument_list; 75, subscript; 76, string; 77, expression_statement; 78, expression_statement; 79, comment:# increment the number of deleted identities only if the corresponding command was successful; 80, expression_statement; 81, identifier:page; 82, call; 83, identifier:sid; 84, subscript; 85, identifier:scroll_size; 86, call; 87, identifier:index; 88, identifier:IDENTITIES_INDEX; 89, identifier:scroll; 90, string:"360m"; 91, identifier:size; 92, identifier:SIZE_SCROLL_IDENTITIES_INDEX; 93, identifier:body; 94, dictionary; 95, string_content:hits; 96, identifier:logging; 97, identifier:warning; 98, string:"[identities retention] No inactive identities found in %s after %s!"; 99, identifier:IDENTITIES_INDEX; 100, identifier:before_date; 101, identifier:page; 102, string; 103, string_content:hits; 104, assignment; 105, assignment; 106, assignment; 107, attribute; 108, argument_list; 109, identifier:page; 110, string; 111, identifier:len; 112, argument_list; 113, pair; 114, string_content:hits; 115, identifier:to_delete; 116, subscript; 117, identifier:success; 118, call; 119, identifier:count; 120, conditional_expression:count + 1 if success else count; 121, identifier:es; 122, identifier:scroll; 123, keyword_argument; 124, keyword_argument; 125, string_content:_scroll_id; 126, subscript; 127, string:"query"; 128, dictionary; 129, subscript; 130, string; 131, attribute; 132, argument_list; 133, binary_operator:count + 1; 134, identifier:success; 135, identifier:count; 136, identifier:scroll_id; 137, identifier:sid; 138, identifier:scroll; 139, string; 140, subscript; 141, string; 142, pair; 143, identifier:item; 144, string; 145, string_content:sh_uuid; 146, identifier:SortingHat; 147, identifier:remove_unique_identity; 148, identifier:sortinghat_db; 149, identifier:to_delete; 150, identifier:count; 151, integer:1; 152, string_content:60m; 153, identifier:page; 154, string; 155, string_content:hits; 156, string:"range"; 157, dictionary; 158, string_content:_source; 159, string_content:hits; 160, pair; 161, string:"last_seen"; 162, dictionary; 163, pair; 164, string:"lte"; 165, identifier:before_date
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 4, 14; 4, 15; 8, 16; 9, 17; 10, 18; 11, 19; 12, 20; 12, 21; 13, 22; 14, 23; 14, 24; 15, 25; 17, 26; 17, 27; 18, 28; 18, 29; 19, 30; 19, 31; 20, 32; 20, 33; 21, 34; 21, 35; 22, 36; 22, 37; 23, 38; 23, 39; 24, 40; 24, 41; 24, 42; 24, 43; 25, 44; 25, 45; 27, 46; 27, 47; 29, 48; 29, 49; 31, 50; 31, 51; 34, 52; 40, 53; 40, 54; 40, 55; 41, 56; 42, 57; 43, 58; 44, 59; 44, 60; 45, 61; 45, 62; 46, 63; 46, 64; 47, 65; 47, 66; 47, 67; 47, 68; 49, 69; 50, 70; 50, 71; 51, 72; 52, 73; 52, 74; 54, 75; 54, 76; 55, 77; 55, 78; 55, 79; 55, 80; 56, 81; 56, 82; 57, 83; 57, 84; 58, 85; 58, 86; 65, 87; 65, 88; 66, 89; 66, 90; 67, 91; 67, 92; 68, 93; 68, 94; 71, 95; 73, 96; 73, 97; 74, 98; 74, 99; 74, 100; 75, 101; 75, 102; 76, 103; 77, 104; 78, 105; 80, 106; 82, 107; 82, 108; 84, 109; 84, 110; 86, 111; 86, 112; 94, 113; 102, 114; 104, 115; 104, 116; 105, 117; 105, 118; 106, 119; 106, 120; 107, 121; 107, 122; 108, 123; 108, 124; 110, 125; 112, 126; 113, 127; 113, 128; 116, 129; 116, 130; 118, 131; 118, 132; 120, 133; 120, 134; 120, 135; 123, 136; 123, 137; 124, 138; 124, 139; 126, 140; 126, 141; 128, 142; 129, 143; 129, 144; 130, 145; 131, 146; 131, 147; 132, 148; 132, 149; 133, 150; 133, 151; 139, 152; 140, 153; 140, 154; 141, 155; 142, 156; 142, 157; 144, 158; 154, 159; 157, 160; 160, 161; 160, 162; 162, 163; 163, 164; 163, 165
def delete_inactive_unique_identities(es, sortinghat_db, before_date): """Select the unique identities not seen before `before_date` and delete them from SortingHat. :param es: ElasticSearchDSL object :param sortinghat_db: instance of the SortingHat database :param before_date: datetime str to filter the identities """ page = es.search( index=IDENTITIES_INDEX, scroll="360m", size=SIZE_SCROLL_IDENTITIES_INDEX, body={ "query": { "range": { "last_seen": { "lte": before_date } } } } ) sid = page['_scroll_id'] scroll_size = page['hits']['total'] if scroll_size == 0: logging.warning("[identities retention] No inactive identities found in %s after %s!", IDENTITIES_INDEX, before_date) return count = 0 while scroll_size > 0: for item in page['hits']['hits']: to_delete = item['_source']['sh_uuid'] success = SortingHat.remove_unique_identity(sortinghat_db, to_delete) # increment the number of deleted identities only if the corresponding command was successful count = count + 1 if success else count page = es.scroll(scroll_id=sid, scroll='60m') sid = page['_scroll_id'] scroll_size = len(page['hits']['hits']) logger.debug("[identities retention] Total inactive identities deleted from SH: %i", count)
0, module; 1, function_definition; 2, function_name:retain_identities; 3, parameters; 4, block; 5, identifier:retention_time; 6, identifier:es_enrichment_url; 7, identifier:sortinghat_db; 8, identifier:data_source; 9, identifier:active_data_sources; 10, expression_statement; 11, expression_statement; 12, expression_statement; 13, expression_statement; 14, comment:# delete the unique identities which have not been seen after `before_date`; 15, expression_statement; 16, comment:# delete the unique identities for a given data source which are not in the IDENTITIES_INDEX; 17, expression_statement; 18, comment:"""Select the unique identities not seen before `retention_time` and delete them from SortingHat. Furthermore, it deletes also the orphan unique identities, those ones stored in SortingHat but not in IDENTITIES_INDEX. :param retention_time: maximum number of minutes wrt the current date to retain the identities :param es_enrichment_url: URL of the ElasticSearch where the enriched data is stored :param sortinghat_db: instance of the SortingHat database :param data_source: target data source (e.g., git, github, slack) :param active_data_sources: list of active data sources """; 19, assignment; 20, assignment; 21, assignment; 22, call; 23, call; 24, identifier:before_date; 25, call; 26, identifier:before_date_str; 27, call; 28, identifier:es; 29, call; 30, identifier:delete_inactive_unique_identities; 31, argument_list; 32, identifier:delete_orphan_unique_identities; 33, argument_list; 34, identifier:get_diff_current_date; 35, argument_list; 36, attribute; 37, argument_list; 38, identifier:Elasticsearch; 39, argument_list; 40, identifier:es; 41, identifier:sortinghat_db; 42, identifier:before_date_str; 43, identifier:es; 44, identifier:sortinghat_db; 45, identifier:data_source; 46, identifier:active_data_sources; 47, keyword_argument; 48, identifier:before_date; 49, identifier:isoformat; 50, list; 51, keyword_argument; 52, keyword_argument; 53, keyword_argument; 54, keyword_argument; 55, identifier:minutes; 56, identifier:retention_time; 57, identifier:es_enrichment_url; 58, identifier:timeout; 59, integer:120; 60, identifier:max_retries; 61, integer:20; 62, identifier:retry_on_timeout; 63, True; 64, identifier:verify_certs; 65, False
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 3, 9; 4, 10; 4, 11; 4, 12; 4, 13; 4, 14; 4, 15; 4, 16; 4, 17; 10, 18; 11, 19; 12, 20; 13, 21; 15, 22; 17, 23; 19, 24; 19, 25; 20, 26; 20, 27; 21, 28; 21, 29; 22, 30; 22, 31; 23, 32; 23, 33; 25, 34; 25, 35; 27, 36; 27, 37; 29, 38; 29, 39; 31, 40; 31, 41; 31, 42; 33, 43; 33, 44; 33, 45; 33, 46; 35, 47; 36, 48; 36, 49; 39, 50; 39, 51; 39, 52; 39, 53; 39, 54; 47, 55; 47, 56; 50, 57; 51, 58; 51, 59; 52, 60; 52, 61; 53, 62; 53, 63; 54, 64; 54, 65
def retain_identities(retention_time, es_enrichment_url, sortinghat_db, data_source, active_data_sources): """Select the unique identities not seen before `retention_time` and delete them from SortingHat. Furthermore, it deletes also the orphan unique identities, those ones stored in SortingHat but not in IDENTITIES_INDEX. :param retention_time: maximum number of minutes wrt the current date to retain the identities :param es_enrichment_url: URL of the ElasticSearch where the enriched data is stored :param sortinghat_db: instance of the SortingHat database :param data_source: target data source (e.g., git, github, slack) :param active_data_sources: list of active data sources """ before_date = get_diff_current_date(minutes=retention_time) before_date_str = before_date.isoformat() es = Elasticsearch([es_enrichment_url], timeout=120, max_retries=20, retry_on_timeout=True, verify_certs=False) # delete the unique identities which have not been seen after `before_date` delete_inactive_unique_identities(es, sortinghat_db, before_date_str) # delete the unique identities for a given data source which are not in the IDENTITIES_INDEX delete_orphan_unique_identities(es, sortinghat_db, data_source, active_data_sources)
0, module; 1, function_definition; 2, function_name:get_review_sh; 3, parameters; 4, block; 5, identifier:self; 6, identifier:revision; 7, identifier:item; 8, expression_statement; 9, expression_statement; 10, expression_statement; 11, expression_statement; 12, return_statement; 13, comment:""" Add sorting hat enrichment fields for the author of the revision """; 14, assignment; 15, assignment; 16, assignment; 17, identifier:erevision; 18, identifier:identity; 19, call; 20, identifier:update; 21, call; 22, identifier:erevision; 23, call; 24, attribute; 25, argument_list; 26, attribute; 27, argument_list; 28, attribute; 29, argument_list; 30, identifier:self; 31, identifier:get_sh_identity; 32, identifier:revision; 33, identifier:parser; 34, identifier:parse; 35, subscript; 36, identifier:self; 37, identifier:get_item_sh_fields; 38, identifier:identity; 39, identifier:update; 40, identifier:item; 41, call; 42, attribute; 43, argument_list; 44, identifier:self; 45, identifier:get_field_date
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 8, 13; 9, 14; 10, 15; 11, 16; 12, 17; 14, 18; 14, 19; 15, 20; 15, 21; 16, 22; 16, 23; 19, 24; 19, 25; 21, 26; 21, 27; 23, 28; 23, 29; 24, 30; 24, 31; 25, 32; 26, 33; 26, 34; 27, 35; 28, 36; 28, 37; 29, 38; 29, 39; 35, 40; 35, 41; 41, 42; 41, 43; 42, 44; 42, 45
def get_review_sh(self, revision, item): """ Add sorting hat enrichment fields for the author of the revision """ identity = self.get_sh_identity(revision) update = parser.parse(item[self.get_field_date()]) erevision = self.get_item_sh_fields(identity, update) return erevision
0, module; 1, function_definition; 2, function_name:get_item_sh; 3, parameters; 4, block; 5, identifier:self; 6, identifier:item; 7, default_parameter; 8, default_parameter; 9, expression_statement; 10, expression_statement; 11, comment:# Item enriched; 12, expression_statement; 13, if_statement; 14, if_statement; 15, expression_statement; 16, for_statement; 17, comment:# Add the author field common in all data sources; 18, expression_statement; 19, if_statement; 20, return_statement; 21, identifier:roles; 22, None; 23, identifier:date_field; 24, None; 25, comment:""" Add sorting hat enrichment fields for different roles If there are no roles, just add the author fields. """; 26, assignment; 27, assignment; 28, not_operator; 29, block; 30, not_operator; 31, block; 32, else_clause; 33, assignment; 34, identifier:rol; 35, identifier:roles; 36, block; 37, assignment; 38, boolean_operator; 39, block; 40, identifier:eitem_sh; 41, identifier:eitem_sh; 42, dictionary; 43, identifier:author_field; 44, call; 45, identifier:roles; 46, expression_statement; 47, identifier:date_field; 48, expression_statement; 49, block; 50, identifier:users_data; 51, call; 52, if_statement; 53, identifier:rol_author; 54, string; 55, comparison_operator:author_field in users_data; 56, comparison_operator:author_field != rol_author; 57, expression_statement; 58, expression_statement; 59, if_statement; 60, if_statement; 61, if_statement; 62, attribute; 63, argument_list; 64, assignment; 65, assignment; 66, expression_statement; 67, attribute; 68, argument_list; 69, comparison_operator:rol in users_data; 70, block; 71, string_content:author; 72, identifier:author_field; 73, identifier:users_data; 74, identifier:author_field; 75, identifier:rol_author; 76, assignment; 77, call; 78, not_operator; 79, block; 80, not_operator; 81, block; 82, not_operator; 83, block; 84, identifier:self; 85, identifier:get_field_author; 86, identifier:roles; 87, list; 88, identifier:item_date; 89, call; 90, assignment; 91, identifier:self; 92, identifier:get_users_data; 93, identifier:item; 94, identifier:rol; 95, identifier:users_data; 96, expression_statement; 97, expression_statement; 98, if_statement; 99, if_statement; 100, if_statement; 101, identifier:identity; 102, call; 103, attribute; 104, argument_list; 105, subscript; 106, expression_statement; 107, subscript; 108, expression_statement; 109, subscript; 110, expression_statement; 111, identifier:author_field; 112, identifier:str_to_datetime; 113, argument_list; 114, identifier:item_date; 115, call; 116, assignment; 117, call; 118, not_operator; 119, block; 120, not_operator; 121, block; 122, not_operator; 123, block; 124, attribute; 125, argument_list; 126, identifier:eitem_sh; 127, identifier:update; 128, call; 129, identifier:eitem_sh; 130, string; 131, assignment; 132, identifier:eitem_sh; 133, string; 134, assignment; 135, identifier:eitem_sh; 136, string; 137, assignment; 138, subscript; 139, identifier:str_to_datetime; 140, argument_list; 141, identifier:identity; 142, call; 143, attribute; 144, argument_list; 145, subscript; 146, expression_statement; 147, subscript; 148, expression_statement; 149, subscript; 150, expression_statement; 151, identifier:self; 152, identifier:get_sh_identity; 153, identifier:item; 154, identifier:author_field; 155, attribute; 156, argument_list; 157, string_content:author_org_name; 158, subscript; 159, identifier:SH_UNKNOWN_VALUE; 160, string_content:author_name; 161, subscript; 162, identifier:SH_UNKNOWN_VALUE; 163, string_content:author_user_name; 164, subscript; 165, identifier:SH_UNKNOWN_VALUE; 166, identifier:item; 167, call; 168, subscript; 169, attribute; 170, argument_list; 171, identifier:eitem_sh; 172, identifier:update; 173, call; 174, identifier:eitem_sh; 175, binary_operator:rol + '_org_name'; 176, assignment; 177, identifier:eitem_sh; 178, binary_operator:rol + '_name'; 179, assignment; 180, identifier:eitem_sh; 181, binary_operator:rol + '_user_name'; 182, assignment; 183, identifier:self; 184, identifier:get_item_sh_fields; 185, identifier:identity; 186, identifier:item_date; 187, keyword_argument; 188, identifier:eitem_sh; 189, string; 190, identifier:eitem_sh; 191, string; 192, identifier:eitem_sh; 193, string; 194, attribute; 195, argument_list; 196, identifier:item; 197, identifier:date_field; 198, identifier:self; 199, identifier:get_sh_identity; 200, identifier:item; 201, identifier:rol; 202, attribute; 203, argument_list; 204, identifier:rol; 205, string; 206, subscript; 207, identifier:SH_UNKNOWN_VALUE; 208, identifier:rol; 209, string; 210, subscript; 211, identifier:SH_UNKNOWN_VALUE; 212, identifier:rol; 213, string; 214, subscript; 215, identifier:SH_UNKNOWN_VALUE; 216, identifier:rol; 217, identifier:rol_author; 218, string_content:author_org_name; 219, string_content:author_name; 220, string_content:author_user_name; 221, identifier:self; 222, identifier:get_field_date; 223, identifier:self; 224, identifier:get_item_sh_fields; 225, identifier:identity; 226, identifier:item_date; 227, keyword_argument; 228, string_content:_org_name; 229, identifier:eitem_sh; 230, binary_operator:rol + '_org_name'; 231, string_content:_name; 232, identifier:eitem_sh; 233, binary_operator:rol + '_name'; 234, string_content:_user_name; 235, identifier:eitem_sh; 236, binary_operator:rol + '_user_name'; 237, identifier:rol; 238, identifier:rol; 239, identifier:rol; 240, string; 241, identifier:rol; 242, string; 243, identifier:rol; 244, string; 245, string_content:_org_name; 246, string_content:_name; 247, string_content:_user_name
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 4, 14; 4, 15; 4, 16; 4, 17; 4, 18; 4, 19; 4, 20; 7, 21; 7, 22; 8, 23; 8, 24; 9, 25; 10, 26; 12, 27; 13, 28; 13, 29; 14, 30; 14, 31; 14, 32; 15, 33; 16, 34; 16, 35; 16, 36; 18, 37; 19, 38; 19, 39; 20, 40; 26, 41; 26, 42; 27, 43; 27, 44; 28, 45; 29, 46; 30, 47; 31, 48; 32, 49; 33, 50; 33, 51; 36, 52; 37, 53; 37, 54; 38, 55; 38, 56; 39, 57; 39, 58; 39, 59; 39, 60; 39, 61; 44, 62; 44, 63; 46, 64; 48, 65; 49, 66; 51, 67; 51, 68; 52, 69; 52, 70; 54, 71; 55, 72; 55, 73; 56, 74; 56, 75; 57, 76; 58, 77; 59, 78; 59, 79; 60, 80; 60, 81; 61, 82; 61, 83; 62, 84; 62, 85; 64, 86; 64, 87; 65, 88; 65, 89; 66, 90; 67, 91; 67, 92; 68, 93; 69, 94; 69, 95; 70, 96; 70, 97; 70, 98; 70, 99; 70, 100; 76, 101; 76, 102; 77, 103; 77, 104; 78, 105; 79, 106; 80, 107; 81, 108; 82, 109; 83, 110; 87, 111; 89, 112; 89, 113; 90, 114; 90, 115; 96, 116; 97, 117; 98, 118; 98, 119; 99, 120; 99, 121; 100, 122; 100, 123; 102, 124; 102, 125; 103, 126; 103, 127; 104, 128; 105, 129; 105, 130; 106, 131; 107, 132; 107, 133; 108, 134; 109, 135; 109, 136; 110, 137; 113, 138; 115, 139; 115, 140; 116, 141; 116, 142; 117, 143; 117, 144; 118, 145; 119, 146; 120, 147; 121, 148; 122, 149; 123, 150; 124, 151; 124, 152; 125, 153; 125, 154; 128, 155; 128, 156; 130, 157; 131, 158; 131, 159; 133, 160; 134, 161; 134, 162; 136, 163; 137, 164; 137, 165; 138, 166; 138, 167; 140, 168; 142, 169; 142, 170; 143, 171; 143, 172; 144, 173; 145, 174; 145, 175; 146, 176; 147, 177; 147, 178; 148, 179; 149, 180; 149, 181; 150, 182; 155, 183; 155, 184; 156, 185; 156, 186; 156, 187; 158, 188; 158, 189; 161, 190; 161, 191; 164, 192; 164, 193; 167, 194; 167, 195; 168, 196; 168, 197; 169, 198; 169, 199; 170, 200; 170, 201; 173, 202; 173, 203; 175, 204; 175, 205; 176, 206; 176, 207; 178, 208; 178, 209; 179, 210; 179, 211; 181, 212; 181, 213; 182, 214; 182, 215; 187, 216; 187, 217; 189, 218; 191, 219; 193, 220; 194, 221; 194, 222; 202, 223; 202, 224; 203, 225; 203, 226; 203, 227; 205, 228; 206, 229; 206, 230; 209, 231; 210, 232; 210, 233; 213, 234; 214, 235; 214, 236; 227, 237; 227, 238; 230, 239; 230, 240; 233, 241; 233, 242; 236, 243; 236, 244; 240, 245; 242, 246; 244, 247
def get_item_sh(self, item, roles=None, date_field=None): """ Add sorting hat enrichment fields for different roles If there are no roles, just add the author fields. """ eitem_sh = {} # Item enriched author_field = self.get_field_author() if not roles: roles = [author_field] if not date_field: item_date = str_to_datetime(item[self.get_field_date()]) else: item_date = str_to_datetime(item[date_field]) users_data = self.get_users_data(item) for rol in roles: if rol in users_data: identity = self.get_sh_identity(item, rol) eitem_sh.update(self.get_item_sh_fields(identity, item_date, rol=rol)) if not eitem_sh[rol + '_org_name']: eitem_sh[rol + '_org_name'] = SH_UNKNOWN_VALUE if not eitem_sh[rol + '_name']: eitem_sh[rol + '_name'] = SH_UNKNOWN_VALUE if not eitem_sh[rol + '_user_name']: eitem_sh[rol + '_user_name'] = SH_UNKNOWN_VALUE # Add the author field common in all data sources rol_author = 'author' if author_field in users_data and author_field != rol_author: identity = self.get_sh_identity(item, author_field) eitem_sh.update(self.get_item_sh_fields(identity, item_date, rol=rol_author)) if not eitem_sh['author_org_name']: eitem_sh['author_org_name'] = SH_UNKNOWN_VALUE if not eitem_sh['author_name']: eitem_sh['author_name'] = SH_UNKNOWN_VALUE if not eitem_sh['author_user_name']: eitem_sh['author_user_name'] = SH_UNKNOWN_VALUE return eitem_sh
0, module; 1, function_definition; 2, function_name:get_sh_ids; 3, parameters; 4, block; 5, identifier:self; 6, identifier:identity; 7, identifier:backend_name; 8, expression_statement; 9, comment:# Convert the dict to tuple so it is hashable; 10, expression_statement; 11, expression_statement; 12, return_statement; 13, comment:""" Return the Sorting Hat id and uuid for an identity """; 14, assignment; 15, assignment; 16, identifier:sh_ids; 17, identifier:identity_tuple; 18, call; 19, identifier:sh_ids; 20, call; 21, identifier:tuple; 22, argument_list; 23, attribute; 24, argument_list; 25, call; 26, identifier:self; 27, identifier:__get_sh_ids_cache; 28, identifier:identity_tuple; 29, identifier:backend_name; 30, attribute; 31, argument_list; 32, identifier:identity; 33, identifier:items
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 8, 13; 10, 14; 11, 15; 12, 16; 14, 17; 14, 18; 15, 19; 15, 20; 18, 21; 18, 22; 20, 23; 20, 24; 22, 25; 23, 26; 23, 27; 24, 28; 24, 29; 25, 30; 25, 31; 30, 32; 30, 33
def get_sh_ids(self, identity, backend_name): """ Return the Sorting Hat id and uuid for an identity """ # Convert the dict to tuple so it is hashable identity_tuple = tuple(identity.items()) sh_ids = self.__get_sh_ids_cache(identity_tuple, backend_name) return sh_ids
0, module; 1, function_definition; 2, function_name:get_sh_identity; 3, parameters; 4, block; 5, identifier:self; 6, identifier:item; 7, default_parameter; 8, expression_statement; 9, function_definition; 10, expression_statement; 11, for_statement; 12, expression_statement; 13, comment:# by default a specific user dict is used; 14, if_statement; 15, expression_statement; 16, return_statement; 17, identifier:identity_field; 18, None; 19, comment:""" Return a Sorting Hat identity using bugzilla user data """; 20, function_name:fill_list_identity; 21, parameters; 22, block; 23, assignment; 24, identifier:field; 25, list; 26, comment:# Basic fields in Sorting Hat; 27, block; 28, assignment; 29, boolean_operator; 30, block; 31, assignment; 32, identifier:identity; 33, identifier:identity; 34, identifier:user_list_data; 35, expression_statement; 36, expression_statement; 37, if_statement; 38, if_statement; 39, return_statement; 40, identifier:identity; 41, dictionary; 42, string; 43, string; 44, string; 45, expression_statement; 46, identifier:user; 47, identifier:item; 48, comparison_operator:'data' in item; 49, comparison_operator:type(item) == dict; 50, expression_statement; 51, identifier:identity; 52, call; 53, comment:""" Fill identity with user data in first item in list """; 54, assignment; 55, comparison_operator:'@' in identity['username']; 56, block; 57, comparison_operator:'name' in user_list_data[0]; 58, block; 59, identifier:identity; 60, string_content:name; 61, string_content:email; 62, string_content:username; 63, assignment; 64, string; 65, identifier:item; 66, call; 67, identifier:dict; 68, assignment; 69, identifier:fill_list_identity; 70, argument_list; 71, subscript; 72, subscript; 73, string; 74, subscript; 75, expression_statement; 76, string; 77, subscript; 78, expression_statement; 79, subscript; 80, None; 81, string_content:data; 82, identifier:type; 83, argument_list; 84, identifier:user; 85, subscript; 86, identifier:identity; 87, identifier:user; 88, identifier:identity; 89, string; 90, subscript; 91, string; 92, string_content:@; 93, identifier:identity; 94, string; 95, assignment; 96, string_content:name; 97, identifier:user_list_data; 98, integer:0; 99, assignment; 100, identifier:identity; 101, identifier:field; 102, identifier:item; 103, subscript; 104, identifier:identity_field; 105, string_content:username; 106, identifier:user_list_data; 107, integer:0; 108, string_content:__text__; 109, string_content:username; 110, subscript; 111, subscript; 112, subscript; 113, subscript; 114, identifier:item; 115, string; 116, identifier:identity; 117, string; 118, identifier:identity; 119, string; 120, identifier:identity; 121, string; 122, subscript; 123, string; 124, string_content:data; 125, string_content:email; 126, string_content:username; 127, string_content:name; 128, identifier:user_list_data; 129, integer:0; 130, string_content:name
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 4, 14; 4, 15; 4, 16; 7, 17; 7, 18; 8, 19; 9, 20; 9, 21; 9, 22; 10, 23; 11, 24; 11, 25; 11, 26; 11, 27; 12, 28; 14, 29; 14, 30; 15, 31; 16, 32; 21, 33; 21, 34; 22, 35; 22, 36; 22, 37; 22, 38; 22, 39; 23, 40; 23, 41; 25, 42; 25, 43; 25, 44; 27, 45; 28, 46; 28, 47; 29, 48; 29, 49; 30, 50; 31, 51; 31, 52; 35, 53; 36, 54; 37, 55; 37, 56; 38, 57; 38, 58; 39, 59; 42, 60; 43, 61; 44, 62; 45, 63; 48, 64; 48, 65; 49, 66; 49, 67; 50, 68; 52, 69; 52, 70; 54, 71; 54, 72; 55, 73; 55, 74; 56, 75; 57, 76; 57, 77; 58, 78; 63, 79; 63, 80; 64, 81; 66, 82; 66, 83; 68, 84; 68, 85; 70, 86; 70, 87; 71, 88; 71, 89; 72, 90; 72, 91; 73, 92; 74, 93; 74, 94; 75, 95; 76, 96; 77, 97; 77, 98; 78, 99; 79, 100; 79, 101; 83, 102; 85, 103; 85, 104; 89, 105; 90, 106; 90, 107; 91, 108; 94, 109; 95, 110; 95, 111; 99, 112; 99, 113; 103, 114; 103, 115; 110, 116; 110, 117; 111, 118; 111, 119; 112, 120; 112, 121; 113, 122; 113, 123; 115, 124; 117, 125; 119, 126; 121, 127; 122, 128; 122, 129; 123, 130
def get_sh_identity(self, item, identity_field=None): """ Return a Sorting Hat identity using bugzilla user data """ def fill_list_identity(identity, user_list_data): """ Fill identity with user data in first item in list """ identity['username'] = user_list_data[0]['__text__'] if '@' in identity['username']: identity['email'] = identity['username'] if 'name' in user_list_data[0]: identity['name'] = user_list_data[0]['name'] return identity identity = {} for field in ['name', 'email', 'username']: # Basic fields in Sorting Hat identity[field] = None user = item # by default a specific user dict is used if 'data' in item and type(item) == dict: user = item['data'][identity_field] identity = fill_list_identity(identity, user) return identity
0, module; 1, function_definition; 2, function_name:areas_of_code; 3, parameters; 4, block; 5, identifier:git_enrich; 6, identifier:in_conn; 7, identifier:out_conn; 8, default_parameter; 9, expression_statement; 10, expression_statement; 11, expression_statement; 12, return_statement; 13, identifier:block_size; 14, integer:100; 15, comment:"""Build and index for areas of code from a given Perceval RAW index. :param block_size: size of items block. :param git_enrich: GitEnrich object to deal with SortingHat affiliations. :param in_conn: ESPandasConnector to read from. :param out_conn: ESPandasConnector to write to. :return: number of documents written in ElasticSearch enriched index. """; 16, assignment; 17, assignment; 18, identifier:ndocs; 19, identifier:aoc; 20, call; 21, identifier:ndocs; 22, call; 23, identifier:AreasOfCode; 24, argument_list; 25, attribute; 26, argument_list; 27, keyword_argument; 28, keyword_argument; 29, keyword_argument; 30, keyword_argument; 31, identifier:aoc; 32, identifier:analyze; 33, identifier:in_connector; 34, identifier:in_conn; 35, identifier:out_connector; 36, identifier:out_conn; 37, identifier:block_size; 38, identifier:block_size; 39, identifier:git_enrich; 40, identifier:git_enrich
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 4, 9; 4, 10; 4, 11; 4, 12; 8, 13; 8, 14; 9, 15; 10, 16; 11, 17; 12, 18; 16, 19; 16, 20; 17, 21; 17, 22; 20, 23; 20, 24; 22, 25; 22, 26; 24, 27; 24, 28; 24, 29; 24, 30; 25, 31; 25, 32; 27, 33; 27, 34; 28, 35; 28, 36; 29, 37; 29, 38; 30, 39; 30, 40
def areas_of_code(git_enrich, in_conn, out_conn, block_size=100): """Build and index for areas of code from a given Perceval RAW index. :param block_size: size of items block. :param git_enrich: GitEnrich object to deal with SortingHat affiliations. :param in_conn: ESPandasConnector to read from. :param out_conn: ESPandasConnector to write to. :return: number of documents written in ElasticSearch enriched index. """ aoc = AreasOfCode(in_connector=in_conn, out_connector=out_conn, block_size=block_size, git_enrich=git_enrich) ndocs = aoc.analyze() return ndocs
0, module; 1, function_definition; 2, function_name:get_my_feed; 3, parameters; 4, block; 5, identifier:self; 6, default_parameter; 7, default_parameter; 8, default_parameter; 9, default_parameter; 10, expression_statement; 11, expression_statement; 12, return_statement; 13, identifier:limit; 14, integer:150; 15, identifier:offset; 16, integer:20; 17, identifier:sort; 18, string:"updated"; 19, identifier:nid; 20, None; 21, comment:"""Get my feed :type limit: int :param limit: Number of posts from feed to get, starting from ``offset`` :type offset: int :param offset: Offset starting from bottom of feed :type sort: str :param sort: How to sort feed that will be retrieved; only current known value is "updated" :type nid: str :param nid: This is the ID of the network to get the feed from. This is optional and only to override the existing `network_id` entered when created the class """; 22, assignment; 23, call; 24, identifier:r; 25, call; 26, attribute; 27, argument_list; 28, attribute; 29, argument_list; 30, identifier:self; 31, identifier:_handle_error; 32, identifier:r; 33, string:"Could not retrieve your feed."; 34, identifier:self; 35, identifier:request; 36, keyword_argument; 37, keyword_argument; 38, keyword_argument; 39, identifier:method; 40, string:"network.get_my_feed"; 41, identifier:nid; 42, identifier:nid; 43, identifier:data; 44, call; 45, identifier:dict; 46, argument_list; 47, keyword_argument; 48, keyword_argument; 49, keyword_argument; 50, identifier:limit; 51, identifier:limit; 52, identifier:offset; 53, identifier:offset; 54, identifier:sort; 55, identifier:sort
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 3, 9; 4, 10; 4, 11; 4, 12; 6, 13; 6, 14; 7, 15; 7, 16; 8, 17; 8, 18; 9, 19; 9, 20; 10, 21; 11, 22; 12, 23; 22, 24; 22, 25; 23, 26; 23, 27; 25, 28; 25, 29; 26, 30; 26, 31; 27, 32; 27, 33; 28, 34; 28, 35; 29, 36; 29, 37; 29, 38; 36, 39; 36, 40; 37, 41; 37, 42; 38, 43; 38, 44; 44, 45; 44, 46; 46, 47; 46, 48; 46, 49; 47, 50; 47, 51; 48, 52; 48, 53; 49, 54; 49, 55
def get_my_feed(self, limit=150, offset=20, sort="updated", nid=None): """Get my feed :type limit: int :param limit: Number of posts from feed to get, starting from ``offset`` :type offset: int :param offset: Offset starting from bottom of feed :type sort: str :param sort: How to sort feed that will be retrieved; only current known value is "updated" :type nid: str :param nid: This is the ID of the network to get the feed from. This is optional and only to override the existing `network_id` entered when created the class """ r = self.request( method="network.get_my_feed", nid=nid, data=dict( limit=limit, offset=offset, sort=sort ) ) return self._handle_error(r, "Could not retrieve your feed.")
0, module; 1, function_definition; 2, function_name:filter_feed; 3, parameters; 4, block; 5, identifier:self; 6, default_parameter; 7, default_parameter; 8, default_parameter; 9, default_parameter; 10, default_parameter; 11, default_parameter; 12, expression_statement; 13, assert_statement; 14, if_statement; 15, if_statement; 16, expression_statement; 17, return_statement; 18, identifier:updated; 19, False; 20, identifier:following; 21, False; 22, identifier:folder; 23, False; 24, identifier:filter_folder; 25, string:""; 26, identifier:sort; 27, string:"updated"; 28, identifier:nid; 29, None; 30, comment:"""Get filtered feed Only one filter type (updated, following, folder) is possible. :type nid: str :param nid: This is the ID of the network to get the feed from. This is optional and only to override the existing `network_id` entered when created the class :type sort: str :param sort: How to sort feed that will be retrieved; only current known value is "updated" :type updated: bool :param updated: Set to filter through only posts which have been updated since you last read them :type following: bool :param following: Set to filter through only posts which you are following :type folder: bool :param folder: Set to filter through only posts which are in the provided ``filter_folder`` :type filter_folder: str :param filter_folder: Name of folder to show posts from; required only if ``folder`` is set """; 31, comparison_operator:sum([updated, following, folder]) == 1; 32, identifier:folder; 33, block; 34, identifier:updated; 35, block; 36, elif_clause; 37, else_clause; 38, assignment; 39, call; 40, call; 41, integer:1; 42, assert_statement; 43, expression_statement; 44, identifier:following; 45, block; 46, block; 47, identifier:r; 48, call; 49, attribute; 50, argument_list; 51, identifier:sum; 52, argument_list; 53, identifier:filter_folder; 54, assignment; 55, expression_statement; 56, expression_statement; 57, attribute; 58, argument_list; 59, identifier:self; 60, identifier:_handle_error; 61, identifier:r; 62, string:"Could not retrieve filtered feed."; 63, list; 64, identifier:filter_type; 65, call; 66, assignment; 67, assignment; 68, identifier:self; 69, identifier:request; 70, keyword_argument; 71, keyword_argument; 72, keyword_argument; 73, identifier:updated; 74, identifier:following; 75, identifier:folder; 76, identifier:dict; 77, argument_list; 78, identifier:filter_type; 79, call; 80, identifier:filter_type; 81, call; 82, identifier:nid; 83, identifier:nid; 84, identifier:method; 85, string:"network.filter_feed"; 86, identifier:data; 87, call; 88, keyword_argument; 89, identifier:dict; 90, argument_list; 91, identifier:dict; 92, argument_list; 93, identifier:dict; 94, argument_list; 95, identifier:updated; 96, integer:1; 97, keyword_argument; 98, keyword_argument; 99, keyword_argument; 100, keyword_argument; 101, dictionary_splat; 102, identifier:following; 103, integer:1; 104, identifier:folder; 105, integer:1; 106, identifier:filter_folder; 107, identifier:filter_folder; 108, identifier:sort; 109, identifier:sort; 110, identifier:filter_type
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 3, 9; 3, 10; 3, 11; 4, 12; 4, 13; 4, 14; 4, 15; 4, 16; 4, 17; 6, 18; 6, 19; 7, 20; 7, 21; 8, 22; 8, 23; 9, 24; 9, 25; 10, 26; 10, 27; 11, 28; 11, 29; 12, 30; 13, 31; 14, 32; 14, 33; 15, 34; 15, 35; 15, 36; 15, 37; 16, 38; 17, 39; 31, 40; 31, 41; 33, 42; 35, 43; 36, 44; 36, 45; 37, 46; 38, 47; 38, 48; 39, 49; 39, 50; 40, 51; 40, 52; 42, 53; 43, 54; 45, 55; 46, 56; 48, 57; 48, 58; 49, 59; 49, 60; 50, 61; 50, 62; 52, 63; 54, 64; 54, 65; 55, 66; 56, 67; 57, 68; 57, 69; 58, 70; 58, 71; 58, 72; 63, 73; 63, 74; 63, 75; 65, 76; 65, 77; 66, 78; 66, 79; 67, 80; 67, 81; 70, 82; 70, 83; 71, 84; 71, 85; 72, 86; 72, 87; 77, 88; 79, 89; 79, 90; 81, 91; 81, 92; 87, 93; 87, 94; 88, 95; 88, 96; 90, 97; 92, 98; 92, 99; 94, 100; 94, 101; 97, 102; 97, 103; 98, 104; 98, 105; 99, 106; 99, 107; 100, 108; 100, 109; 101, 110
def filter_feed(self, updated=False, following=False, folder=False, filter_folder="", sort="updated", nid=None): """Get filtered feed Only one filter type (updated, following, folder) is possible. :type nid: str :param nid: This is the ID of the network to get the feed from. This is optional and only to override the existing `network_id` entered when created the class :type sort: str :param sort: How to sort feed that will be retrieved; only current known value is "updated" :type updated: bool :param updated: Set to filter through only posts which have been updated since you last read them :type following: bool :param following: Set to filter through only posts which you are following :type folder: bool :param folder: Set to filter through only posts which are in the provided ``filter_folder`` :type filter_folder: str :param filter_folder: Name of folder to show posts from; required only if ``folder`` is set """ assert sum([updated, following, folder]) == 1 if folder: assert filter_folder if updated: filter_type = dict(updated=1) elif following: filter_type = dict(following=1) else: filter_type = dict(folder=1, filter_folder=filter_folder) r = self.request( nid=nid, method="network.filter_feed", data=dict( sort=sort, **filter_type ) ) return self._handle_error(r, "Could not retrieve filtered feed.")
0, module; 1, function_definition; 2, function_name:minus; 3, parameters; 4, block; 5, identifier:repo_list_a; 6, identifier:repo_list_b; 7, expression_statement; 8, expression_statement; 9, for_statement; 10, expression_statement; 11, for_statement; 12, return_statement; 13, comment:"""Method to create a list of repositories such that the repository belongs to repo list a but not repo list b. In an ideal scenario we should be able to do this by set(a) - set(b) but as GithubRepositories have shown that set() on them is not reliable resort to this until it is all sorted out. :param repo_list_a: List of repositories. :param repo_list_b: List of repositories. """; 14, assignment; 15, identifier:repo; 16, identifier:repo_list_b; 17, block; 18, assignment; 19, identifier:repo; 20, identifier:repo_list_a; 21, block; 22, identifier:a_minus_b; 23, identifier:included; 24, call; 25, expression_statement; 26, identifier:a_minus_b; 27, call; 28, if_statement; 29, identifier:defaultdict; 30, argument_list; 31, assignment; 32, identifier:list; 33, argument_list; 34, not_operator; 35, block; 36, lambda; 37, subscript; 38, True; 39, subscript; 40, expression_statement; 41, expression_statement; 42, False; 43, identifier:included; 44, attribute; 45, identifier:included; 46, attribute; 47, assignment; 48, call; 49, identifier:repo; 50, identifier:full_name; 51, identifier:repo; 52, identifier:full_name; 53, subscript; 54, True; 55, attribute; 56, argument_list; 57, identifier:included; 58, attribute; 59, identifier:a_minus_b; 60, identifier:append; 61, identifier:repo; 62, identifier:repo; 63, identifier:full_name
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 7, 13; 8, 14; 9, 15; 9, 16; 9, 17; 10, 18; 11, 19; 11, 20; 11, 21; 12, 22; 14, 23; 14, 24; 17, 25; 18, 26; 18, 27; 21, 28; 24, 29; 24, 30; 25, 31; 27, 32; 27, 33; 28, 34; 28, 35; 30, 36; 31, 37; 31, 38; 34, 39; 35, 40; 35, 41; 36, 42; 37, 43; 37, 44; 39, 45; 39, 46; 40, 47; 41, 48; 44, 49; 44, 50; 46, 51; 46, 52; 47, 53; 47, 54; 48, 55; 48, 56; 53, 57; 53, 58; 55, 59; 55, 60; 56, 61; 58, 62; 58, 63
def minus(repo_list_a, repo_list_b): """Method to create a list of repositories such that the repository belongs to repo list a but not repo list b. In an ideal scenario we should be able to do this by set(a) - set(b) but as GithubRepositories have shown that set() on them is not reliable resort to this until it is all sorted out. :param repo_list_a: List of repositories. :param repo_list_b: List of repositories. """ included = defaultdict(lambda: False) for repo in repo_list_b: included[repo.full_name] = True a_minus_b = list() for repo in repo_list_a: if not included[repo.full_name]: included[repo.full_name] = True a_minus_b.append(repo) return a_minus_b
0, module; 1, function_definition; 2, function_name:search; 3, parameters; 4, block; 5, identifier:self; 6, identifier:text; 7, default_parameter; 8, default_parameter; 9, default_parameter; 10, default_parameter; 11, expression_statement; 12, expression_statement; 13, expression_statement; 14, return_statement; 15, identifier:limit; 16, integer:1000; 17, identifier:order_by; 18, None; 19, identifier:sort_order; 20, None; 21, identifier:filter; 22, None; 23, comment:""" Do a fulltext search for series in the Fred dataset. Returns information about matching series in a DataFrame. Parameters ---------- text : str text to do fulltext search on, e.g., 'Real GDP' limit : int, optional limit the number of results to this value. If limit is 0, it means fetching all results without limit. order_by : str, optional order the results by a criterion. Valid options are 'search_rank', 'series_id', 'title', 'units', 'frequency', 'seasonal_adjustment', 'realtime_start', 'realtime_end', 'last_updated', 'observation_start', 'observation_end', 'popularity' sort_order : str, optional sort the results by ascending or descending order. Valid options are 'asc' or 'desc' filter : tuple, optional filters the results. Expects a tuple like (filter_variable, filter_value). Valid filter_variable values are 'frequency', 'units', and 'seasonal_adjustment' Returns ------- info : DataFrame a DataFrame containing information about the matching Fred series """; 24, assignment; 25, assignment; 26, identifier:info; 27, identifier:url; 28, binary_operator:"%s/series/search?search_text=%s&" % (self.root_url, quote_plus(text)); 29, identifier:info; 30, call; 31, string:"%s/series/search?search_text=%s&"; 32, tuple; 33, attribute; 34, argument_list; 35, attribute; 36, call; 37, identifier:self; 38, identifier:__get_search_results; 39, identifier:url; 40, identifier:limit; 41, identifier:order_by; 42, identifier:sort_order; 43, identifier:filter; 44, identifier:self; 45, identifier:root_url; 46, identifier:quote_plus; 47, argument_list; 48, identifier:text
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 3, 9; 3, 10; 4, 11; 4, 12; 4, 13; 4, 14; 7, 15; 7, 16; 8, 17; 8, 18; 9, 19; 9, 20; 10, 21; 10, 22; 11, 23; 12, 24; 13, 25; 14, 26; 24, 27; 24, 28; 25, 29; 25, 30; 28, 31; 28, 32; 30, 33; 30, 34; 32, 35; 32, 36; 33, 37; 33, 38; 34, 39; 34, 40; 34, 41; 34, 42; 34, 43; 35, 44; 35, 45; 36, 46; 36, 47; 47, 48
def search(self, text, limit=1000, order_by=None, sort_order=None, filter=None): """ Do a fulltext search for series in the Fred dataset. Returns information about matching series in a DataFrame. Parameters ---------- text : str text to do fulltext search on, e.g., 'Real GDP' limit : int, optional limit the number of results to this value. If limit is 0, it means fetching all results without limit. order_by : str, optional order the results by a criterion. Valid options are 'search_rank', 'series_id', 'title', 'units', 'frequency', 'seasonal_adjustment', 'realtime_start', 'realtime_end', 'last_updated', 'observation_start', 'observation_end', 'popularity' sort_order : str, optional sort the results by ascending or descending order. Valid options are 'asc' or 'desc' filter : tuple, optional filters the results. Expects a tuple like (filter_variable, filter_value). Valid filter_variable values are 'frequency', 'units', and 'seasonal_adjustment' Returns ------- info : DataFrame a DataFrame containing information about the matching Fred series """ url = "%s/series/search?search_text=%s&" % (self.root_url, quote_plus(text)) info = self.__get_search_results(url, limit, order_by, sort_order, filter) return info
0, module; 1, function_definition; 2, function_name:search_by_release; 3, parameters; 4, block; 5, identifier:self; 6, identifier:release_id; 7, default_parameter; 8, default_parameter; 9, default_parameter; 10, default_parameter; 11, expression_statement; 12, expression_statement; 13, expression_statement; 14, if_statement; 15, return_statement; 16, identifier:limit; 17, integer:0; 18, identifier:order_by; 19, None; 20, identifier:sort_order; 21, None; 22, identifier:filter; 23, None; 24, comment:""" Search for series that belongs to a release id. Returns information about matching series in a DataFrame. Parameters ---------- release_id : int release id, e.g., 151 limit : int, optional limit the number of results to this value. If limit is 0, it means fetching all results without limit. order_by : str, optional order the results by a criterion. Valid options are 'search_rank', 'series_id', 'title', 'units', 'frequency', 'seasonal_adjustment', 'realtime_start', 'realtime_end', 'last_updated', 'observation_start', 'observation_end', 'popularity' sort_order : str, optional sort the results by ascending or descending order. Valid options are 'asc' or 'desc' filter : tuple, optional filters the results. Expects a tuple like (filter_variable, filter_value). Valid filter_variable values are 'frequency', 'units', and 'seasonal_adjustment' Returns ------- info : DataFrame a DataFrame containing information about the matching Fred series """; 25, assignment; 26, assignment; 27, comparison_operator:info is None; 28, block; 29, identifier:info; 30, identifier:url; 31, binary_operator:"%s/release/series?release_id=%d" % (self.root_url, release_id); 32, identifier:info; 33, call; 34, identifier:info; 35, None; 36, raise_statement; 37, string:"%s/release/series?release_id=%d"; 38, tuple; 39, attribute; 40, argument_list; 41, call; 42, attribute; 43, identifier:release_id; 44, identifier:self; 45, identifier:__get_search_results; 46, identifier:url; 47, identifier:limit; 48, identifier:order_by; 49, identifier:sort_order; 50, identifier:filter; 51, identifier:ValueError; 52, argument_list; 53, identifier:self; 54, identifier:root_url; 55, binary_operator:'No series exists for release id: ' + str(release_id); 56, string; 57, call; 58, string_content:No series exists for release id:; 59, identifier:str; 60, argument_list; 61, identifier:release_id
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 3, 9; 3, 10; 4, 11; 4, 12; 4, 13; 4, 14; 4, 15; 7, 16; 7, 17; 8, 18; 8, 19; 9, 20; 9, 21; 10, 22; 10, 23; 11, 24; 12, 25; 13, 26; 14, 27; 14, 28; 15, 29; 25, 30; 25, 31; 26, 32; 26, 33; 27, 34; 27, 35; 28, 36; 31, 37; 31, 38; 33, 39; 33, 40; 36, 41; 38, 42; 38, 43; 39, 44; 39, 45; 40, 46; 40, 47; 40, 48; 40, 49; 40, 50; 41, 51; 41, 52; 42, 53; 42, 54; 52, 55; 55, 56; 55, 57; 56, 58; 57, 59; 57, 60; 60, 61
def search_by_release(self, release_id, limit=0, order_by=None, sort_order=None, filter=None): """ Search for series that belongs to a release id. Returns information about matching series in a DataFrame. Parameters ---------- release_id : int release id, e.g., 151 limit : int, optional limit the number of results to this value. If limit is 0, it means fetching all results without limit. order_by : str, optional order the results by a criterion. Valid options are 'search_rank', 'series_id', 'title', 'units', 'frequency', 'seasonal_adjustment', 'realtime_start', 'realtime_end', 'last_updated', 'observation_start', 'observation_end', 'popularity' sort_order : str, optional sort the results by ascending or descending order. Valid options are 'asc' or 'desc' filter : tuple, optional filters the results. Expects a tuple like (filter_variable, filter_value). Valid filter_variable values are 'frequency', 'units', and 'seasonal_adjustment' Returns ------- info : DataFrame a DataFrame containing information about the matching Fred series """ url = "%s/release/series?release_id=%d" % (self.root_url, release_id) info = self.__get_search_results(url, limit, order_by, sort_order, filter) if info is None: raise ValueError('No series exists for release id: ' + str(release_id)) return info
0, module; 1, function_definition; 2, function_name:search_by_category; 3, parameters; 4, block; 5, identifier:self; 6, identifier:category_id; 7, default_parameter; 8, default_parameter; 9, default_parameter; 10, default_parameter; 11, expression_statement; 12, expression_statement; 13, expression_statement; 14, if_statement; 15, return_statement; 16, identifier:limit; 17, integer:0; 18, identifier:order_by; 19, None; 20, identifier:sort_order; 21, None; 22, identifier:filter; 23, None; 24, comment:""" Search for series that belongs to a category id. Returns information about matching series in a DataFrame. Parameters ---------- category_id : int category id, e.g., 32145 limit : int, optional limit the number of results to this value. If limit is 0, it means fetching all results without limit. order_by : str, optional order the results by a criterion. Valid options are 'search_rank', 'series_id', 'title', 'units', 'frequency', 'seasonal_adjustment', 'realtime_start', 'realtime_end', 'last_updated', 'observation_start', 'observation_end', 'popularity' sort_order : str, optional sort the results by ascending or descending order. Valid options are 'asc' or 'desc' filter : tuple, optional filters the results. Expects a tuple like (filter_variable, filter_value). Valid filter_variable values are 'frequency', 'units', and 'seasonal_adjustment' Returns ------- info : DataFrame a DataFrame containing information about the matching Fred series """; 25, assignment; 26, assignment; 27, comparison_operator:info is None; 28, block; 29, identifier:info; 30, identifier:url; 31, binary_operator:"%s/category/series?category_id=%d&" % (self.root_url, category_id); 32, identifier:info; 33, call; 34, identifier:info; 35, None; 36, raise_statement; 37, string:"%s/category/series?category_id=%d&"; 38, tuple; 39, attribute; 40, argument_list; 41, call; 42, attribute; 43, identifier:category_id; 44, identifier:self; 45, identifier:__get_search_results; 46, identifier:url; 47, identifier:limit; 48, identifier:order_by; 49, identifier:sort_order; 50, identifier:filter; 51, identifier:ValueError; 52, argument_list; 53, identifier:self; 54, identifier:root_url; 55, binary_operator:'No series exists for category id: ' + str(category_id); 56, string; 57, call; 58, string_content:No series exists for category id:; 59, identifier:str; 60, argument_list; 61, identifier:category_id
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 3, 9; 3, 10; 4, 11; 4, 12; 4, 13; 4, 14; 4, 15; 7, 16; 7, 17; 8, 18; 8, 19; 9, 20; 9, 21; 10, 22; 10, 23; 11, 24; 12, 25; 13, 26; 14, 27; 14, 28; 15, 29; 25, 30; 25, 31; 26, 32; 26, 33; 27, 34; 27, 35; 28, 36; 31, 37; 31, 38; 33, 39; 33, 40; 36, 41; 38, 42; 38, 43; 39, 44; 39, 45; 40, 46; 40, 47; 40, 48; 40, 49; 40, 50; 41, 51; 41, 52; 42, 53; 42, 54; 52, 55; 55, 56; 55, 57; 56, 58; 57, 59; 57, 60; 60, 61
def search_by_category(self, category_id, limit=0, order_by=None, sort_order=None, filter=None): """ Search for series that belongs to a category id. Returns information about matching series in a DataFrame. Parameters ---------- category_id : int category id, e.g., 32145 limit : int, optional limit the number of results to this value. If limit is 0, it means fetching all results without limit. order_by : str, optional order the results by a criterion. Valid options are 'search_rank', 'series_id', 'title', 'units', 'frequency', 'seasonal_adjustment', 'realtime_start', 'realtime_end', 'last_updated', 'observation_start', 'observation_end', 'popularity' sort_order : str, optional sort the results by ascending or descending order. Valid options are 'asc' or 'desc' filter : tuple, optional filters the results. Expects a tuple like (filter_variable, filter_value). Valid filter_variable values are 'frequency', 'units', and 'seasonal_adjustment' Returns ------- info : DataFrame a DataFrame containing information about the matching Fred series """ url = "%s/category/series?category_id=%d&" % (self.root_url, category_id) info = self.__get_search_results(url, limit, order_by, sort_order, filter) if info is None: raise ValueError('No series exists for category id: ' + str(category_id)) return info
0, module; 1, function_definition; 2, function_name:format_name; 3, parameters; 4, block; 5, identifier:subject; 6, expression_statement; 7, if_statement; 8, return_statement; 9, comment:"""Convert a subject into the canonical form for distinguished names. This function does not take care of sorting the subject in any meaningful order. Examples:: >>> format_name([('CN', 'example.com'), ]) '/CN=example.com' >>> format_name([('CN', 'example.com'), ('O', "My Organization"), ]) '/CN=example.com/O=My Organization' """; 10, call; 11, block; 12, binary_operator:'/%s' % ('/'.join(['%s=%s' % (force_text(k), force_text(v)) for k, v in subject])); 13, identifier:isinstance; 14, argument_list; 15, expression_statement; 16, string; 17, parenthesized_expression; 18, identifier:subject; 19, attribute; 20, assignment; 21, string_content:/%s; 22, call; 23, identifier:x509; 24, identifier:Name; 25, identifier:subject; 26, list_comprehension; 27, attribute; 28, argument_list; 29, tuple; 30, for_in_clause; 31, string; 32, identifier:join; 33, list_comprehension; 34, subscript; 35, attribute; 36, identifier:s; 37, identifier:subject; 38, string_content:/; 39, binary_operator:'%s=%s' % (force_text(k), force_text(v)); 40, for_in_clause; 41, identifier:OID_NAME_MAPPINGS; 42, attribute; 43, identifier:s; 44, identifier:value; 45, string; 46, tuple; 47, pattern_list; 48, identifier:subject; 49, identifier:s; 50, identifier:oid; 51, string_content:%s=%s; 52, call; 53, call; 54, identifier:k; 55, identifier:v; 56, identifier:force_text; 57, argument_list; 58, identifier:force_text; 59, argument_list; 60, identifier:k; 61, identifier:v
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 4, 6; 4, 7; 4, 8; 6, 9; 7, 10; 7, 11; 8, 12; 10, 13; 10, 14; 11, 15; 12, 16; 12, 17; 14, 18; 14, 19; 15, 20; 16, 21; 17, 22; 19, 23; 19, 24; 20, 25; 20, 26; 22, 27; 22, 28; 26, 29; 26, 30; 27, 31; 27, 32; 28, 33; 29, 34; 29, 35; 30, 36; 30, 37; 31, 38; 33, 39; 33, 40; 34, 41; 34, 42; 35, 43; 35, 44; 39, 45; 39, 46; 40, 47; 40, 48; 42, 49; 42, 50; 45, 51; 46, 52; 46, 53; 47, 54; 47, 55; 52, 56; 52, 57; 53, 58; 53, 59; 57, 60; 59, 61
def format_name(subject): """Convert a subject into the canonical form for distinguished names. This function does not take care of sorting the subject in any meaningful order. Examples:: >>> format_name([('CN', 'example.com'), ]) '/CN=example.com' >>> format_name([('CN', 'example.com'), ('O', "My Organization"), ]) '/CN=example.com/O=My Organization' """ if isinstance(subject, x509.Name): subject = [(OID_NAME_MAPPINGS[s.oid], s.value) for s in subject] return '/%s' % ('/'.join(['%s=%s' % (force_text(k), force_text(v)) for k, v in subject]))
0, module; 1, function_definition; 2, function_name:parse_name; 3, parameters; 4, block; 5, identifier:name; 6, expression_statement; 7, expression_statement; 8, if_statement; 9, try_statement; 10, comment:# Check that no OIDs not in MULTIPLE_OIDS occur more then once; 11, for_statement; 12, return_statement; 13, comment:"""Parses a subject string as used in OpenSSLs command line utilities. The ``name`` is expected to be close to the subject format commonly used by OpenSSL, for example ``/C=AT/L=Vienna/CN=example.com/[email protected]``. The function does its best to be lenient on deviations from the format, object identifiers are case-insensitive (e.g. ``cn`` is the same as ``CN``, whitespace at the start and end is stripped and the subject does not have to start with a slash (``/``). >>> parse_name('/CN=example.com') [('CN', 'example.com')] >>> parse_name('c=AT/l= Vienna/o="ex org"/CN=example.com') [('C', 'AT'), ('L', 'Vienna'), ('O', 'ex org'), ('CN', 'example.com')] Dictionary keys are normalized to the values of :py:const:`OID_NAME_MAPPINGS` and keys will be sorted based on x509 name specifications regardless of the given order: >>> parse_name('L="Vienna / District"/[email protected]') [('L', 'Vienna / District'), ('emailAddress', '[email protected]')] >>> parse_name('/C=AT/CN=example.com') == parse_name('/CN=example.com/C=AT') True Due to the magic of :py:const:`NAME_RE`, the function even supports quoting strings and including slashes, so strings like ``/OU="Org / Org Unit"/CN=example.com`` will work as expected. >>> parse_name('L="Vienna / District"/CN=example.com') [('L', 'Vienna / District'), ('CN', 'example.com')] But note that it's still easy to trick this function, if you really want to. The following example is *not* a valid subject, the location is just bogus, and whatever you were expecting as output, it's certainly different: >>> parse_name('L="Vienna " District"/CN=example.com') [('L', 'Vienna'), ('CN', 'example.com')] Examples of where this string is used are: .. code-block:: console # openssl req -new -key priv.key -out csr -utf8 -batch -sha256 -subj '/C=AT/CN=example.com' # openssl x509 -in cert.pem -noout -subject -nameopt compat /C=AT/L=Vienna/CN=example.com """; 14, assignment; 15, not_operator; 16, comment:# empty subjects are ok; 17, block; 18, block; 19, except_clause; 20, pattern_list; 21, call; 22, block; 23, call; 24, identifier:name; 25, call; 26, identifier:name; 27, return_statement; 28, expression_statement; 29, as_pattern; 30, block; 31, identifier:key; 32, identifier:oid; 33, attribute; 34, argument_list; 35, if_statement; 36, identifier:sort_name; 37, argument_list; 38, attribute; 39, argument_list; 40, list; 41, assignment; 42, identifier:KeyError; 43, as_pattern_target; 44, raise_statement; 45, identifier:NAME_OID_MAPPINGS; 46, identifier:items; 47, boolean_operator; 48, block; 49, identifier:items; 50, identifier:name; 51, identifier:strip; 52, identifier:items; 53, list_comprehension; 54, identifier:e; 55, call; 56, comparison_operator:sum(1 for t in items if t[0] == key) > 1; 57, comparison_operator:oid not in MULTIPLE_OIDS; 58, raise_statement; 59, tuple; 60, for_in_clause; 61, identifier:ValueError; 62, argument_list; 63, call; 64, integer:1; 65, identifier:oid; 66, identifier:MULTIPLE_OIDS; 67, call; 68, subscript; 69, call; 70, identifier:t; 71, call; 72, binary_operator:'Unknown x509 name field: %s' % e.args[0]; 73, identifier:sum; 74, generator_expression; 75, identifier:ValueError; 76, argument_list; 77, identifier:NAME_CASE_MAPPINGS; 78, call; 79, identifier:force_text; 80, argument_list; 81, attribute; 82, argument_list; 83, string; 84, subscript; 85, integer:1; 86, for_in_clause; 87, if_clause; 88, binary_operator:'Subject contains multiple "%s" fields' % key; 89, attribute; 90, argument_list; 91, subscript; 92, identifier:NAME_RE; 93, identifier:findall; 94, identifier:name; 95, string_content:Unknown x509 name field: %s; 96, attribute; 97, integer:0; 98, identifier:t; 99, identifier:items; 100, comparison_operator:t[0] == key; 101, string:'Subject contains multiple "%s" fields'; 102, identifier:key; 103, subscript; 104, identifier:upper; 105, identifier:t; 106, integer:2; 107, identifier:e; 108, identifier:args; 109, subscript; 110, identifier:key; 111, identifier:t; 112, integer:0; 113, identifier:t; 114, integer:0
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 4, 6; 4, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 6, 13; 7, 14; 8, 15; 8, 16; 8, 17; 9, 18; 9, 19; 11, 20; 11, 21; 11, 22; 12, 23; 14, 24; 14, 25; 15, 26; 17, 27; 18, 28; 19, 29; 19, 30; 20, 31; 20, 32; 21, 33; 21, 34; 22, 35; 23, 36; 23, 37; 25, 38; 25, 39; 27, 40; 28, 41; 29, 42; 29, 43; 30, 44; 33, 45; 33, 46; 35, 47; 35, 48; 37, 49; 38, 50; 38, 51; 41, 52; 41, 53; 43, 54; 44, 55; 47, 56; 47, 57; 48, 58; 53, 59; 53, 60; 55, 61; 55, 62; 56, 63; 56, 64; 57, 65; 57, 66; 58, 67; 59, 68; 59, 69; 60, 70; 60, 71; 62, 72; 63, 73; 63, 74; 67, 75; 67, 76; 68, 77; 68, 78; 69, 79; 69, 80; 71, 81; 71, 82; 72, 83; 72, 84; 74, 85; 74, 86; 74, 87; 76, 88; 78, 89; 78, 90; 80, 91; 81, 92; 81, 93; 82, 94; 83, 95; 84, 96; 84, 97; 86, 98; 86, 99; 87, 100; 88, 101; 88, 102; 89, 103; 89, 104; 91, 105; 91, 106; 96, 107; 96, 108; 100, 109; 100, 110; 103, 111; 103, 112; 109, 113; 109, 114
def parse_name(name): """Parses a subject string as used in OpenSSLs command line utilities. The ``name`` is expected to be close to the subject format commonly used by OpenSSL, for example ``/C=AT/L=Vienna/CN=example.com/[email protected]``. The function does its best to be lenient on deviations from the format, object identifiers are case-insensitive (e.g. ``cn`` is the same as ``CN``, whitespace at the start and end is stripped and the subject does not have to start with a slash (``/``). >>> parse_name('/CN=example.com') [('CN', 'example.com')] >>> parse_name('c=AT/l= Vienna/o="ex org"/CN=example.com') [('C', 'AT'), ('L', 'Vienna'), ('O', 'ex org'), ('CN', 'example.com')] Dictionary keys are normalized to the values of :py:const:`OID_NAME_MAPPINGS` and keys will be sorted based on x509 name specifications regardless of the given order: >>> parse_name('L="Vienna / District"/[email protected]') [('L', 'Vienna / District'), ('emailAddress', '[email protected]')] >>> parse_name('/C=AT/CN=example.com') == parse_name('/CN=example.com/C=AT') True Due to the magic of :py:const:`NAME_RE`, the function even supports quoting strings and including slashes, so strings like ``/OU="Org / Org Unit"/CN=example.com`` will work as expected. >>> parse_name('L="Vienna / District"/CN=example.com') [('L', 'Vienna / District'), ('CN', 'example.com')] But note that it's still easy to trick this function, if you really want to. The following example is *not* a valid subject, the location is just bogus, and whatever you were expecting as output, it's certainly different: >>> parse_name('L="Vienna " District"/CN=example.com') [('L', 'Vienna'), ('CN', 'example.com')] Examples of where this string is used are: .. code-block:: console # openssl req -new -key priv.key -out csr -utf8 -batch -sha256 -subj '/C=AT/CN=example.com' # openssl x509 -in cert.pem -noout -subject -nameopt compat /C=AT/L=Vienna/CN=example.com """ name = name.strip() if not name: # empty subjects are ok return [] try: items = [(NAME_CASE_MAPPINGS[t[0].upper()], force_text(t[2])) for t in NAME_RE.findall(name)] except KeyError as e: raise ValueError('Unknown x509 name field: %s' % e.args[0]) # Check that no OIDs not in MULTIPLE_OIDS occur more then once for key, oid in NAME_OID_MAPPINGS.items(): if sum(1 for t in items if t[0] == key) > 1 and oid not in MULTIPLE_OIDS: raise ValueError('Subject contains multiple "%s" fields' % key) return sort_name(items)
0, module; 1, function_definition; 2, function_name:prune_overridden; 3, parameters; 4, block; 5, identifier:ansi_string; 6, expression_statement; 7, expression_statement; 8, comment:# Sequences with multiple color codes.; 9, for_statement; 10, return_statement; 11, comment:"""Remove color codes that are rendered ineffective by subsequent codes in one escape sequence then sort codes. :param str ansi_string: Incoming ansi_string with ANSI color codes. :return: Color string with pruned color sequences. :rtype: str """; 12, assignment; 13, pattern_list; 14, identifier:multi_seqs; 15, block; 16, identifier:ansi_string; 17, identifier:multi_seqs; 18, call; 19, identifier:escape; 20, identifier:codes; 21, expression_statement; 22, comment:# Nuke everything before {/all}.; 23, try_statement; 24, comment:# Thin out groups.; 25, for_statement; 26, comment:# Done.; 27, expression_statement; 28, if_statement; 29, identifier:set; 30, generator_expression; 31, assignment; 32, block; 33, except_clause; 34, identifier:group; 35, identifier:CODE_GROUPS; 36, block; 37, assignment; 38, comparison_operator:codes != reduced_codes; 39, block; 40, identifier:p; 41, for_in_clause; 42, if_clause; 43, identifier:r_codes; 44, call; 45, expression_statement; 46, identifier:ValueError; 47, block; 48, for_statement; 49, identifier:reduced_codes; 50, call; 51, identifier:codes; 52, identifier:reduced_codes; 53, expression_statement; 54, identifier:p; 55, call; 56, comparison_operator:';' in p[1]; 57, identifier:list; 58, argument_list; 59, assignment; 60, pass_statement; 61, identifier:pos; 62, call; 63, block; 64, attribute; 65, argument_list; 66, assignment; 67, attribute; 68, argument_list; 69, string; 70, subscript; 71, call; 72, identifier:r_codes; 73, subscript; 74, identifier:reversed; 75, argument_list; 76, expression_statement; 77, string; 78, identifier:join; 79, call; 80, identifier:ansi_string; 81, call; 82, identifier:RE_ANSI; 83, identifier:findall; 84, identifier:ansi_string; 85, string_content:;; 86, identifier:p; 87, integer:1; 88, identifier:reversed; 89, argument_list; 90, identifier:r_codes; 91, slice; 92, subscript; 93, call; 94, string_content:;; 95, identifier:sorted; 96, argument_list; 97, attribute; 98, argument_list; 99, call; 100, binary_operator:r_codes.index('0') + 1; 101, list_comprehension; 102, slice; 103, attribute; 104, argument_list; 105, identifier:r_codes; 106, keyword_argument; 107, identifier:ansi_string; 108, identifier:replace; 109, identifier:escape; 110, binary_operator:'\033[' + reduced_codes + 'm'; 111, attribute; 112, argument_list; 113, call; 114, integer:1; 115, identifier:i; 116, for_in_clause; 117, if_clause; 118, integer:1; 119, identifier:r_codes; 120, identifier:pop; 121, identifier:pos; 122, identifier:key; 123, identifier:int; 124, binary_operator:'\033[' + reduced_codes; 125, string; 126, identifier:codes; 127, identifier:split; 128, string; 129, attribute; 130, argument_list; 131, pattern_list; 132, call; 133, comparison_operator:n in group; 134, string; 135, identifier:reduced_codes; 136, string_content:m; 137, string_content:;; 138, identifier:r_codes; 139, identifier:index; 140, string; 141, identifier:i; 142, identifier:n; 143, identifier:enumerate; 144, argument_list; 145, identifier:n; 146, identifier:group; 147, string_content; 148, string_content:0; 149, identifier:r_codes; 150, escape_sequence:\033
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 4, 6; 4, 7; 4, 8; 4, 9; 4, 10; 6, 11; 7, 12; 9, 13; 9, 14; 9, 15; 10, 16; 12, 17; 12, 18; 13, 19; 13, 20; 15, 21; 15, 22; 15, 23; 15, 24; 15, 25; 15, 26; 15, 27; 15, 28; 18, 29; 18, 30; 21, 31; 23, 32; 23, 33; 25, 34; 25, 35; 25, 36; 27, 37; 28, 38; 28, 39; 30, 40; 30, 41; 30, 42; 31, 43; 31, 44; 32, 45; 33, 46; 33, 47; 36, 48; 37, 49; 37, 50; 38, 51; 38, 52; 39, 53; 41, 54; 41, 55; 42, 56; 44, 57; 44, 58; 45, 59; 47, 60; 48, 61; 48, 62; 48, 63; 50, 64; 50, 65; 53, 66; 55, 67; 55, 68; 56, 69; 56, 70; 58, 71; 59, 72; 59, 73; 62, 74; 62, 75; 63, 76; 64, 77; 64, 78; 65, 79; 66, 80; 66, 81; 67, 82; 67, 83; 68, 84; 69, 85; 70, 86; 70, 87; 71, 88; 71, 89; 73, 90; 73, 91; 75, 92; 76, 93; 77, 94; 79, 95; 79, 96; 81, 97; 81, 98; 89, 99; 91, 100; 92, 101; 92, 102; 93, 103; 93, 104; 96, 105; 96, 106; 97, 107; 97, 108; 98, 109; 98, 110; 99, 111; 99, 112; 100, 113; 100, 114; 101, 115; 101, 116; 101, 117; 102, 118; 103, 119; 103, 120; 104, 121; 106, 122; 106, 123; 110, 124; 110, 125; 111, 126; 111, 127; 112, 128; 113, 129; 113, 130; 116, 131; 116, 132; 117, 133; 124, 134; 124, 135; 125, 136; 128, 137; 129, 138; 129, 139; 130, 140; 131, 141; 131, 142; 132, 143; 132, 144; 133, 145; 133, 146; 134, 147; 140, 148; 144, 149; 147, 150
def prune_overridden(ansi_string): """Remove color codes that are rendered ineffective by subsequent codes in one escape sequence then sort codes. :param str ansi_string: Incoming ansi_string with ANSI color codes. :return: Color string with pruned color sequences. :rtype: str """ multi_seqs = set(p for p in RE_ANSI.findall(ansi_string) if ';' in p[1]) # Sequences with multiple color codes. for escape, codes in multi_seqs: r_codes = list(reversed(codes.split(';'))) # Nuke everything before {/all}. try: r_codes = r_codes[:r_codes.index('0') + 1] except ValueError: pass # Thin out groups. for group in CODE_GROUPS: for pos in reversed([i for i, n in enumerate(r_codes) if n in group][1:]): r_codes.pop(pos) # Done. reduced_codes = ';'.join(sorted(r_codes, key=int)) if codes != reduced_codes: ansi_string = ansi_string.replace(escape, '\033[' + reduced_codes + 'm') return ansi_string
0, module; 1, function_definition; 2, function_name:LDA_discriminants; 3, parameters; 4, block; 5, identifier:x; 6, identifier:labels; 7, expression_statement; 8, comment:# validate inputs; 9, try_statement; 10, comment:# make the LDA; 11, expression_statement; 12, return_statement; 13, comment:""" Linear Discriminant Analysis helper for determination how many columns of data should be reduced. **Args:** * `x` : input matrix (2d array), every row represents new sample * `labels` : list of labels (iterable), every item should be label for \ sample with corresponding index **Returns:** * `discriminants` : array of eigenvalues sorted in descending order """; 14, block; 15, except_clause; 16, assignment; 17, subscript; 18, expression_statement; 19, block; 20, pattern_list; 21, call; 22, identifier:eigen_values; 23, call; 24, assignment; 25, raise_statement; 26, identifier:eigen_values; 27, identifier:eigen_vectors; 28, identifier:LDA_base; 29, argument_list; 30, attribute; 31, argument_list; 32, identifier:x; 33, call; 34, call; 35, identifier:x; 36, identifier:labels; 37, parenthesized_expression; 38, identifier:argsort; 39, attribute; 40, argument_list; 41, identifier:ValueError; 42, argument_list; 43, unary_operator; 44, identifier:np; 45, identifier:array; 46, identifier:x; 47, string; 48, identifier:eigen_values; 49, string_content:Impossible to convert x to a numpy array.
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 7, 13; 9, 14; 9, 15; 11, 16; 12, 17; 14, 18; 15, 19; 16, 20; 16, 21; 17, 22; 17, 23; 18, 24; 19, 25; 20, 26; 20, 27; 21, 28; 21, 29; 23, 30; 23, 31; 24, 32; 24, 33; 25, 34; 29, 35; 29, 36; 30, 37; 30, 38; 33, 39; 33, 40; 34, 41; 34, 42; 37, 43; 39, 44; 39, 45; 40, 46; 42, 47; 43, 48; 47, 49
def LDA_discriminants(x, labels): """ Linear Discriminant Analysis helper for determination how many columns of data should be reduced. **Args:** * `x` : input matrix (2d array), every row represents new sample * `labels` : list of labels (iterable), every item should be label for \ sample with corresponding index **Returns:** * `discriminants` : array of eigenvalues sorted in descending order """ # validate inputs try: x = np.array(x) except: raise ValueError('Impossible to convert x to a numpy array.') # make the LDA eigen_values, eigen_vectors = LDA_base(x, labels) return eigen_values[(-eigen_values).argsort()]
0, module; 1, function_definition; 2, function_name:PCA_components; 3, parameters; 4, block; 5, identifier:x; 6, expression_statement; 7, comment:# validate inputs; 8, try_statement; 9, comment:# eigen values and eigen vectors of data covariance matrix; 10, expression_statement; 11, comment:# sort eigen vectors according biggest eigen value; 12, expression_statement; 13, comment:# form output - order the eigenvalues; 14, return_statement; 15, comment:""" Principal Component Analysis helper to check out eigenvalues of components. **Args:** * `x` : input matrix (2d array), every row represents new sample **Returns:** * `components`: sorted array of principal components eigenvalues """; 16, block; 17, except_clause; 18, assignment; 19, assignment; 20, subscript; 21, expression_statement; 22, block; 23, pattern_list; 24, call; 25, identifier:eigen_order; 26, subscript; 27, identifier:eigen_values; 28, call; 29, assignment; 30, raise_statement; 31, identifier:eigen_values; 32, identifier:eigen_vectors; 33, attribute; 34, argument_list; 35, attribute; 36, call; 37, attribute; 38, argument_list; 39, identifier:x; 40, call; 41, call; 42, attribute; 43, identifier:eig; 44, call; 45, identifier:eigen_vectors; 46, identifier:T; 47, attribute; 48, argument_list; 49, parenthesized_expression; 50, identifier:argsort; 51, attribute; 52, argument_list; 53, identifier:ValueError; 54, argument_list; 55, identifier:np; 56, identifier:linalg; 57, attribute; 58, argument_list; 59, parenthesized_expression; 60, identifier:argsort; 61, unary_operator; 62, identifier:np; 63, identifier:array; 64, identifier:x; 65, string; 66, identifier:np; 67, identifier:cov; 68, attribute; 69, unary_operator; 70, identifier:eigen_values; 71, string_content:Impossible to convert x to a numpy array.; 72, identifier:x; 73, identifier:T; 74, identifier:eigen_values
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 4, 6; 4, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 4, 14; 6, 15; 8, 16; 8, 17; 10, 18; 12, 19; 14, 20; 16, 21; 17, 22; 18, 23; 18, 24; 19, 25; 19, 26; 20, 27; 20, 28; 21, 29; 22, 30; 23, 31; 23, 32; 24, 33; 24, 34; 26, 35; 26, 36; 28, 37; 28, 38; 29, 39; 29, 40; 30, 41; 33, 42; 33, 43; 34, 44; 35, 45; 35, 46; 36, 47; 36, 48; 37, 49; 37, 50; 40, 51; 40, 52; 41, 53; 41, 54; 42, 55; 42, 56; 44, 57; 44, 58; 47, 59; 47, 60; 49, 61; 51, 62; 51, 63; 52, 64; 54, 65; 57, 66; 57, 67; 58, 68; 59, 69; 61, 70; 65, 71; 68, 72; 68, 73; 69, 74
def PCA_components(x): """ Principal Component Analysis helper to check out eigenvalues of components. **Args:** * `x` : input matrix (2d array), every row represents new sample **Returns:** * `components`: sorted array of principal components eigenvalues """ # validate inputs try: x = np.array(x) except: raise ValueError('Impossible to convert x to a numpy array.') # eigen values and eigen vectors of data covariance matrix eigen_values, eigen_vectors = np.linalg.eig(np.cov(x.T)) # sort eigen vectors according biggest eigen value eigen_order = eigen_vectors.T[(-eigen_values).argsort()] # form output - order the eigenvalues return eigen_values[(-eigen_values).argsort()]
0, module; 1, function_definition; 2, function_name:file_search; 3, parameters; 4, block; 5, identifier:self; 6, identifier:query; 7, default_parameter; 8, default_parameter; 9, expression_statement; 10, expression_statement; 11, try_statement; 12, return_statement; 13, identifier:offset; 14, None; 15, identifier:timeout; 16, None; 17, comment:""" Search for samples. In addition to retrieving all information on a particular file, VirusTotal allows you to perform what we call "advanced reverse searches". Reverse searches take you from a file property to a list of files that match that property. For example, this functionality enables you to retrieve all those files marked by at least one antivirus vendor as Zbot, or all those files that have a size under 90KB and are detected by at least 10 antivirus solutions, or all those PDF files that have an invalid XREF section, etc. This API is equivalent to VirusTotal Intelligence advanced searches. A very wide variety of search modifiers are available, including: file size, file type, first submission date to VirusTotal, last submission date to VirusTotal, number of positives, dynamic behavioural properties, binary content, submission file name, and a very long etcetera. The full list of search modifiers allowed for file search queries is documented at: https://www.virustotal.com/intelligence/help/file-search/#search-modifiers NOTE: Daily limited! No matter what API step you have licensed, this API call is limited to 50K requests per day. If you need any more, chances are you are approaching your engineering problem erroneously and you can probably solve it using the file distribution call. Do not hesitate to contact us with your particular use case. EXAMPLE: search_options = 'type:peexe size:90kb+ positives:5+ behaviour:"taskkill"' :param query: A search modifier compliant file search query. :param offset: (optional) The offset value returned by a previously issued identical query, allows you to paginate over the results. If not specified the first 300 matching files sorted according to last submission date to VirusTotal in a descending fashion will be returned. :param timeout: The amount of time in seconds the request should wait before timing out. :return: JSON response - By default the list returned contains at most 300 hashes, ordered according to last submission date to VirusTotal in a descending fashion. """; 18, assignment; 19, block; 20, except_clause; 21, call; 22, identifier:params; 23, call; 24, expression_statement; 25, as_pattern; 26, block; 27, identifier:_return_response_and_status_code; 28, argument_list; 29, identifier:dict; 30, argument_list; 31, assignment; 32, attribute; 33, as_pattern_target; 34, return_statement; 35, identifier:response; 36, keyword_argument; 37, keyword_argument; 38, keyword_argument; 39, identifier:response; 40, call; 41, identifier:requests; 42, identifier:RequestException; 43, identifier:e; 44, call; 45, identifier:apikey; 46, attribute; 47, identifier:query; 48, identifier:query; 49, identifier:offset; 50, identifier:offset; 51, attribute; 52, argument_list; 53, identifier:dict; 54, argument_list; 55, identifier:self; 56, identifier:api_key; 57, identifier:requests; 58, identifier:get; 59, binary_operator:self.base + 'file/search'; 60, keyword_argument; 61, keyword_argument; 62, keyword_argument; 63, keyword_argument; 64, attribute; 65, string; 66, identifier:params; 67, identifier:params; 68, identifier:proxies; 69, attribute; 70, identifier:timeout; 71, identifier:timeout; 72, identifier:error; 73, call; 74, identifier:self; 75, identifier:base; 76, string_content:file/search; 77, identifier:self; 78, identifier:proxies; 79, identifier:str; 80, argument_list; 81, identifier:e
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 4, 9; 4, 10; 4, 11; 4, 12; 7, 13; 7, 14; 8, 15; 8, 16; 9, 17; 10, 18; 11, 19; 11, 20; 12, 21; 18, 22; 18, 23; 19, 24; 20, 25; 20, 26; 21, 27; 21, 28; 23, 29; 23, 30; 24, 31; 25, 32; 25, 33; 26, 34; 28, 35; 30, 36; 30, 37; 30, 38; 31, 39; 31, 40; 32, 41; 32, 42; 33, 43; 34, 44; 36, 45; 36, 46; 37, 47; 37, 48; 38, 49; 38, 50; 40, 51; 40, 52; 44, 53; 44, 54; 46, 55; 46, 56; 51, 57; 51, 58; 52, 59; 52, 60; 52, 61; 52, 62; 54, 63; 59, 64; 59, 65; 60, 66; 60, 67; 61, 68; 61, 69; 62, 70; 62, 71; 63, 72; 63, 73; 64, 74; 64, 75; 65, 76; 69, 77; 69, 78; 73, 79; 73, 80; 80, 81
def file_search(self, query, offset=None, timeout=None): """ Search for samples. In addition to retrieving all information on a particular file, VirusTotal allows you to perform what we call "advanced reverse searches". Reverse searches take you from a file property to a list of files that match that property. For example, this functionality enables you to retrieve all those files marked by at least one antivirus vendor as Zbot, or all those files that have a size under 90KB and are detected by at least 10 antivirus solutions, or all those PDF files that have an invalid XREF section, etc. This API is equivalent to VirusTotal Intelligence advanced searches. A very wide variety of search modifiers are available, including: file size, file type, first submission date to VirusTotal, last submission date to VirusTotal, number of positives, dynamic behavioural properties, binary content, submission file name, and a very long etcetera. The full list of search modifiers allowed for file search queries is documented at: https://www.virustotal.com/intelligence/help/file-search/#search-modifiers NOTE: Daily limited! No matter what API step you have licensed, this API call is limited to 50K requests per day. If you need any more, chances are you are approaching your engineering problem erroneously and you can probably solve it using the file distribution call. Do not hesitate to contact us with your particular use case. EXAMPLE: search_options = 'type:peexe size:90kb+ positives:5+ behaviour:"taskkill"' :param query: A search modifier compliant file search query. :param offset: (optional) The offset value returned by a previously issued identical query, allows you to paginate over the results. If not specified the first 300 matching files sorted according to last submission date to VirusTotal in a descending fashion will be returned. :param timeout: The amount of time in seconds the request should wait before timing out. :return: JSON response - By default the list returned contains at most 300 hashes, ordered according to last submission date to VirusTotal in a descending fashion. """ params = dict(apikey=self.api_key, query=query, offset=offset) try: response = requests.get(self.base + 'file/search', params=params, proxies=self.proxies, timeout=timeout) except requests.RequestException as e: return dict(error=str(e)) return _return_response_and_status_code(response)
0, module; 1, function_definition; 2, function_name:sort; 3, parameters; 4, type; 5, block; 6, typed_parameter; 7, generic_type; 8, expression_statement; 9, return_statement; 10, identifier:records; 11, type; 12, identifier:List; 13, type_parameter; 14, string:"Sort records into a canonical order, suitable for comparison."; 15, call; 16, generic_type; 17, type; 18, identifier:sorted; 19, argument_list; 20, identifier:Sequence; 21, type_parameter; 22, identifier:Record; 23, identifier:records; 24, keyword_argument; 25, type; 26, identifier:key; 27, identifier:_record_key; 28, identifier:Record
0, 1; 1, 2; 1, 3; 1, 4; 1, 5; 3, 6; 4, 7; 5, 8; 5, 9; 6, 10; 6, 11; 7, 12; 7, 13; 8, 14; 9, 15; 11, 16; 13, 17; 15, 18; 15, 19; 16, 20; 16, 21; 17, 22; 19, 23; 19, 24; 21, 25; 24, 26; 24, 27; 25, 28
def sort(records: Sequence[Record]) -> List[Record]: "Sort records into a canonical order, suitable for comparison." return sorted(records, key=_record_key)
0, module; 1, function_definition; 2, function_name:hclust_linearize; 3, parameters; 4, block; 5, identifier:U; 6, expression_statement; 7, import_from_statement; 8, expression_statement; 9, return_statement; 10, comment:"""Sorts the rows of a matrix by hierarchical clustering. Parameters: U (ndarray) : matrix of data Returns: prm (ndarray) : permutation of the rows """; 11, dotted_name; 12, dotted_name; 13, assignment; 14, call; 15, identifier:scipy; 16, identifier:cluster; 17, identifier:hierarchy; 18, identifier:Z; 19, call; 20, attribute; 21, argument_list; 22, attribute; 23, argument_list; 24, identifier:hierarchy; 25, identifier:leaves_list; 26, call; 27, identifier:hierarchy; 28, identifier:ward; 29, identifier:U; 30, attribute; 31, argument_list; 32, identifier:hierarchy; 33, identifier:optimal_leaf_ordering; 34, identifier:Z; 35, identifier:U
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 4, 6; 4, 7; 4, 8; 4, 9; 6, 10; 7, 11; 7, 12; 8, 13; 9, 14; 11, 15; 11, 16; 12, 17; 13, 18; 13, 19; 14, 20; 14, 21; 19, 22; 19, 23; 20, 24; 20, 25; 21, 26; 22, 27; 22, 28; 23, 29; 26, 30; 26, 31; 30, 32; 30, 33; 31, 34; 31, 35
def hclust_linearize(U): """Sorts the rows of a matrix by hierarchical clustering. Parameters: U (ndarray) : matrix of data Returns: prm (ndarray) : permutation of the rows """ from scipy.cluster import hierarchy Z = hierarchy.ward(U) return hierarchy.leaves_list(hierarchy.optimal_leaf_ordering(Z, U))
0, module; 1, function_definition; 2, function_name:randn_ktensor; 3, parameters; 4, block; 5, identifier:shape; 6, identifier:rank; 7, default_parameter; 8, default_parameter; 9, expression_statement; 10, comment:# Check input.; 11, expression_statement; 12, comment:# Draw low-rank factor matrices with i.i.d. Gaussian elements.; 13, expression_statement; 14, return_statement; 15, identifier:norm; 16, None; 17, identifier:random_state; 18, None; 19, comment:""" Generates a random N-way tensor with rank R, where the entries are drawn from the standard normal distribution. Parameters ---------- shape : tuple shape of the tensor rank : integer rank of the tensor norm : float or None, optional (defaults: None) If not None, the factor matrices are rescaled so that the Frobenius norm of the returned tensor is equal to ``norm``. random_state : integer, RandomState instance or None, optional (default ``None``) If integer, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by np.random. Returns ------- X : (I_1, ..., I_N) array_like N-way tensor with rank R. Example ------- >>> # Create a rank-2 tensor of dimension 5x5x5: >>> import tensortools as tt >>> X = tt.randn_tensor((5,5,5), rank=2) """; 20, assignment; 21, assignment; 22, call; 23, identifier:rns; 24, call; 25, identifier:factors; 26, call; 27, identifier:_rescale_tensor; 28, argument_list; 29, identifier:_check_random_state; 30, argument_list; 31, identifier:KTensor; 32, argument_list; 33, identifier:factors; 34, identifier:norm; 35, identifier:random_state; 36, list_comprehension; 37, call; 38, for_in_clause; 39, attribute; 40, argument_list; 41, identifier:i; 42, identifier:shape; 43, identifier:rns; 44, identifier:standard_normal; 45, tuple; 46, identifier:i; 47, identifier:rank
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 4, 14; 7, 15; 7, 16; 8, 17; 8, 18; 9, 19; 11, 20; 13, 21; 14, 22; 20, 23; 20, 24; 21, 25; 21, 26; 22, 27; 22, 28; 24, 29; 24, 30; 26, 31; 26, 32; 28, 33; 28, 34; 30, 35; 32, 36; 36, 37; 36, 38; 37, 39; 37, 40; 38, 41; 38, 42; 39, 43; 39, 44; 40, 45; 45, 46; 45, 47
def randn_ktensor(shape, rank, norm=None, random_state=None): """ Generates a random N-way tensor with rank R, where the entries are drawn from the standard normal distribution. Parameters ---------- shape : tuple shape of the tensor rank : integer rank of the tensor norm : float or None, optional (defaults: None) If not None, the factor matrices are rescaled so that the Frobenius norm of the returned tensor is equal to ``norm``. random_state : integer, RandomState instance or None, optional (default ``None``) If integer, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by np.random. Returns ------- X : (I_1, ..., I_N) array_like N-way tensor with rank R. Example ------- >>> # Create a rank-2 tensor of dimension 5x5x5: >>> import tensortools as tt >>> X = tt.randn_tensor((5,5,5), rank=2) """ # Check input. rns = _check_random_state(random_state) # Draw low-rank factor matrices with i.i.d. Gaussian elements. factors = KTensor([rns.standard_normal((i, rank)) for i in shape]) return _rescale_tensor(factors, norm)
0, module; 1, function_definition; 2, function_name:sort_by_number_values; 3, parameters; 4, comment:# pragma: no cover, looks like not used!; 5, block; 6, identifier:x00; 7, identifier:y00; 8, expression_statement; 9, if_statement; 10, if_statement; 11, comment:# So is equal; 12, return_statement; 13, comment:"""Compare x00, y00 base on number of values :param x00: first elem to compare :type x00: list :param y00: second elem to compare :type y00: list :return: x00 > y00 (-1) if len(x00) > len(y00), x00 == y00 (0) if id equals, x00 < y00 (1) else :rtype: int """; 14, comparison_operator:len(x00) < len(y00); 15, block; 16, comparison_operator:len(x00) > len(y00); 17, block; 18, integer:0; 19, call; 20, call; 21, return_statement; 22, call; 23, call; 24, return_statement; 25, identifier:len; 26, argument_list; 27, identifier:len; 28, argument_list; 29, integer:1; 30, identifier:len; 31, argument_list; 32, identifier:len; 33, argument_list; 34, unary_operator; 35, identifier:x00; 36, identifier:y00; 37, identifier:x00; 38, identifier:y00; 39, integer:1
0, 1; 1, 2; 1, 3; 1, 4; 1, 5; 3, 6; 3, 7; 5, 8; 5, 9; 5, 10; 5, 11; 5, 12; 8, 13; 9, 14; 9, 15; 10, 16; 10, 17; 12, 18; 14, 19; 14, 20; 15, 21; 16, 22; 16, 23; 17, 24; 19, 25; 19, 26; 20, 27; 20, 28; 21, 29; 22, 30; 22, 31; 23, 32; 23, 33; 24, 34; 26, 35; 28, 36; 31, 37; 33, 38; 34, 39
def sort_by_number_values(x00, y00): # pragma: no cover, looks like not used! """Compare x00, y00 base on number of values :param x00: first elem to compare :type x00: list :param y00: second elem to compare :type y00: list :return: x00 > y00 (-1) if len(x00) > len(y00), x00 == y00 (0) if id equals, x00 < y00 (1) else :rtype: int """ if len(x00) < len(y00): return 1 if len(x00) > len(y00): return -1 # So is equal return 0
0, module; 1, function_definition; 2, function_name:get_scheduler_ordered_list; 3, parameters; 4, block; 5, identifier:self; 6, identifier:realm; 7, expression_statement; 8, comment:# Get the schedulers for the required realm; 9, expression_statement; 10, for_statement; 11, comment:# Now we sort the schedulers so we take alive, then spare, then dead,; 12, expression_statement; 13, expression_statement; 14, expression_statement; 15, for_statement; 16, expression_statement; 17, expression_statement; 18, expression_statement; 19, expression_statement; 20, expression_statement; 21, comment:# I need to pop the list, so reverse the list...; 22, return_statement; 23, comment:"""Get sorted scheduler list for a specific realm List is ordered as: alive first, then spare (if any), then dead scheduler links :param realm: realm we want scheduler from :type realm: alignak.objects.realm.Realm :return: sorted scheduler list :rtype: list[alignak.objects.schedulerlink.SchedulerLink] """; 24, assignment; 25, identifier:scheduler_link_uuid; 26, attribute; 27, block; 28, assignment; 29, assignment; 30, assignment; 31, identifier:sdata; 32, identifier:scheduler_links; 33, block; 34, assignment; 35, call; 36, call; 37, call; 38, call; 39, identifier:scheduler_links; 40, identifier:scheduler_links; 41, list; 42, identifier:realm; 43, identifier:schedulers; 44, expression_statement; 45, identifier:alive; 46, list; 47, identifier:spare; 48, list; 49, identifier:deads; 50, list; 51, if_statement; 52, identifier:scheduler_links; 53, list; 54, attribute; 55, argument_list; 56, attribute; 57, argument_list; 58, attribute; 59, argument_list; 60, attribute; 61, argument_list; 62, call; 63, boolean_operator; 64, block; 65, elif_clause; 66, else_clause; 67, identifier:scheduler_links; 68, identifier:extend; 69, identifier:alive; 70, identifier:scheduler_links; 71, identifier:extend; 72, identifier:spare; 73, identifier:scheduler_links; 74, identifier:extend; 75, identifier:deads; 76, identifier:scheduler_links; 77, identifier:reverse; 78, attribute; 79, argument_list; 80, attribute; 81, not_operator; 82, expression_statement; 83, boolean_operator; 84, block; 85, block; 86, identifier:scheduler_links; 87, identifier:append; 88, subscript; 89, identifier:sdata; 90, identifier:alive; 91, attribute; 92, call; 93, attribute; 94, attribute; 95, expression_statement; 96, expression_statement; 97, attribute; 98, identifier:scheduler_link_uuid; 99, identifier:sdata; 100, identifier:spare; 101, attribute; 102, argument_list; 103, identifier:sdata; 104, identifier:alive; 105, identifier:sdata; 106, identifier:spare; 107, call; 108, call; 109, identifier:self; 110, identifier:schedulers; 111, identifier:alive; 112, identifier:append; 113, identifier:sdata; 114, attribute; 115, argument_list; 116, attribute; 117, argument_list; 118, identifier:spare; 119, identifier:append; 120, identifier:sdata; 121, identifier:deads; 122, identifier:append; 123, identifier:sdata
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 4, 14; 4, 15; 4, 16; 4, 17; 4, 18; 4, 19; 4, 20; 4, 21; 4, 22; 7, 23; 9, 24; 10, 25; 10, 26; 10, 27; 12, 28; 13, 29; 14, 30; 15, 31; 15, 32; 15, 33; 16, 34; 17, 35; 18, 36; 19, 37; 20, 38; 22, 39; 24, 40; 24, 41; 26, 42; 26, 43; 27, 44; 28, 45; 28, 46; 29, 47; 29, 48; 30, 49; 30, 50; 33, 51; 34, 52; 34, 53; 35, 54; 35, 55; 36, 56; 36, 57; 37, 58; 37, 59; 38, 60; 38, 61; 44, 62; 51, 63; 51, 64; 51, 65; 51, 66; 54, 67; 54, 68; 55, 69; 56, 70; 56, 71; 57, 72; 58, 73; 58, 74; 59, 75; 60, 76; 60, 77; 62, 78; 62, 79; 63, 80; 63, 81; 64, 82; 65, 83; 65, 84; 66, 85; 78, 86; 78, 87; 79, 88; 80, 89; 80, 90; 81, 91; 82, 92; 83, 93; 83, 94; 84, 95; 85, 96; 88, 97; 88, 98; 91, 99; 91, 100; 92, 101; 92, 102; 93, 103; 93, 104; 94, 105; 94, 106; 95, 107; 96, 108; 97, 109; 97, 110; 101, 111; 101, 112; 102, 113; 107, 114; 107, 115; 108, 116; 108, 117; 114, 118; 114, 119; 115, 120; 116, 121; 116, 122; 117, 123
def get_scheduler_ordered_list(self, realm): """Get sorted scheduler list for a specific realm List is ordered as: alive first, then spare (if any), then dead scheduler links :param realm: realm we want scheduler from :type realm: alignak.objects.realm.Realm :return: sorted scheduler list :rtype: list[alignak.objects.schedulerlink.SchedulerLink] """ # Get the schedulers for the required realm scheduler_links = [] for scheduler_link_uuid in realm.schedulers: scheduler_links.append(self.schedulers[scheduler_link_uuid]) # Now we sort the schedulers so we take alive, then spare, then dead, alive = [] spare = [] deads = [] for sdata in scheduler_links: if sdata.alive and not sdata.spare: alive.append(sdata) elif sdata.alive and sdata.spare: spare.append(sdata) else: deads.append(sdata) scheduler_links = [] scheduler_links.extend(alive) scheduler_links.extend(spare) scheduler_links.extend(deads) scheduler_links.reverse() # I need to pop the list, so reverse the list... return scheduler_links
0, module; 1, function_definition; 2, function_name:log_initial_states; 3, parameters; 4, block; 5, identifier:self; 6, expression_statement; 7, comment:# Raise hosts initial status broks; 8, for_statement; 9, comment:# And then services initial status broks; 10, for_statement; 11, comment:"""Raise hosts and services initial status logs First, raise hosts status and then services. This to allow the events log to be a little sorted. :return: None """; 12, identifier:elt; 13, attribute; 14, block; 15, identifier:elt; 16, attribute; 17, block; 18, identifier:self; 19, identifier:hosts; 20, expression_statement; 21, identifier:self; 22, identifier:services; 23, expression_statement; 24, call; 25, call; 26, attribute; 27, argument_list; 28, attribute; 29, argument_list; 30, identifier:elt; 31, identifier:raise_initial_state; 32, identifier:elt; 33, identifier:raise_initial_state
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 4, 6; 4, 7; 4, 8; 4, 9; 4, 10; 6, 11; 8, 12; 8, 13; 8, 14; 10, 15; 10, 16; 10, 17; 13, 18; 13, 19; 14, 20; 16, 21; 16, 22; 17, 23; 20, 24; 23, 25; 24, 26; 24, 27; 25, 28; 25, 29; 26, 30; 26, 31; 28, 32; 28, 33
def log_initial_states(self): """Raise hosts and services initial status logs First, raise hosts status and then services. This to allow the events log to be a little sorted. :return: None """ # Raise hosts initial status broks for elt in self.hosts: elt.raise_initial_state() # And then services initial status broks for elt in self.services: elt.raise_initial_state()
0, module; 1, function_definition; 2, function_name:get_groupnames; 3, parameters; 4, block; 5, identifier:self; 6, identifier:hostgroups; 7, expression_statement; 8, expression_statement; 9, for_statement; 10, return_statement; 11, comment:"""Get names of the host's hostgroups :return: comma separated names of hostgroups alphabetically sorted :rtype: str """; 12, assignment; 13, identifier:hostgroup_id; 14, attribute; 15, block; 16, call; 17, identifier:group_names; 18, list; 19, identifier:self; 20, identifier:hostgroups; 21, expression_statement; 22, expression_statement; 23, attribute; 24, argument_list; 25, assignment; 26, call; 27, string; 28, identifier:join; 29, call; 30, identifier:hostgroup; 31, subscript; 32, attribute; 33, argument_list; 34, string_content:,; 35, identifier:sorted; 36, argument_list; 37, identifier:hostgroups; 38, identifier:hostgroup_id; 39, identifier:group_names; 40, identifier:append; 41, call; 42, identifier:group_names; 43, attribute; 44, argument_list; 45, identifier:hostgroup; 46, identifier:get_name
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 4, 9; 4, 10; 7, 11; 8, 12; 9, 13; 9, 14; 9, 15; 10, 16; 12, 17; 12, 18; 14, 19; 14, 20; 15, 21; 15, 22; 16, 23; 16, 24; 21, 25; 22, 26; 23, 27; 23, 28; 24, 29; 25, 30; 25, 31; 26, 32; 26, 33; 27, 34; 29, 35; 29, 36; 31, 37; 31, 38; 32, 39; 32, 40; 33, 41; 36, 42; 41, 43; 41, 44; 43, 45; 43, 46
def get_groupnames(self, hostgroups): """Get names of the host's hostgroups :return: comma separated names of hostgroups alphabetically sorted :rtype: str """ group_names = [] for hostgroup_id in self.hostgroups: hostgroup = hostgroups[hostgroup_id] group_names.append(hostgroup.get_name()) return ','.join(sorted(group_names))
0, module; 1, function_definition; 2, function_name:get_groupaliases; 3, parameters; 4, block; 5, identifier:self; 6, identifier:hostgroups; 7, expression_statement; 8, expression_statement; 9, for_statement; 10, return_statement; 11, comment:"""Get aliases of the host's hostgroups :return: comma separated aliases of hostgroups alphabetically sorted :rtype: str """; 12, assignment; 13, identifier:hostgroup_id; 14, attribute; 15, block; 16, call; 17, identifier:group_aliases; 18, list; 19, identifier:self; 20, identifier:hostgroups; 21, expression_statement; 22, expression_statement; 23, attribute; 24, argument_list; 25, assignment; 26, call; 27, string; 28, identifier:join; 29, call; 30, identifier:hostgroup; 31, subscript; 32, attribute; 33, argument_list; 34, string_content:,; 35, identifier:sorted; 36, argument_list; 37, identifier:hostgroups; 38, identifier:hostgroup_id; 39, identifier:group_aliases; 40, identifier:append; 41, attribute; 42, identifier:group_aliases; 43, identifier:hostgroup; 44, identifier:alias
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 4, 9; 4, 10; 7, 11; 8, 12; 9, 13; 9, 14; 9, 15; 10, 16; 12, 17; 12, 18; 14, 19; 14, 20; 15, 21; 15, 22; 16, 23; 16, 24; 21, 25; 22, 26; 23, 27; 23, 28; 24, 29; 25, 30; 25, 31; 26, 32; 26, 33; 27, 34; 29, 35; 29, 36; 31, 37; 31, 38; 32, 39; 32, 40; 33, 41; 36, 42; 41, 43; 41, 44
def get_groupaliases(self, hostgroups): """Get aliases of the host's hostgroups :return: comma separated aliases of hostgroups alphabetically sorted :rtype: str """ group_aliases = [] for hostgroup_id in self.hostgroups: hostgroup = hostgroups[hostgroup_id] group_aliases.append(hostgroup.alias) return ','.join(sorted(group_aliases))
0, module; 1, function_definition; 2, function_name:satellites_list; 3, parameters; 4, block; 5, identifier:self; 6, default_parameter; 7, expression_statement; 8, with_statement; 9, identifier:daemon_type; 10, string; 11, comment:"""Get the arbiter satellite names sorted by type Returns a list of the satellites as in: { reactionner: [ "reactionner-master" ], broker: [ "broker-master" ], arbiter: [ "arbiter-master" ], scheduler: [ "scheduler-master-3", "scheduler-master", "scheduler-master-2" ], receiver: [ "receiver-nsca", "receiver-master" ], poller: [ "poller-master" ] } If a specific daemon type is requested, the list is reduced to this unique daemon type: { scheduler: [ "scheduler-master-3", "scheduler-master", "scheduler-master-2" ] } :param daemon_type: daemon type to filter :type daemon_type: str :return: dict with key *daemon_type* and value list of daemon name :rtype: dict """; 12, with_clause; 13, block; 14, with_item; 15, expression_statement; 16, for_statement; 17, return_statement; 18, attribute; 19, assignment; 20, identifier:s_type; 21, list; 22, block; 23, identifier:res; 24, attribute; 25, identifier:conf_lock; 26, identifier:res; 27, dictionary; 28, string; 29, string; 30, string; 31, string; 32, string; 33, string; 34, if_statement; 35, expression_statement; 36, expression_statement; 37, for_statement; 38, identifier:self; 39, identifier:app; 40, string_content:arbiter; 41, string_content:scheduler; 42, string_content:poller; 43, string_content:reactionner; 44, string_content:receiver; 45, string_content:broker; 46, boolean_operator; 47, block; 48, assignment; 49, assignment; 50, identifier:daemon_link; 51, call; 52, block; 53, identifier:daemon_type; 54, comparison_operator:daemon_type != s_type; 55, continue_statement; 56, identifier:satellite_list; 57, list; 58, subscript; 59, identifier:satellite_list; 60, identifier:getattr; 61, argument_list; 62, expression_statement; 63, identifier:daemon_type; 64, identifier:s_type; 65, identifier:res; 66, identifier:s_type; 67, attribute; 68, binary_operator:s_type + 's'; 69, list; 70, call; 71, attribute; 72, identifier:conf; 73, identifier:s_type; 74, string; 75, attribute; 76, argument_list; 77, identifier:self; 78, identifier:app; 79, string_content:s; 80, identifier:satellite_list; 81, identifier:append; 82, attribute; 83, identifier:daemon_link; 84, identifier:name
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 6, 9; 6, 10; 7, 11; 8, 12; 8, 13; 12, 14; 13, 15; 13, 16; 13, 17; 14, 18; 15, 19; 16, 20; 16, 21; 16, 22; 17, 23; 18, 24; 18, 25; 19, 26; 19, 27; 21, 28; 21, 29; 21, 30; 21, 31; 21, 32; 21, 33; 22, 34; 22, 35; 22, 36; 22, 37; 24, 38; 24, 39; 28, 40; 29, 41; 30, 42; 31, 43; 32, 44; 33, 45; 34, 46; 34, 47; 35, 48; 36, 49; 37, 50; 37, 51; 37, 52; 46, 53; 46, 54; 47, 55; 48, 56; 48, 57; 49, 58; 49, 59; 51, 60; 51, 61; 52, 62; 54, 63; 54, 64; 58, 65; 58, 66; 61, 67; 61, 68; 61, 69; 62, 70; 67, 71; 67, 72; 68, 73; 68, 74; 70, 75; 70, 76; 71, 77; 71, 78; 74, 79; 75, 80; 75, 81; 76, 82; 82, 83; 82, 84
def satellites_list(self, daemon_type=''): """Get the arbiter satellite names sorted by type Returns a list of the satellites as in: { reactionner: [ "reactionner-master" ], broker: [ "broker-master" ], arbiter: [ "arbiter-master" ], scheduler: [ "scheduler-master-3", "scheduler-master", "scheduler-master-2" ], receiver: [ "receiver-nsca", "receiver-master" ], poller: [ "poller-master" ] } If a specific daemon type is requested, the list is reduced to this unique daemon type: { scheduler: [ "scheduler-master-3", "scheduler-master", "scheduler-master-2" ] } :param daemon_type: daemon type to filter :type daemon_type: str :return: dict with key *daemon_type* and value list of daemon name :rtype: dict """ with self.app.conf_lock: res = {} for s_type in ['arbiter', 'scheduler', 'poller', 'reactionner', 'receiver', 'broker']: if daemon_type and daemon_type != s_type: continue satellite_list = [] res[s_type] = satellite_list for daemon_link in getattr(self.app.conf, s_type + 's', []): satellite_list.append(daemon_link.name) return res
0, module; 1, function_definition; 2, function_name:sort_labeled_intervals; 3, parameters; 4, block; 5, identifier:intervals; 6, default_parameter; 7, expression_statement; 8, expression_statement; 9, expression_statement; 10, if_statement; 11, identifier:labels; 12, None; 13, string; 14, assignment; 15, assignment; 16, comparison_operator:labels is None; 17, block; 18, else_clause; 19, string_content:Sort intervals, and optionally, their corresponding labels according to start time. Parameters ---------- intervals : np.ndarray, shape=(n, 2) The input intervals labels : list, optional Labels for each interval Returns ------- intervals_sorted or (intervals_sorted, labels_sorted) Labels are only returned if provided as input; 20, identifier:idx; 21, call; 22, identifier:intervals_sorted; 23, subscript; 24, identifier:labels; 25, None; 26, return_statement; 27, block; 28, attribute; 29, argument_list; 30, identifier:intervals; 31, identifier:idx; 32, identifier:intervals_sorted; 33, return_statement; 34, identifier:np; 35, identifier:argsort; 36, subscript; 37, expression_list; 38, identifier:intervals; 39, slice; 40, integer:0; 41, identifier:intervals_sorted; 42, list_comprehension; 43, subscript; 44, for_in_clause; 45, identifier:labels; 46, identifier:_; 47, identifier:_; 48, identifier:idx
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 4, 9; 4, 10; 6, 11; 6, 12; 7, 13; 8, 14; 9, 15; 10, 16; 10, 17; 10, 18; 13, 19; 14, 20; 14, 21; 15, 22; 15, 23; 16, 24; 16, 25; 17, 26; 18, 27; 21, 28; 21, 29; 23, 30; 23, 31; 26, 32; 27, 33; 28, 34; 28, 35; 29, 36; 33, 37; 36, 38; 36, 39; 36, 40; 37, 41; 37, 42; 42, 43; 42, 44; 43, 45; 43, 46; 44, 47; 44, 48
def sort_labeled_intervals(intervals, labels=None): '''Sort intervals, and optionally, their corresponding labels according to start time. Parameters ---------- intervals : np.ndarray, shape=(n, 2) The input intervals labels : list, optional Labels for each interval Returns ------- intervals_sorted or (intervals_sorted, labels_sorted) Labels are only returned if provided as input ''' idx = np.argsort(intervals[:, 0]) intervals_sorted = intervals[idx] if labels is None: return intervals_sorted else: return intervals_sorted, [labels[_] for _ in idx]
0, module; 1, function_definition; 2, function_name:labeled_intervals; 3, parameters; 4, block; 5, identifier:intervals; 6, identifier:labels; 7, default_parameter; 8, default_parameter; 9, default_parameter; 10, default_parameter; 11, default_parameter; 12, default_parameter; 13, dictionary_splat_pattern; 14, expression_statement; 15, comment:# Get the axes handle; 16, expression_statement; 17, comment:# Make sure we have a numpy array; 18, expression_statement; 19, if_statement; 20, comment:# Put additional labels at the end, in order; 21, if_statement; 22, expression_statement; 23, expression_statement; 24, comment:# Swap color -> facecolor here so we preserve edgecolor on rects; 25, expression_statement; 26, expression_statement; 27, if_statement; 28, if_statement; 29, if_statement; 30, expression_statement; 31, for_statement; 32, expression_statement; 33, for_statement; 34, for_statement; 35, comment:# Draw a line separating the new labels from pre-existing labels; 36, if_statement; 37, if_statement; 38, if_statement; 39, if_statement; 40, return_statement; 41, identifier:label_set; 42, None; 43, identifier:base; 44, None; 45, identifier:height; 46, None; 47, identifier:extend_labels; 48, True; 49, identifier:ax; 50, None; 51, identifier:tick; 52, True; 53, identifier:kwargs; 54, string; 55, assignment; 56, assignment; 57, comparison_operator:label_set is None; 58, comment:# If we have non-empty pre-existing tick labels, use them; 59, block; 60, else_clause; 61, identifier:extend_labels; 62, block; 63, elif_clause; 64, else_clause; 65, assignment; 66, call; 67, assignment; 68, call; 69, comparison_operator:base is None; 70, block; 71, comparison_operator:height is None; 72, block; 73, call; 74, block; 75, assignment; 76, pattern_list; 77, call; 78, block; 79, assignment; 80, pattern_list; 81, call; 82, block; 83, identifier:lab; 84, identifier:seg_y; 85, block; 86, comparison_operator:label_set != ticks; 87, block; 88, identifier:tick; 89, block; 90, attribute; 91, block; 92, attribute; 93, block; 94, identifier:ax; 95, string_content:Plot labeled intervals with each label on its own row. Parameters ---------- intervals : np.ndarray, shape=(n, 2) segment intervals, in the format returned by :func:`mir_eval.io.load_intervals` or :func:`mir_eval.io.load_labeled_intervals`. labels : list, shape=(n,) reference segment labels, in the format returned by :func:`mir_eval.io.load_labeled_intervals`. label_set : list An (ordered) list of labels to determine the plotting order. If not provided, the labels will be inferred from ``ax.get_yticklabels()``. If no ``yticklabels`` exist, then the sorted set of unique values in ``labels`` is taken as the label set. base : np.ndarray, shape=(n,), optional Vertical positions of each label. By default, labels are positioned at integers ``np.arange(len(labels))``. height : scalar or np.ndarray, shape=(n,), optional Height for each label. If scalar, the same value is applied to all labels. By default, each label has ``height=1``. extend_labels : bool If ``False``, only values of ``labels`` that also exist in ``label_set`` will be shown. If ``True``, all labels are shown, with those in `labels` but not in `label_set` appended to the top of the plot. A horizontal line is drawn to indicate the separation between values in or out of ``label_set``. ax : matplotlib.pyplot.axes An axis handle on which to draw the intervals. If none is provided, a new set of axes is created. tick : bool If ``True``, sets tick positions and labels on the y-axis. kwargs Additional keyword arguments to pass to `matplotlib.collection.BrokenBarHCollection`. Returns ------- ax : matplotlib.pyplot.axes._subplots.AxesSubplot A handle to the (possibly constructed) plot axes; 96, pattern_list; 97, call; 98, identifier:intervals; 99, call; 100, identifier:label_set; 101, None; 102, expression_statement; 103, comment:# If none of the label strings have content, treat it as empty; 104, if_statement; 105, block; 106, expression_statement; 107, identifier:label_set; 108, block; 109, block; 110, identifier:style; 111, call; 112, attribute; 113, argument_list; 114, subscript; 115, call; 116, attribute; 117, argument_list; 118, identifier:base; 119, None; 120, expression_statement; 121, identifier:height; 122, None; 123, expression_statement; 124, attribute; 125, argument_list; 126, expression_statement; 127, identifier:seg_y; 128, call; 129, identifier:ybase; 130, identifier:yheight; 131, identifier:lab; 132, identifier:zip; 133, argument_list; 134, expression_statement; 135, identifier:xvals; 136, call; 137, identifier:ival; 138, identifier:lab; 139, identifier:zip; 140, argument_list; 141, if_statement; 142, expression_statement; 143, expression_statement; 144, comment:# Pop the label after the first time we see it, so we only get; 145, comment:# one legend entry; 146, expression_statement; 147, identifier:label_set; 148, identifier:ticks; 149, expression_statement; 150, expression_statement; 151, expression_statement; 152, expression_statement; 153, expression_statement; 154, expression_statement; 155, identifier:base; 156, identifier:size; 157, expression_statement; 158, identifier:intervals; 159, identifier:size; 160, expression_statement; 161, identifier:ax; 162, identifier:_; 163, identifier:__get_axes; 164, argument_list; 165, attribute; 166, argument_list; 167, assignment; 168, not_operator; 169, block; 170, expression_statement; 171, assignment; 172, expression_statement; 173, expression_statement; 174, identifier:dict; 175, argument_list; 176, identifier:style; 177, identifier:update; 178, call; 179, identifier:style; 180, string; 181, attribute; 182, argument_list; 183, identifier:style; 184, identifier:update; 185, identifier:kwargs; 186, assignment; 187, assignment; 188, identifier:np; 189, identifier:isscalar; 190, identifier:height; 191, assignment; 192, identifier:dict; 193, argument_list; 194, identifier:base; 195, identifier:height; 196, identifier:ticks; 197, assignment; 198, identifier:defaultdict; 199, argument_list; 200, identifier:intervals; 201, identifier:labels; 202, comparison_operator:lab not in seg_y; 203, block; 204, call; 205, call; 206, call; 207, call; 208, call; 209, call; 210, call; 211, call; 212, call; 213, call; 214, call; 215, keyword_argument; 216, identifier:np; 217, identifier:atleast_2d; 218, identifier:intervals; 219, identifier:label_set; 220, list_comprehension; 221, call; 222, expression_statement; 223, assignment; 224, identifier:ticks; 225, binary_operator:label_set + sorted(set(labels) - set(label_set)); 226, assignment; 227, assignment; 228, keyword_argument; 229, identifier:next; 230, argument_list; 231, string_content:facecolor; 232, identifier:style; 233, identifier:pop; 234, string; 235, identifier:base; 236, call; 237, identifier:height; 238, integer:1; 239, identifier:height; 240, binary_operator:height * np.ones_like(base); 241, subscript; 242, tuple; 243, identifier:list; 244, identifier:lab; 245, identifier:seg_y; 246, continue_statement; 247, attribute; 248, argument_list; 249, attribute; 250, argument_list; 251, attribute; 252, argument_list; 253, attribute; 254, argument_list; 255, attribute; 256, argument_list; 257, attribute; 258, argument_list; 259, attribute; 260, argument_list; 261, attribute; 262, argument_list; 263, attribute; 264, argument_list; 265, identifier:__expand_limits; 266, argument_list; 267, identifier:__expand_limits; 268, argument_list; 269, identifier:ax; 270, identifier:ax; 271, call; 272, for_in_clause; 273, identifier:any; 274, argument_list; 275, assignment; 276, identifier:label_set; 277, call; 278, identifier:label_set; 279, call; 280, identifier:ticks; 281, identifier:label_set; 282, identifier:ticks; 283, call; 284, identifier:linewidth; 285, integer:1; 286, attribute; 287, string_content:color; 288, attribute; 289, argument_list; 290, identifier:height; 291, call; 292, identifier:seg_y; 293, identifier:lab; 294, identifier:ybase; 295, identifier:yheight; 296, subscript; 297, identifier:append; 298, tuple; 299, identifier:ax; 300, identifier:add_collection; 301, call; 302, identifier:style; 303, identifier:pop; 304, string; 305, None; 306, identifier:ax; 307, identifier:axhline; 308, call; 309, keyword_argument; 310, keyword_argument; 311, identifier:ax; 312, identifier:grid; 313, True; 314, keyword_argument; 315, identifier:ax; 316, identifier:set_yticks; 317, list; 318, identifier:ax; 319, identifier:set_yticks; 320, identifier:base; 321, identifier:ax; 322, identifier:set_yticklabels; 323, identifier:ticks; 324, keyword_argument; 325, attribute; 326, identifier:set_major_formatter; 327, call; 328, identifier:ax; 329, list; 330, keyword_argument; 331, identifier:ax; 332, list; 333, keyword_argument; 334, attribute; 335, argument_list; 336, identifier:_; 337, call; 338, identifier:label_set; 339, identifier:label_set; 340, list; 341, identifier:list; 342, argument_list; 343, identifier:sorted; 344, argument_list; 345, identifier:sorted; 346, argument_list; 347, attribute; 348, identifier:prop_cycler; 349, identifier:np; 350, identifier:arange; 351, call; 352, attribute; 353, argument_list; 354, identifier:xvals; 355, identifier:lab; 356, subscript; 357, binary_operator:ival[1] - ival[0]; 358, identifier:BrokenBarHCollection; 359, argument_list; 360, string_content:label; 361, identifier:len; 362, argument_list; 363, identifier:color; 364, string; 365, identifier:alpha; 366, float:0.5; 367, identifier:axis; 368, string; 369, identifier:va; 370, string; 371, identifier:ax; 372, identifier:yaxis; 373, identifier:IntervalFormatter; 374, argument_list; 375, call; 376, call; 377, identifier:which; 378, string; 379, call; 380, call; 381, identifier:which; 382, string; 383, identifier:_; 384, identifier:get_text; 385, attribute; 386, argument_list; 387, identifier:label_set; 388, binary_operator:set(labels) - set(label_set); 389, call; 390, identifier:ax; 391, identifier:_get_patches_for_fill; 392, identifier:len; 393, argument_list; 394, identifier:np; 395, identifier:ones_like; 396, identifier:base; 397, identifier:ival; 398, integer:0; 399, subscript; 400, subscript; 401, subscript; 402, subscript; 403, dictionary_splat; 404, identifier:label_set; 405, string_content:k; 406, string_content:y; 407, string_content:bottom; 408, identifier:base; 409, identifier:ticks; 410, attribute; 411, argument_list; 412, attribute; 413, argument_list; 414, string_content:y; 415, attribute; 416, argument_list; 417, attribute; 418, argument_list; 419, string_content:x; 420, identifier:ax; 421, identifier:get_yticklabels; 422, call; 423, call; 424, identifier:set; 425, argument_list; 426, identifier:ticks; 427, identifier:ival; 428, integer:1; 429, identifier:ival; 430, integer:0; 431, identifier:xvals; 432, identifier:lab; 433, identifier:seg_y; 434, identifier:lab; 435, identifier:style; 436, identifier:base; 437, identifier:min; 438, parenthesized_expression; 439, identifier:max; 440, identifier:intervals; 441, identifier:min; 442, identifier:intervals; 443, identifier:max; 444, identifier:set; 445, argument_list; 446, identifier:set; 447, argument_list; 448, identifier:labels; 449, binary_operator:base + height; 450, identifier:labels; 451, identifier:label_set; 452, identifier:base; 453, identifier:height
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 3, 9; 3, 10; 3, 11; 3, 12; 3, 13; 4, 14; 4, 15; 4, 16; 4, 17; 4, 18; 4, 19; 4, 20; 4, 21; 4, 22; 4, 23; 4, 24; 4, 25; 4, 26; 4, 27; 4, 28; 4, 29; 4, 30; 4, 31; 4, 32; 4, 33; 4, 34; 4, 35; 4, 36; 4, 37; 4, 38; 4, 39; 4, 40; 7, 41; 7, 42; 8, 43; 8, 44; 9, 45; 9, 46; 10, 47; 10, 48; 11, 49; 11, 50; 12, 51; 12, 52; 13, 53; 14, 54; 16, 55; 18, 56; 19, 57; 19, 58; 19, 59; 19, 60; 21, 61; 21, 62; 21, 63; 21, 64; 22, 65; 23, 66; 25, 67; 26, 68; 27, 69; 27, 70; 28, 71; 28, 72; 29, 73; 29, 74; 30, 75; 31, 76; 31, 77; 31, 78; 32, 79; 33, 80; 33, 81; 33, 82; 34, 83; 34, 84; 34, 85; 36, 86; 36, 87; 37, 88; 37, 89; 38, 90; 38, 91; 39, 92; 39, 93; 40, 94; 54, 95; 55, 96; 55, 97; 56, 98; 56, 99; 57, 100; 57, 101; 59, 102; 59, 103; 59, 104; 60, 105; 62, 106; 63, 107; 63, 108; 64, 109; 65, 110; 65, 111; 66, 112; 66, 113; 67, 114; 67, 115; 68, 116; 68, 117; 69, 118; 69, 119; 70, 120; 71, 121; 71, 122; 72, 123; 73, 124; 73, 125; 74, 126; 75, 127; 75, 128; 76, 129; 76, 130; 76, 131; 77, 132; 77, 133; 78, 134; 79, 135; 79, 136; 80, 137; 80, 138; 81, 139; 81, 140; 82, 141; 82, 142; 85, 143; 85, 144; 85, 145; 85, 146; 86, 147; 86, 148; 87, 149; 89, 150; 89, 151; 89, 152; 89, 153; 89, 154; 90, 155; 90, 156; 91, 157; 92, 158; 92, 159; 93, 160; 96, 161; 96, 162; 97, 163; 97, 164; 99, 165; 99, 166; 102, 167; 104, 168; 104, 169; 105, 170; 106, 171; 108, 172; 109, 173; 111, 174; 111, 175; 112, 176; 112, 177; 113, 178; 114, 179; 114, 180; 115, 181; 115, 182; 116, 183; 116, 184; 117, 185; 120, 186; 123, 187; 124, 188; 124, 189; 125, 190; 126, 191; 128, 192; 128, 193; 133, 194; 133, 195; 133, 196; 134, 197; 136, 198; 136, 199; 140, 200; 140, 201; 141, 202; 141, 203; 142, 204; 143, 205; 146, 206; 149, 207; 150, 208; 151, 209; 152, 210; 153, 211; 154, 212; 157, 213; 160, 214; 164, 215; 165, 216; 165, 217; 166, 218; 167, 219; 167, 220; 168, 221; 169, 222; 170, 223; 171, 224; 171, 225; 172, 226; 173, 227; 175, 228; 178, 229; 178, 230; 180, 231; 181, 232; 181, 233; 182, 234; 186, 235; 186, 236; 187, 237; 187, 238; 191, 239; 191, 240; 197, 241; 197, 242; 199, 243; 202, 244; 202, 245; 203, 246; 204, 247; 204, 248; 205, 249; 205, 250; 206, 251; 206, 252; 207, 253; 207, 254; 208, 255; 208, 256; 209, 257; 209, 258; 210, 259; 210, 260; 211, 261; 211, 262; 212, 263; 212, 264; 213, 265; 213, 266; 214, 267; 214, 268; 215, 269; 215, 270; 220, 271; 220, 272; 221, 273; 221, 274; 222, 275; 223, 276; 223, 277; 225, 278; 225, 279; 226, 280; 226, 281; 227, 282; 227, 283; 228, 284; 228, 285; 230, 286; 234, 287; 236, 288; 236, 289; 240, 290; 240, 291; 241, 292; 241, 293; 242, 294; 242, 295; 247, 296; 247, 297; 248, 298; 249, 299; 249, 300; 250, 301; 251, 302; 251, 303; 252, 304; 252, 305; 253, 306; 253, 307; 254, 308; 254, 309; 254, 310; 255, 311; 255, 312; 256, 313; 256, 314; 257, 315; 257, 316; 258, 317; 259, 318; 259, 319; 260, 320; 261, 321; 261, 322; 262, 323; 262, 324; 263, 325; 263, 326; 264, 327; 266, 328; 266, 329; 266, 330; 268, 331; 268, 332; 268, 333; 271, 334; 271, 335; 272, 336; 272, 337; 274, 338; 275, 339; 275, 340; 277, 341; 277, 342; 279, 343; 279, 344; 283, 345; 283, 346; 286, 347; 286, 348; 288, 349; 288, 350; 289, 351; 291, 352; 291, 353; 296, 354; 296, 355; 298, 356; 298, 357; 301, 358; 301, 359; 304, 360; 308, 361; 308, 362; 309, 363; 309, 364; 310, 365; 310, 366; 314, 367; 314, 368; 324, 369; 324, 370; 325, 371; 325, 372; 327, 373; 327, 374; 329, 375; 329, 376; 330, 377; 330, 378; 332, 379; 332, 380; 333, 381; 333, 382; 334, 383; 334, 384; 337, 385; 337, 386; 342, 387; 344, 388; 346, 389; 347, 390; 347, 391; 351, 392; 351, 393; 352, 394; 352, 395; 353, 396; 356, 397; 356, 398; 357, 399; 357, 400; 359, 401; 359, 402; 359, 403; 362, 404; 364, 405; 368, 406; 370, 407; 374, 408; 374, 409; 375, 410; 375, 411; 376, 412; 376, 413; 378, 414; 379, 415; 379, 416; 380, 417; 380, 418; 382, 419; 385, 420; 385, 421; 388, 422; 388, 423; 389, 424; 389, 425; 393, 426; 399, 427; 399, 428; 400, 429; 400, 430; 401, 431; 401, 432; 402, 433; 402, 434; 403, 435; 410, 436; 410, 437; 412, 438; 412, 439; 415, 440; 415, 441; 417, 442; 417, 443; 422, 444; 422, 445; 423, 446; 423, 447; 425, 448; 438, 449; 445, 450; 447, 451; 449, 452; 449, 453
def labeled_intervals(intervals, labels, label_set=None, base=None, height=None, extend_labels=True, ax=None, tick=True, **kwargs): '''Plot labeled intervals with each label on its own row. Parameters ---------- intervals : np.ndarray, shape=(n, 2) segment intervals, in the format returned by :func:`mir_eval.io.load_intervals` or :func:`mir_eval.io.load_labeled_intervals`. labels : list, shape=(n,) reference segment labels, in the format returned by :func:`mir_eval.io.load_labeled_intervals`. label_set : list An (ordered) list of labels to determine the plotting order. If not provided, the labels will be inferred from ``ax.get_yticklabels()``. If no ``yticklabels`` exist, then the sorted set of unique values in ``labels`` is taken as the label set. base : np.ndarray, shape=(n,), optional Vertical positions of each label. By default, labels are positioned at integers ``np.arange(len(labels))``. height : scalar or np.ndarray, shape=(n,), optional Height for each label. If scalar, the same value is applied to all labels. By default, each label has ``height=1``. extend_labels : bool If ``False``, only values of ``labels`` that also exist in ``label_set`` will be shown. If ``True``, all labels are shown, with those in `labels` but not in `label_set` appended to the top of the plot. A horizontal line is drawn to indicate the separation between values in or out of ``label_set``. ax : matplotlib.pyplot.axes An axis handle on which to draw the intervals. If none is provided, a new set of axes is created. tick : bool If ``True``, sets tick positions and labels on the y-axis. kwargs Additional keyword arguments to pass to `matplotlib.collection.BrokenBarHCollection`. Returns ------- ax : matplotlib.pyplot.axes._subplots.AxesSubplot A handle to the (possibly constructed) plot axes ''' # Get the axes handle ax, _ = __get_axes(ax=ax) # Make sure we have a numpy array intervals = np.atleast_2d(intervals) if label_set is None: # If we have non-empty pre-existing tick labels, use them label_set = [_.get_text() for _ in ax.get_yticklabels()] # If none of the label strings have content, treat it as empty if not any(label_set): label_set = [] else: label_set = list(label_set) # Put additional labels at the end, in order if extend_labels: ticks = label_set + sorted(set(labels) - set(label_set)) elif label_set: ticks = label_set else: ticks = sorted(set(labels)) style = dict(linewidth=1) style.update(next(ax._get_patches_for_fill.prop_cycler)) # Swap color -> facecolor here so we preserve edgecolor on rects style['facecolor'] = style.pop('color') style.update(kwargs) if base is None: base = np.arange(len(ticks)) if height is None: height = 1 if np.isscalar(height): height = height * np.ones_like(base) seg_y = dict() for ybase, yheight, lab in zip(base, height, ticks): seg_y[lab] = (ybase, yheight) xvals = defaultdict(list) for ival, lab in zip(intervals, labels): if lab not in seg_y: continue xvals[lab].append((ival[0], ival[1] - ival[0])) for lab in seg_y: ax.add_collection(BrokenBarHCollection(xvals[lab], seg_y[lab], **style)) # Pop the label after the first time we see it, so we only get # one legend entry style.pop('label', None) # Draw a line separating the new labels from pre-existing labels if label_set != ticks: ax.axhline(len(label_set), color='k', alpha=0.5) if tick: ax.grid(True, axis='y') ax.set_yticks([]) ax.set_yticks(base) ax.set_yticklabels(ticks, va='bottom') ax.yaxis.set_major_formatter(IntervalFormatter(base, ticks)) if base.size: __expand_limits(ax, [base.min(), (base + height).max()], which='y') if intervals.size: __expand_limits(ax, [intervals.min(), intervals.max()], which='x') return ax
0, module; 1, function_definition; 2, function_name:finish_directory_parse; 3, parameters; 4, comment:# type: () -> None; 5, block; 6, identifier:self; 7, expression_statement; 8, if_statement; 9, if_statement; 10, string; 11, not_operator; 12, block; 13, comparison_operator:self.icb_tag.file_type != 4; 14, block; 15, string_content:A method to finish up the parsing of this UDF File Entry directory. In particular, this method checks to see if it is in sorted order for future use. Parameters: None. Returns: Nothing.; 16, attribute; 17, raise_statement; 18, attribute; 19, integer:4; 20, raise_statement; 21, identifier:self; 22, identifier:_initialized; 23, call; 24, attribute; 25, identifier:file_type; 26, call; 27, attribute; 28, argument_list; 29, identifier:self; 30, identifier:icb_tag; 31, attribute; 32, argument_list; 33, identifier:pycdlibexception; 34, identifier:PyCdlibInternalError; 35, string; 36, identifier:pycdlibexception; 37, identifier:PyCdlibInternalError; 38, string; 39, string_content:UDF File Entry not initialized; 40, string_content:Can only finish_directory for a directory
0, 1; 1, 2; 1, 3; 1, 4; 1, 5; 3, 6; 5, 7; 5, 8; 5, 9; 7, 10; 8, 11; 8, 12; 9, 13; 9, 14; 10, 15; 11, 16; 12, 17; 13, 18; 13, 19; 14, 20; 16, 21; 16, 22; 17, 23; 18, 24; 18, 25; 20, 26; 23, 27; 23, 28; 24, 29; 24, 30; 26, 31; 26, 32; 27, 33; 27, 34; 28, 35; 31, 36; 31, 37; 32, 38; 35, 39; 38, 40
def finish_directory_parse(self): # type: () -> None ''' A method to finish up the parsing of this UDF File Entry directory. In particular, this method checks to see if it is in sorted order for future use. Parameters: None. Returns: Nothing. ''' if not self._initialized: raise pycdlibexception.PyCdlibInternalError('UDF File Entry not initialized') if self.icb_tag.file_type != 4: raise pycdlibexception.PyCdlibInternalError('Can only finish_directory for a directory')
0, module; 1, function_definition; 2, function_name:filepaths; 3, parameters; 4, block; 5, identifier:path; 6, default_parameter; 7, default_parameter; 8, default_parameter; 9, expression_statement; 10, if_statement; 11, if_statement; 12, identifier:exclude; 13, tuple; 14, identifier:hidden; 15, True; 16, identifier:empty; 17, True; 18, comment:""" Return list of absolute, sorted file paths path: Path to file or directory exclude: List of file name patterns to exclude hidden: Whether to include hidden files empty: Whether to include empty files Raise PathNotFoundError if path doesn't exist. """; 19, not_operator; 20, block; 21, elif_clause; 22, call; 23, block; 24, else_clause; 25, call; 26, raise_statement; 27, not_operator; 28, block; 29, attribute; 30, argument_list; 31, return_statement; 32, block; 33, attribute; 34, argument_list; 35, call; 36, call; 37, raise_statement; 38, attribute; 39, identifier:isfile; 40, identifier:path; 41, list; 42, expression_statement; 43, for_statement; 44, return_statement; 45, attribute; 46, identifier:exists; 47, identifier:path; 48, attribute; 49, argument_list; 50, attribute; 51, argument_list; 52, call; 53, identifier:os; 54, identifier:path; 55, identifier:path; 56, assignment; 57, pattern_list; 58, call; 59, comment:# Ignore hidden directory; 60, block; 61, call; 62, identifier:os; 63, identifier:path; 64, identifier:error; 65, identifier:PathNotFoundError; 66, identifier:path; 67, identifier:os; 68, identifier:access; 69, identifier:path; 70, attribute; 71, keyword_argument; 72, attribute; 73, argument_list; 74, identifier:filepaths; 75, list; 76, identifier:dirpath; 77, identifier:dirnames; 78, identifier:filenames; 79, attribute; 80, argument_list; 81, if_statement; 82, for_statement; 83, identifier:sorted; 84, argument_list; 85, identifier:os; 86, identifier:R_OK; 87, identifier:effective_ids; 88, comparison_operator:os.access in os.supports_effective_ids; 89, identifier:error; 90, identifier:ReadError; 91, attribute; 92, identifier:path; 93, identifier:os; 94, identifier:walk; 95, identifier:path; 96, boolean_operator; 97, block; 98, identifier:filename; 99, identifier:filenames; 100, comment:# Ignore hidden file; 101, block; 102, identifier:filepaths; 103, keyword_argument; 104, attribute; 105, attribute; 106, identifier:errno; 107, identifier:EACCES; 108, not_operator; 109, call; 110, continue_statement; 111, if_statement; 112, expression_statement; 113, comment:# Ignore excluded file; 114, if_statement; 115, identifier:key; 116, lambda; 117, identifier:os; 118, identifier:access; 119, identifier:os; 120, identifier:supports_effective_ids; 121, identifier:hidden; 122, identifier:is_hidden; 123, argument_list; 124, boolean_operator; 125, block; 126, assignment; 127, call; 128, block; 129, else_clause; 130, lambda_parameters; 131, call; 132, identifier:dirpath; 133, not_operator; 134, call; 135, continue_statement; 136, identifier:filepath; 137, call; 138, identifier:any; 139, generator_expression; 140, continue_statement; 141, comment:# Ignore empty file; 142, block; 143, identifier:fp; 144, attribute; 145, argument_list; 146, identifier:hidden; 147, identifier:is_hidden; 148, argument_list; 149, attribute; 150, argument_list; 151, call; 152, for_in_clause; 153, if_statement; 154, identifier:fp; 155, identifier:casefold; 156, identifier:filename; 157, attribute; 158, identifier:join; 159, identifier:dirpath; 160, identifier:filename; 161, identifier:is_match; 162, argument_list; 163, identifier:pattern; 164, identifier:exclude; 165, boolean_operator; 166, block; 167, identifier:os; 168, identifier:path; 169, identifier:filepath; 170, identifier:pattern; 171, identifier:empty; 172, comparison_operator:os.path.getsize(os.path.realpath(filepath)) > 0; 173, expression_statement; 174, call; 175, integer:0; 176, call; 177, attribute; 178, argument_list; 179, attribute; 180, argument_list; 181, attribute; 182, identifier:getsize; 183, call; 184, identifier:filepaths; 185, identifier:append; 186, identifier:filepath; 187, identifier:os; 188, identifier:path; 189, attribute; 190, argument_list; 191, attribute; 192, identifier:realpath; 193, identifier:filepath; 194, identifier:os; 195, identifier:path
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 4, 9; 4, 10; 4, 11; 6, 12; 6, 13; 7, 14; 7, 15; 8, 16; 8, 17; 9, 18; 10, 19; 10, 20; 10, 21; 11, 22; 11, 23; 11, 24; 19, 25; 20, 26; 21, 27; 21, 28; 22, 29; 22, 30; 23, 31; 24, 32; 25, 33; 25, 34; 26, 35; 27, 36; 28, 37; 29, 38; 29, 39; 30, 40; 31, 41; 32, 42; 32, 43; 32, 44; 33, 45; 33, 46; 34, 47; 35, 48; 35, 49; 36, 50; 36, 51; 37, 52; 38, 53; 38, 54; 41, 55; 42, 56; 43, 57; 43, 58; 43, 59; 43, 60; 44, 61; 45, 62; 45, 63; 48, 64; 48, 65; 49, 66; 50, 67; 50, 68; 51, 69; 51, 70; 51, 71; 52, 72; 52, 73; 56, 74; 56, 75; 57, 76; 57, 77; 57, 78; 58, 79; 58, 80; 60, 81; 60, 82; 61, 83; 61, 84; 70, 85; 70, 86; 71, 87; 71, 88; 72, 89; 72, 90; 73, 91; 73, 92; 79, 93; 79, 94; 80, 95; 81, 96; 81, 97; 82, 98; 82, 99; 82, 100; 82, 101; 84, 102; 84, 103; 88, 104; 88, 105; 91, 106; 91, 107; 96, 108; 96, 109; 97, 110; 101, 111; 101, 112; 101, 113; 101, 114; 103, 115; 103, 116; 104, 117; 104, 118; 105, 119; 105, 120; 108, 121; 109, 122; 109, 123; 111, 124; 111, 125; 112, 126; 114, 127; 114, 128; 114, 129; 116, 130; 116, 131; 123, 132; 124, 133; 124, 134; 125, 135; 126, 136; 126, 137; 127, 138; 127, 139; 128, 140; 129, 141; 129, 142; 130, 143; 131, 144; 131, 145; 133, 146; 134, 147; 134, 148; 137, 149; 137, 150; 139, 151; 139, 152; 142, 153; 144, 154; 144, 155; 148, 156; 149, 157; 149, 158; 150, 159; 150, 160; 151, 161; 151, 162; 152, 163; 152, 164; 153, 165; 153, 166; 157, 167; 157, 168; 162, 169; 162, 170; 165, 171; 165, 172; 166, 173; 172, 174; 172, 175; 173, 176; 174, 177; 174, 178; 176, 179; 176, 180; 177, 181; 177, 182; 178, 183; 179, 184; 179, 185; 180, 186; 181, 187; 181, 188; 183, 189; 183, 190; 189, 191; 189, 192; 190, 193; 191, 194; 191, 195
def filepaths(path, exclude=(), hidden=True, empty=True): """ Return list of absolute, sorted file paths path: Path to file or directory exclude: List of file name patterns to exclude hidden: Whether to include hidden files empty: Whether to include empty files Raise PathNotFoundError if path doesn't exist. """ if not os.path.exists(path): raise error.PathNotFoundError(path) elif not os.access(path, os.R_OK, effective_ids=os.access in os.supports_effective_ids): raise error.ReadError(errno.EACCES, path) if os.path.isfile(path): return [path] else: filepaths = [] for dirpath, dirnames, filenames in os.walk(path): # Ignore hidden directory if not hidden and is_hidden(dirpath): continue for filename in filenames: # Ignore hidden file if not hidden and is_hidden(filename): continue filepath = os.path.join(dirpath, filename) # Ignore excluded file if any(is_match(filepath, pattern) for pattern in exclude): continue else: # Ignore empty file if empty or os.path.getsize(os.path.realpath(filepath)) > 0: filepaths.append(filepath) return sorted(filepaths, key=lambda fp: fp.casefold())
0, module; 1, function_definition; 2, function_name:_get_sorted_methods; 3, parameters; 4, block; 5, identifier:self; 6, identifier:methods; 7, expression_statement; 8, if_statement; 9, comment:# Comparison function we'll use to sort the methods:; 10, function_definition; 11, return_statement; 12, comment:"""Get a copy of 'methods' sorted the way they would be on the live server. Args: methods: JSON configuration of an API's methods. Returns: The same configuration with the methods sorted based on what order they'll be checked by the server. """; 13, not_operator; 14, block; 15, function_name:_sorted_methods_comparison; 16, parameters; 17, block; 18, call; 19, identifier:methods; 20, return_statement; 21, identifier:method_info1; 22, identifier:method_info2; 23, expression_statement; 24, function_definition; 25, comment:# Higher path scores come first.; 26, expression_statement; 27, expression_statement; 28, if_statement; 29, comment:# Compare by path text next, sorted alphabetically.; 30, expression_statement; 31, if_statement; 32, comment:# All else being equal, sort by HTTP method.; 33, expression_statement; 34, return_statement; 35, identifier:sorted; 36, argument_list; 37, identifier:methods; 38, comment:"""Sort method info by path and http_method. Args: method_info1: Method name and info for the first method to compare. method_info2: Method name and info for the method to compare to. Returns: Negative if the first method should come first, positive if the first method should come after the second. Zero if they're equivalent. """; 39, function_name:_score_path; 40, parameters; 41, block; 42, assignment; 43, assignment; 44, comparison_operator:path_score1 != path_score2; 45, block; 46, assignment; 47, comparison_operator:path_result != 0; 48, block; 49, assignment; 50, identifier:method_result; 51, call; 52, identifier:_sorted_methods_comparison; 53, identifier:path; 54, expression_statement; 55, expression_statement; 56, expression_statement; 57, for_statement; 58, comment:# Shift by 31 instead of 32 because some (!) versions of Python like; 59, comment:# to convert the int to a long if we shift by 32, and the sorted(); 60, comment:# function that uses this blows up if it receives anything but an int.; 61, expression_statement; 62, return_statement; 63, identifier:path_score1; 64, call; 65, identifier:path_score2; 66, call; 67, identifier:path_score1; 68, identifier:path_score2; 69, return_statement; 70, identifier:path_result; 71, call; 72, identifier:path_result; 73, integer:0; 74, return_statement; 75, identifier:method_result; 76, call; 77, attribute; 78, argument_list; 79, comment:"""Calculate the score for this path, used for comparisons. Higher scores have priority, and if scores are equal, the path text is sorted alphabetically. Scores are based on the number and location of the constant parts of the path. The server has some special handling for variables with regexes, which we don't handle here. Args: path: The request path that we're calculating a score for. Returns: The score for the given path. """; 80, assignment; 81, assignment; 82, identifier:part; 83, identifier:parts; 84, block; 85, augmented_assignment; 86, identifier:score; 87, identifier:_score_path; 88, argument_list; 89, identifier:_score_path; 90, argument_list; 91, binary_operator:path_score2 - path_score1; 92, identifier:cmp; 93, argument_list; 94, identifier:path_result; 95, identifier:cmp; 96, argument_list; 97, identifier:methods; 98, identifier:items; 99, identifier:score; 100, integer:0; 101, identifier:parts; 102, call; 103, expression_statement; 104, if_statement; 105, identifier:score; 106, binary_operator:31 - len(parts); 107, call; 108, call; 109, identifier:path_score2; 110, identifier:path_score1; 111, call; 112, call; 113, call; 114, call; 115, attribute; 116, argument_list; 117, augmented_assignment; 118, boolean_operator; 119, comment:# Found a constant.; 120, block; 121, integer:31; 122, call; 123, attribute; 124, argument_list; 125, attribute; 126, argument_list; 127, attribute; 128, argument_list; 129, attribute; 130, argument_list; 131, attribute; 132, argument_list; 133, attribute; 134, argument_list; 135, identifier:path; 136, identifier:split; 137, string; 138, identifier:score; 139, integer:1; 140, not_operator; 141, comparison_operator:part[0] != '{'; 142, expression_statement; 143, identifier:len; 144, argument_list; 145, subscript; 146, identifier:get; 147, string; 148, string; 149, subscript; 150, identifier:get; 151, string; 152, string; 153, subscript; 154, identifier:get; 155, string; 156, string; 157, subscript; 158, identifier:get; 159, string; 160, string; 161, subscript; 162, identifier:get; 163, string; 164, string; 165, subscript; 166, identifier:get; 167, string; 168, string; 169, string_content:/; 170, identifier:part; 171, subscript; 172, string; 173, augmented_assignment; 174, identifier:parts; 175, identifier:method_info1; 176, integer:1; 177, string_content:path; 178, identifier:method_info2; 179, integer:1; 180, string_content:path; 181, identifier:method_info1; 182, integer:1; 183, string_content:path; 184, identifier:method_info2; 185, integer:1; 186, string_content:path; 187, identifier:method_info1; 188, integer:1; 189, string_content:httpMethod; 190, identifier:method_info2; 191, integer:1; 192, string_content:httpMethod; 193, identifier:part; 194, integer:0; 195, string_content:{; 196, identifier:score; 197, integer:1
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 4, 9; 4, 10; 4, 11; 7, 12; 8, 13; 8, 14; 10, 15; 10, 16; 10, 17; 11, 18; 13, 19; 14, 20; 16, 21; 16, 22; 17, 23; 17, 24; 17, 25; 17, 26; 17, 27; 17, 28; 17, 29; 17, 30; 17, 31; 17, 32; 17, 33; 17, 34; 18, 35; 18, 36; 20, 37; 23, 38; 24, 39; 24, 40; 24, 41; 26, 42; 27, 43; 28, 44; 28, 45; 30, 46; 31, 47; 31, 48; 33, 49; 34, 50; 36, 51; 36, 52; 40, 53; 41, 54; 41, 55; 41, 56; 41, 57; 41, 58; 41, 59; 41, 60; 41, 61; 41, 62; 42, 63; 42, 64; 43, 65; 43, 66; 44, 67; 44, 68; 45, 69; 46, 70; 46, 71; 47, 72; 47, 73; 48, 74; 49, 75; 49, 76; 51, 77; 51, 78; 54, 79; 55, 80; 56, 81; 57, 82; 57, 83; 57, 84; 61, 85; 62, 86; 64, 87; 64, 88; 66, 89; 66, 90; 69, 91; 71, 92; 71, 93; 74, 94; 76, 95; 76, 96; 77, 97; 77, 98; 80, 99; 80, 100; 81, 101; 81, 102; 84, 103; 84, 104; 85, 105; 85, 106; 88, 107; 90, 108; 91, 109; 91, 110; 93, 111; 93, 112; 96, 113; 96, 114; 102, 115; 102, 116; 103, 117; 104, 118; 104, 119; 104, 120; 106, 121; 106, 122; 107, 123; 107, 124; 108, 125; 108, 126; 111, 127; 111, 128; 112, 129; 112, 130; 113, 131; 113, 132; 114, 133; 114, 134; 115, 135; 115, 136; 116, 137; 117, 138; 117, 139; 118, 140; 118, 141; 120, 142; 122, 143; 122, 144; 123, 145; 123, 146; 124, 147; 124, 148; 125, 149; 125, 150; 126, 151; 126, 152; 127, 153; 127, 154; 128, 155; 128, 156; 129, 157; 129, 158; 130, 159; 130, 160; 131, 161; 131, 162; 132, 163; 132, 164; 133, 165; 133, 166; 134, 167; 134, 168; 137, 169; 140, 170; 141, 171; 141, 172; 142, 173; 144, 174; 145, 175; 145, 176; 147, 177; 149, 178; 149, 179; 151, 180; 153, 181; 153, 182; 155, 183; 157, 184; 157, 185; 159, 186; 161, 187; 161, 188; 163, 189; 165, 190; 165, 191; 167, 192; 171, 193; 171, 194; 172, 195; 173, 196; 173, 197
def _get_sorted_methods(self, methods): """Get a copy of 'methods' sorted the way they would be on the live server. Args: methods: JSON configuration of an API's methods. Returns: The same configuration with the methods sorted based on what order they'll be checked by the server. """ if not methods: return methods # Comparison function we'll use to sort the methods: def _sorted_methods_comparison(method_info1, method_info2): """Sort method info by path and http_method. Args: method_info1: Method name and info for the first method to compare. method_info2: Method name and info for the method to compare to. Returns: Negative if the first method should come first, positive if the first method should come after the second. Zero if they're equivalent. """ def _score_path(path): """Calculate the score for this path, used for comparisons. Higher scores have priority, and if scores are equal, the path text is sorted alphabetically. Scores are based on the number and location of the constant parts of the path. The server has some special handling for variables with regexes, which we don't handle here. Args: path: The request path that we're calculating a score for. Returns: The score for the given path. """ score = 0 parts = path.split('/') for part in parts: score <<= 1 if not part or part[0] != '{': # Found a constant. score += 1 # Shift by 31 instead of 32 because some (!) versions of Python like # to convert the int to a long if we shift by 32, and the sorted() # function that uses this blows up if it receives anything but an int. score <<= 31 - len(parts) return score # Higher path scores come first. path_score1 = _score_path(method_info1[1].get('path', '')) path_score2 = _score_path(method_info2[1].get('path', '')) if path_score1 != path_score2: return path_score2 - path_score1 # Compare by path text next, sorted alphabetically. path_result = cmp(method_info1[1].get('path', ''), method_info2[1].get('path', '')) if path_result != 0: return path_result # All else being equal, sort by HTTP method. method_result = cmp(method_info1[1].get('httpMethod', ''), method_info2[1].get('httpMethod', '')) return method_result return sorted(methods.items(), _sorted_methods_comparison)
0, module; 1, function_definition; 2, function_name:_format_ase2clusgeo; 3, parameters; 4, block; 5, identifier:obj; 6, default_parameter; 7, expression_statement; 8, comment:#atoms metadata; 9, expression_statement; 10, if_statement; 11, expression_statement; 12, expression_statement; 13, expression_statement; 14, for_statement; 15, expression_statement; 16, expression_statement; 17, expression_statement; 18, expression_statement; 19, return_statement; 20, identifier:all_atomtypes; 21, None; 22, comment:""" Takes an ase Atoms object and returns numpy arrays and integers which are read by the internal clusgeo. Apos is currently a flattened out numpy array Args: obj(): all_atomtypes(): sort(): """; 23, assignment; 24, comparison_operator:all_atomtypes is not None; 25, block; 26, else_clause; 27, assignment; 28, assignment; 29, assignment; 30, identifier:atomtype; 31, identifier:atomtype_lst; 32, block; 33, assignment; 34, assignment; 35, identifier:atomtype_lst; 36, assignment; 37, expression_list; 38, identifier:totalAN; 39, call; 40, identifier:all_atomtypes; 41, None; 42, expression_statement; 43, block; 44, identifier:atomtype_lst; 45, call; 46, identifier:n_atoms_per_type_lst; 47, list; 48, identifier:pos_lst; 49, list; 50, expression_statement; 51, expression_statement; 52, expression_statement; 53, comment:# store data in lists; 54, expression_statement; 55, expression_statement; 56, identifier:typeNs; 57, identifier:n_atoms_per_type_lst; 58, identifier:Ntypes; 59, call; 60, identifier:Apos; 61, call; 62, identifier:Apos; 63, identifier:typeNs; 64, identifier:Ntypes; 65, identifier:atomtype_lst; 66, identifier:totalAN; 67, identifier:len; 68, argument_list; 69, assignment; 70, expression_statement; 71, attribute; 72, argument_list; 73, assignment; 74, assignment; 75, assignment; 76, call; 77, call; 78, identifier:len; 79, argument_list; 80, attribute; 81, argument_list; 82, identifier:obj; 83, identifier:atomtype_set; 84, call; 85, assignment; 86, identifier:np; 87, identifier:sort; 88, call; 89, identifier:condition; 90, comparison_operator:obj.get_atomic_numbers() == atomtype; 91, identifier:pos_onetype; 92, subscript; 93, identifier:n_onetype; 94, subscript; 95, attribute; 96, argument_list; 97, attribute; 98, argument_list; 99, identifier:n_atoms_per_type_lst; 100, call; 101, identifier:ravel; 102, identifier:set; 103, argument_list; 104, identifier:atomtype_set; 105, call; 106, identifier:list; 107, argument_list; 108, call; 109, identifier:atomtype; 110, call; 111, identifier:condition; 112, attribute; 113, integer:0; 114, identifier:pos_lst; 115, identifier:append; 116, identifier:pos_onetype; 117, identifier:n_atoms_per_type_lst; 118, identifier:append; 119, identifier:n_onetype; 120, attribute; 121, argument_list; 122, identifier:all_atomtypes; 123, identifier:set; 124, argument_list; 125, identifier:atomtype_set; 126, attribute; 127, argument_list; 128, attribute; 129, argument_list; 130, identifier:pos_onetype; 131, identifier:shape; 132, identifier:np; 133, identifier:concatenate; 134, identifier:pos_lst; 135, call; 136, identifier:obj; 137, identifier:get_atomic_numbers; 138, identifier:obj; 139, identifier:get_positions; 140, attribute; 141, argument_list; 142, identifier:obj; 143, identifier:get_atomic_numbers
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 4, 14; 4, 15; 4, 16; 4, 17; 4, 18; 4, 19; 6, 20; 6, 21; 7, 22; 9, 23; 10, 24; 10, 25; 10, 26; 11, 27; 12, 28; 13, 29; 14, 30; 14, 31; 14, 32; 15, 33; 16, 34; 17, 35; 18, 36; 19, 37; 23, 38; 23, 39; 24, 40; 24, 41; 25, 42; 26, 43; 27, 44; 27, 45; 28, 46; 28, 47; 29, 48; 29, 49; 32, 50; 32, 51; 32, 52; 32, 53; 32, 54; 32, 55; 33, 56; 33, 57; 34, 58; 34, 59; 36, 60; 36, 61; 37, 62; 37, 63; 37, 64; 37, 65; 37, 66; 39, 67; 39, 68; 42, 69; 43, 70; 45, 71; 45, 72; 50, 73; 51, 74; 52, 75; 54, 76; 55, 77; 59, 78; 59, 79; 61, 80; 61, 81; 68, 82; 69, 83; 69, 84; 70, 85; 71, 86; 71, 87; 72, 88; 73, 89; 73, 90; 74, 91; 74, 92; 75, 93; 75, 94; 76, 95; 76, 96; 77, 97; 77, 98; 79, 99; 80, 100; 80, 101; 84, 102; 84, 103; 85, 104; 85, 105; 88, 106; 88, 107; 90, 108; 90, 109; 92, 110; 92, 111; 94, 112; 94, 113; 95, 114; 95, 115; 96, 116; 97, 117; 97, 118; 98, 119; 100, 120; 100, 121; 103, 122; 105, 123; 105, 124; 107, 125; 108, 126; 108, 127; 110, 128; 110, 129; 112, 130; 112, 131; 120, 132; 120, 133; 121, 134; 124, 135; 126, 136; 126, 137; 128, 138; 128, 139; 135, 140; 135, 141; 140, 142; 140, 143
def _format_ase2clusgeo(obj, all_atomtypes=None): """ Takes an ase Atoms object and returns numpy arrays and integers which are read by the internal clusgeo. Apos is currently a flattened out numpy array Args: obj(): all_atomtypes(): sort(): """ #atoms metadata totalAN = len(obj) if all_atomtypes is not None: atomtype_set = set(all_atomtypes) else: atomtype_set = set(obj.get_atomic_numbers()) atomtype_lst = np.sort(list(atomtype_set)) n_atoms_per_type_lst = [] pos_lst = [] for atomtype in atomtype_lst: condition = obj.get_atomic_numbers() == atomtype pos_onetype = obj.get_positions()[condition] n_onetype = pos_onetype.shape[0] # store data in lists pos_lst.append(pos_onetype) n_atoms_per_type_lst.append(n_onetype) typeNs = n_atoms_per_type_lst Ntypes = len(n_atoms_per_type_lst) atomtype_lst Apos = np.concatenate(pos_lst).ravel() return Apos, typeNs, Ntypes, atomtype_lst, totalAN
0, module; 1, function_definition; 2, function_name:sort_loading_order; 3, parameters; 4, block; 5, identifier:step_files; 6, expression_statement; 7, expression_statement; 8, expression_statement; 9, expression_statement; 10, for_statement; 11, return_statement; 12, comment:"""Sort step files into correct loading order. The correct loading order is first tools, then workflows without subworkflows, and then workflows with subworkflows. This order is required to avoid error messages when a working directory is used. """; 13, assignment; 14, assignment; 15, assignment; 16, identifier:f; 17, identifier:step_files; 18, comment:# assume that urls are tools; 19, block; 20, binary_operator:tools + workflows + workflows_with_subworkflows; 21, identifier:tools; 22, list; 23, identifier:workflows; 24, list; 25, identifier:workflows_with_subworkflows; 26, list; 27, if_statement; 28, binary_operator:tools + workflows; 29, identifier:workflows_with_subworkflows; 30, boolean_operator; 31, block; 32, else_clause; 33, identifier:tools; 34, identifier:workflows; 35, call; 36, call; 37, expression_statement; 38, block; 39, attribute; 40, argument_list; 41, attribute; 42, argument_list; 43, call; 44, expression_statement; 45, if_statement; 46, identifier:f; 47, identifier:startswith; 48, string; 49, identifier:f; 50, identifier:startswith; 51, string; 52, attribute; 53, argument_list; 54, assignment; 55, comparison_operator:obj.get('class', '') == 'Workflow'; 56, block; 57, else_clause; 58, string_content:http://; 59, string_content:https://; 60, identifier:tools; 61, identifier:append; 62, identifier:f; 63, identifier:obj; 64, call; 65, call; 66, string; 67, if_statement; 68, block; 69, identifier:load_yaml; 70, argument_list; 71, attribute; 72, argument_list; 73, string_content:Workflow; 74, comparison_operator:'requirements' in obj.keys(); 75, block; 76, else_clause; 77, expression_statement; 78, identifier:f; 79, identifier:obj; 80, identifier:get; 81, string; 82, string; 83, string; 84, call; 85, expression_statement; 86, if_statement; 87, block; 88, call; 89, string_content:class; 90, string_content:requirements; 91, attribute; 92, argument_list; 93, assignment; 94, comparison_operator:subw in obj['requirements']; 95, block; 96, else_clause; 97, expression_statement; 98, attribute; 99, argument_list; 100, identifier:obj; 101, identifier:keys; 102, identifier:subw; 103, dictionary; 104, identifier:subw; 105, subscript; 106, expression_statement; 107, block; 108, call; 109, identifier:tools; 110, identifier:append; 111, identifier:f; 112, pair; 113, identifier:obj; 114, string; 115, call; 116, expression_statement; 117, attribute; 118, argument_list; 119, string; 120, string; 121, string_content:requirements; 122, attribute; 123, argument_list; 124, call; 125, identifier:workflows; 126, identifier:append; 127, identifier:f; 128, string_content:class; 129, string_content:SubworkflowFeatureRequirement; 130, identifier:workflows_with_subworkflows; 131, identifier:append; 132, identifier:f; 133, attribute; 134, argument_list; 135, identifier:workflows; 136, identifier:append; 137, identifier:f
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 4, 6; 4, 7; 4, 8; 4, 9; 4, 10; 4, 11; 6, 12; 7, 13; 8, 14; 9, 15; 10, 16; 10, 17; 10, 18; 10, 19; 11, 20; 13, 21; 13, 22; 14, 23; 14, 24; 15, 25; 15, 26; 19, 27; 20, 28; 20, 29; 27, 30; 27, 31; 27, 32; 28, 33; 28, 34; 30, 35; 30, 36; 31, 37; 32, 38; 35, 39; 35, 40; 36, 41; 36, 42; 37, 43; 38, 44; 38, 45; 39, 46; 39, 47; 40, 48; 41, 49; 41, 50; 42, 51; 43, 52; 43, 53; 44, 54; 45, 55; 45, 56; 45, 57; 48, 58; 51, 59; 52, 60; 52, 61; 53, 62; 54, 63; 54, 64; 55, 65; 55, 66; 56, 67; 57, 68; 64, 69; 64, 70; 65, 71; 65, 72; 66, 73; 67, 74; 67, 75; 67, 76; 68, 77; 70, 78; 71, 79; 71, 80; 72, 81; 72, 82; 74, 83; 74, 84; 75, 85; 75, 86; 76, 87; 77, 88; 81, 89; 83, 90; 84, 91; 84, 92; 85, 93; 86, 94; 86, 95; 86, 96; 87, 97; 88, 98; 88, 99; 91, 100; 91, 101; 93, 102; 93, 103; 94, 104; 94, 105; 95, 106; 96, 107; 97, 108; 98, 109; 98, 110; 99, 111; 103, 112; 105, 113; 105, 114; 106, 115; 107, 116; 108, 117; 108, 118; 112, 119; 112, 120; 114, 121; 115, 122; 115, 123; 116, 124; 117, 125; 117, 126; 118, 127; 119, 128; 120, 129; 122, 130; 122, 131; 123, 132; 124, 133; 124, 134; 133, 135; 133, 136; 134, 137
def sort_loading_order(step_files): """Sort step files into correct loading order. The correct loading order is first tools, then workflows without subworkflows, and then workflows with subworkflows. This order is required to avoid error messages when a working directory is used. """ tools = [] workflows = [] workflows_with_subworkflows = [] for f in step_files: # assume that urls are tools if f.startswith('http://') or f.startswith('https://'): tools.append(f) else: obj = load_yaml(f) if obj.get('class', '') == 'Workflow': if 'requirements' in obj.keys(): subw = {'class': 'SubworkflowFeatureRequirement'} if subw in obj['requirements']: workflows_with_subworkflows.append(f) else: workflows.append(f) else: workflows.append(f) else: tools.append(f) return tools + workflows + workflows_with_subworkflows
0, module; 1, function_definition; 2, function_name:get_contents_dir; 3, parameters; 4, block; 5, identifier:node; 6, expression_statement; 7, expression_statement; 8, for_statement; 9, return_statement; 10, comment:"""Return content signatures and names of all our children separated by new-lines. Ensure that the nodes are sorted."""; 11, assignment; 12, identifier:n; 13, call; 14, block; 15, call; 16, identifier:contents; 17, list; 18, identifier:sorted; 19, argument_list; 20, expression_statement; 21, attribute; 22, argument_list; 23, call; 24, keyword_argument; 25, call; 26, string; 27, identifier:join; 28, identifier:contents; 29, attribute; 30, argument_list; 31, identifier:key; 32, lambda; 33, attribute; 34, argument_list; 35, identifier:node; 36, identifier:children; 37, lambda_parameters; 38, attribute; 39, identifier:contents; 40, identifier:append; 41, binary_operator:'%s %s\n' % (n.get_csig(), n.name); 42, identifier:t; 43, identifier:t; 44, identifier:name; 45, string; 46, tuple; 47, string_content; 48, call; 49, attribute; 50, escape_sequence:\n; 51, attribute; 52, argument_list; 53, identifier:n; 54, identifier:name; 55, identifier:n; 56, identifier:get_csig
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 4, 6; 4, 7; 4, 8; 4, 9; 6, 10; 7, 11; 8, 12; 8, 13; 8, 14; 9, 15; 11, 16; 11, 17; 13, 18; 13, 19; 14, 20; 15, 21; 15, 22; 19, 23; 19, 24; 20, 25; 21, 26; 21, 27; 22, 28; 23, 29; 23, 30; 24, 31; 24, 32; 25, 33; 25, 34; 29, 35; 29, 36; 32, 37; 32, 38; 33, 39; 33, 40; 34, 41; 37, 42; 38, 43; 38, 44; 41, 45; 41, 46; 45, 47; 46, 48; 46, 49; 47, 50; 48, 51; 48, 52; 49, 53; 49, 54; 51, 55; 51, 56
def get_contents_dir(node): """Return content signatures and names of all our children separated by new-lines. Ensure that the nodes are sorted.""" contents = [] for n in sorted(node.children(), key=lambda t: t.name): contents.append('%s %s\n' % (n.get_csig(), n.name)) return ''.join(contents)
0, module; 1, function_definition; 2, function_name:sort_nodes; 3, parameters; 4, block; 5, identifier:self; 6, expression_statement; 7, expression_statement; 8, expression_statement; 9, for_statement; 10, comment:# Now that we have our dependency tree properly built, topologically; 11, comment:# sort the nodes and reorder them.; 12, expression_statement; 13, expression_statement; 14, comment:#Check root nodes all topographically sorted to the beginning; 15, for_statement; 16, comment:"""Topologically sort all of our nodes. Topologically sorting our nodes makes nodes that are inputs to other nodes come first in the list of nodes. This is important to do before programming a sensorgraph into an embedded device whose engine assumes a topologically sorted graph. The sorting is done in place on self.nodes """; 17, assignment; 18, assignment; 19, pattern_list; 20, call; 21, block; 22, assignment; 23, assignment; 24, identifier:root; 25, attribute; 26, block; 27, identifier:node_map; 28, dictionary_comprehension; 29, identifier:node_deps; 30, dictionary; 31, identifier:node; 32, identifier:inputs; 33, identifier:_outputs; 34, attribute; 35, argument_list; 36, expression_statement; 37, expression_statement; 38, expression_statement; 39, identifier:node_order; 40, call; 41, attribute; 42, list_comprehension; 43, identifier:self; 44, identifier:roots; 45, if_statement; 46, pair; 47, for_in_clause; 48, identifier:self; 49, identifier:iterate_bfs; 50, assignment; 51, assignment; 52, assignment; 53, identifier:toposort_flatten; 54, argument_list; 55, identifier:self; 56, identifier:nodes; 57, subscript; 58, for_in_clause; 59, comparison_operator:root not in self.nodes[0:len(self.roots)]; 60, block; 61, call; 62, identifier:i; 63, pattern_list; 64, call; 65, identifier:node_index; 66, subscript; 67, identifier:deps; 68, set_comprehension; 69, subscript; 70, identifier:deps; 71, identifier:node_deps; 72, attribute; 73, identifier:x; 74, identifier:x; 75, identifier:node_order; 76, identifier:root; 77, subscript; 78, raise_statement; 79, identifier:id; 80, argument_list; 81, identifier:i; 82, identifier:node; 83, identifier:enumerate; 84, argument_list; 85, identifier:node_map; 86, call; 87, subscript; 88, for_in_clause; 89, identifier:node_deps; 90, identifier:node_index; 91, identifier:self; 92, identifier:nodes; 93, attribute; 94, slice; 95, call; 96, identifier:node; 97, attribute; 98, identifier:id; 99, argument_list; 100, identifier:node_map; 101, call; 102, identifier:x; 103, identifier:inputs; 104, identifier:self; 105, identifier:nodes; 106, integer:0; 107, call; 108, identifier:NodeConnectionError; 109, argument_list; 110, identifier:self; 111, identifier:nodes; 112, identifier:node; 113, identifier:id; 114, argument_list; 115, identifier:len; 116, argument_list; 117, string:"Inputs not sorted in the beginning"; 118, keyword_argument; 119, keyword_argument; 120, identifier:x; 121, attribute; 122, identifier:node; 123, call; 124, identifier:node_position; 125, call; 126, identifier:self; 127, identifier:roots; 128, identifier:str; 129, argument_list; 130, attribute; 131, argument_list; 132, identifier:root; 133, attribute; 134, identifier:index; 135, identifier:root; 136, identifier:self; 137, identifier:nodes
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 4, 6; 4, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 4, 14; 4, 15; 6, 16; 7, 17; 8, 18; 9, 19; 9, 20; 9, 21; 12, 22; 13, 23; 15, 24; 15, 25; 15, 26; 17, 27; 17, 28; 18, 29; 18, 30; 19, 31; 19, 32; 19, 33; 20, 34; 20, 35; 21, 36; 21, 37; 21, 38; 22, 39; 22, 40; 23, 41; 23, 42; 25, 43; 25, 44; 26, 45; 28, 46; 28, 47; 34, 48; 34, 49; 36, 50; 37, 51; 38, 52; 40, 53; 40, 54; 41, 55; 41, 56; 42, 57; 42, 58; 45, 59; 45, 60; 46, 61; 46, 62; 47, 63; 47, 64; 50, 65; 50, 66; 51, 67; 51, 68; 52, 69; 52, 70; 54, 71; 57, 72; 57, 73; 58, 74; 58, 75; 59, 76; 59, 77; 60, 78; 61, 79; 61, 80; 63, 81; 63, 82; 64, 83; 64, 84; 66, 85; 66, 86; 68, 87; 68, 88; 69, 89; 69, 90; 72, 91; 72, 92; 77, 93; 77, 94; 78, 95; 80, 96; 84, 97; 86, 98; 86, 99; 87, 100; 87, 101; 88, 102; 88, 103; 93, 104; 93, 105; 94, 106; 94, 107; 95, 108; 95, 109; 97, 110; 97, 111; 99, 112; 101, 113; 101, 114; 107, 115; 107, 116; 109, 117; 109, 118; 109, 119; 114, 120; 116, 121; 118, 122; 118, 123; 119, 124; 119, 125; 121, 126; 121, 127; 123, 128; 123, 129; 125, 130; 125, 131; 129, 132; 130, 133; 130, 134; 131, 135; 133, 136; 133, 137
def sort_nodes(self): """Topologically sort all of our nodes. Topologically sorting our nodes makes nodes that are inputs to other nodes come first in the list of nodes. This is important to do before programming a sensorgraph into an embedded device whose engine assumes a topologically sorted graph. The sorting is done in place on self.nodes """ node_map = {id(node): i for i, node in enumerate(self.nodes)} node_deps = {} for node, inputs, _outputs in self.iterate_bfs(): node_index = node_map[id(node)] deps = {node_map[id(x)] for x in inputs} node_deps[node_index] = deps # Now that we have our dependency tree properly built, topologically # sort the nodes and reorder them. node_order = toposort_flatten(node_deps) self.nodes = [self.nodes[x] for x in node_order] #Check root nodes all topographically sorted to the beginning for root in self.roots: if root not in self.nodes[0:len(self.roots)]: raise NodeConnectionError("Inputs not sorted in the beginning", node=str(root), node_position=self.nodes.index(root))
0, module; 1, function_definition; 2, function_name:GenerateHelpText; 3, parameters; 4, block; 5, identifier:self; 6, identifier:env; 7, default_parameter; 8, expression_statement; 9, if_statement; 10, function_definition; 11, expression_statement; 12, return_statement; 13, identifier:sort; 14, None; 15, comment:""" Generate the help text for the options. env - an environment that is used to get the current values of the options. cmp - Either a function as follows: The specific sort function should take two arguments and return -1, 0 or 1 or a boolean to indicate if it should be sorted. """; 16, call; 17, block; 18, elif_clause; 19, else_clause; 20, function_name:format; 21, parameters; 22, block; 23, assignment; 24, call; 25, identifier:callable; 26, argument_list; 27, expression_statement; 28, comparison_operator:sort is True; 29, block; 30, block; 31, identifier:opt; 32, default_parameter; 33, default_parameter; 34, if_statement; 35, return_statement; 36, identifier:lines; 37, list_comprehension; 38, attribute; 39, argument_list; 40, identifier:sort; 41, assignment; 42, identifier:sort; 43, True; 44, expression_statement; 45, expression_statement; 46, identifier:self; 47, identifier:self; 48, identifier:env; 49, identifier:env; 50, comparison_operator:opt.key in env; 51, block; 52, else_clause; 53, call; 54, identifier:_f; 55, for_in_clause; 56, if_clause; 57, string; 58, identifier:join; 59, identifier:lines; 60, identifier:options; 61, call; 62, assignment; 63, assignment; 64, attribute; 65, identifier:env; 66, expression_statement; 67, block; 68, attribute; 69, argument_list; 70, identifier:_f; 71, call; 72, identifier:_f; 73, identifier:sorted; 74, argument_list; 75, identifier:options; 76, call; 77, identifier:options; 78, attribute; 79, identifier:opt; 80, identifier:key; 81, assignment; 82, expression_statement; 83, identifier:self; 84, identifier:FormatVariableHelpText; 85, identifier:env; 86, attribute; 87, attribute; 88, attribute; 89, identifier:actual; 90, attribute; 91, identifier:map; 92, argument_list; 93, attribute; 94, keyword_argument; 95, identifier:sorted; 96, argument_list; 97, identifier:self; 98, identifier:options; 99, identifier:actual; 100, call; 101, assignment; 102, identifier:opt; 103, identifier:key; 104, identifier:opt; 105, identifier:help; 106, identifier:opt; 107, identifier:default; 108, identifier:opt; 109, identifier:aliases; 110, identifier:format; 111, identifier:options; 112, identifier:self; 113, identifier:options; 114, identifier:key; 115, call; 116, attribute; 117, keyword_argument; 118, attribute; 119, argument_list; 120, identifier:actual; 121, None; 122, identifier:cmp_to_key; 123, argument_list; 124, identifier:self; 125, identifier:options; 126, identifier:key; 127, lambda; 128, identifier:env; 129, identifier:subst; 130, binary_operator:'${%s}' % opt.key; 131, lambda; 132, lambda_parameters; 133, attribute; 134, string; 135, attribute; 136, lambda_parameters; 137, call; 138, identifier:x; 139, identifier:x; 140, identifier:key; 141, string_content:${%s}; 142, identifier:opt; 143, identifier:key; 144, identifier:x; 145, identifier:y; 146, identifier:sort; 147, argument_list; 148, attribute; 149, attribute; 150, identifier:x; 151, identifier:key; 152, identifier:y; 153, identifier:key
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 7, 13; 7, 14; 8, 15; 9, 16; 9, 17; 9, 18; 9, 19; 10, 20; 10, 21; 10, 22; 11, 23; 12, 24; 16, 25; 16, 26; 17, 27; 18, 28; 18, 29; 19, 30; 21, 31; 21, 32; 21, 33; 22, 34; 22, 35; 23, 36; 23, 37; 24, 38; 24, 39; 26, 40; 27, 41; 28, 42; 28, 43; 29, 44; 30, 45; 32, 46; 32, 47; 33, 48; 33, 49; 34, 50; 34, 51; 34, 52; 35, 53; 37, 54; 37, 55; 37, 56; 38, 57; 38, 58; 39, 59; 41, 60; 41, 61; 44, 62; 45, 63; 50, 64; 50, 65; 51, 66; 52, 67; 53, 68; 53, 69; 55, 70; 55, 71; 56, 72; 61, 73; 61, 74; 62, 75; 62, 76; 63, 77; 63, 78; 64, 79; 64, 80; 66, 81; 67, 82; 68, 83; 68, 84; 69, 85; 69, 86; 69, 87; 69, 88; 69, 89; 69, 90; 71, 91; 71, 92; 74, 93; 74, 94; 76, 95; 76, 96; 78, 97; 78, 98; 81, 99; 81, 100; 82, 101; 86, 102; 86, 103; 87, 104; 87, 105; 88, 106; 88, 107; 90, 108; 90, 109; 92, 110; 92, 111; 93, 112; 93, 113; 94, 114; 94, 115; 96, 116; 96, 117; 100, 118; 100, 119; 101, 120; 101, 121; 115, 122; 115, 123; 116, 124; 116, 125; 117, 126; 117, 127; 118, 128; 118, 129; 119, 130; 123, 131; 127, 132; 127, 133; 130, 134; 130, 135; 131, 136; 131, 137; 132, 138; 133, 139; 133, 140; 134, 141; 135, 142; 135, 143; 136, 144; 136, 145; 137, 146; 137, 147; 147, 148; 147, 149; 148, 150; 148, 151; 149, 152; 149, 153
def GenerateHelpText(self, env, sort=None): """ Generate the help text for the options. env - an environment that is used to get the current values of the options. cmp - Either a function as follows: The specific sort function should take two arguments and return -1, 0 or 1 or a boolean to indicate if it should be sorted. """ if callable(sort): options = sorted(self.options, key=cmp_to_key(lambda x,y: sort(x.key,y.key))) elif sort is True: options = sorted(self.options, key=lambda x: x.key) else: options = self.options def format(opt, self=self, env=env): if opt.key in env: actual = env.subst('${%s}' % opt.key) else: actual = None return self.FormatVariableHelpText(env, opt.key, opt.help, opt.default, actual, opt.aliases) lines = [_f for _f in map(format, options) if _f] return ''.join(lines)
0, module; 1, function_definition; 2, function_name:unique; 3, parameters; 4, block; 5, identifier:s; 6, expression_statement; 7, expression_statement; 8, if_statement; 9, comment:# Try using a dict first, as that's the fastest and will usually; 10, comment:# work. If it doesn't work, it will usually fail quickly, so it; 11, comment:# usually doesn't cost much to *try* it. It requires that all the; 12, comment:# sequence elements be hashable, and support equality comparison.; 13, expression_statement; 14, try_statement; 15, delete_statement; 16, comment:# We can't hash all the elements. Second fastest is to sort,; 17, comment:# which brings the equal elements together; then duplicates are; 18, comment:# easy to weed out in a single pass.; 19, comment:# NOTE: Python's list.sort() was designed to be efficient in the; 20, comment:# presence of many duplicate elements. This isn't true of all; 21, comment:# sort functions in all languages or libraries, so this approach; 22, comment:# is more effective in Python than it may be elsewhere.; 23, try_statement; 24, delete_statement; 25, comment:# Brute force is all that's left.; 26, expression_statement; 27, for_statement; 28, return_statement; 29, comment:"""Return a list of the elements in s, but without duplicates. For example, unique([1,2,3,1,2,3]) is some permutation of [1,2,3], unique("abcabc") some permutation of ["a", "b", "c"], and unique(([1, 2], [2, 3], [1, 2])) some permutation of [[2, 3], [1, 2]]. For best speed, all sequence elements should be hashable. Then unique() will usually work in linear time. If not possible, the sequence elements should enjoy a total ordering, and if list(s).sort() doesn't raise TypeError it's assumed that they do enjoy a total ordering. Then unique() will usually work in O(N*log2(N)) time. If that's not possible either, the sequence elements must support equality-testing. Then unique() will usually work in quadratic time. """; 30, assignment; 31, comparison_operator:n == 0; 32, block; 33, assignment; 34, block; 35, except_clause; 36, else_clause; 37, identifier:u; 38, block; 39, except_clause; 40, else_clause; 41, identifier:t; 42, assignment; 43, identifier:x; 44, identifier:s; 45, block; 46, identifier:u; 47, identifier:n; 48, call; 49, identifier:n; 50, integer:0; 51, return_statement; 52, identifier:u; 53, dictionary; 54, for_statement; 55, identifier:TypeError; 56, block; 57, block; 58, expression_statement; 59, identifier:TypeError; 60, block; 61, block; 62, identifier:u; 63, list; 64, if_statement; 65, identifier:len; 66, argument_list; 67, list; 68, identifier:x; 69, identifier:s; 70, block; 71, pass_statement; 72, comment:# move on to the next method; 73, return_statement; 74, assignment; 75, pass_statement; 76, comment:# move on to the next method; 77, assert_statement; 78, expression_statement; 79, expression_statement; 80, while_statement; 81, return_statement; 82, comparison_operator:x not in u; 83, block; 84, identifier:s; 85, expression_statement; 86, call; 87, identifier:t; 88, call; 89, comparison_operator:n > 0; 90, assignment; 91, assignment; 92, comparison_operator:i < n; 93, block; 94, subscript; 95, identifier:x; 96, identifier:u; 97, expression_statement; 98, assignment; 99, identifier:list; 100, argument_list; 101, identifier:sorted; 102, argument_list; 103, identifier:n; 104, integer:0; 105, identifier:last; 106, subscript; 107, identifier:lasti; 108, assignment; 109, identifier:i; 110, identifier:n; 111, if_statement; 112, expression_statement; 113, identifier:t; 114, slice; 115, call; 116, subscript; 117, integer:1; 118, call; 119, identifier:s; 120, identifier:t; 121, integer:0; 122, identifier:i; 123, integer:1; 124, comparison_operator:t[i] != last; 125, block; 126, assignment; 127, identifier:lasti; 128, attribute; 129, argument_list; 130, identifier:u; 131, identifier:x; 132, attribute; 133, argument_list; 134, subscript; 135, identifier:last; 136, expression_statement; 137, expression_statement; 138, identifier:i; 139, binary_operator:i + 1; 140, identifier:u; 141, identifier:append; 142, identifier:x; 143, identifier:u; 144, identifier:keys; 145, identifier:t; 146, identifier:i; 147, assignment; 148, assignment; 149, identifier:i; 150, integer:1; 151, subscript; 152, assignment; 153, identifier:lasti; 154, binary_operator:lasti + 1; 155, identifier:t; 156, identifier:lasti; 157, identifier:last; 158, subscript; 159, identifier:lasti; 160, integer:1; 161, identifier:t; 162, identifier:i
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 4, 6; 4, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 4, 14; 4, 15; 4, 16; 4, 17; 4, 18; 4, 19; 4, 20; 4, 21; 4, 22; 4, 23; 4, 24; 4, 25; 4, 26; 4, 27; 4, 28; 6, 29; 7, 30; 8, 31; 8, 32; 13, 33; 14, 34; 14, 35; 14, 36; 15, 37; 23, 38; 23, 39; 23, 40; 24, 41; 26, 42; 27, 43; 27, 44; 27, 45; 28, 46; 30, 47; 30, 48; 31, 49; 31, 50; 32, 51; 33, 52; 33, 53; 34, 54; 35, 55; 35, 56; 36, 57; 38, 58; 39, 59; 39, 60; 40, 61; 42, 62; 42, 63; 45, 64; 48, 65; 48, 66; 51, 67; 54, 68; 54, 69; 54, 70; 56, 71; 56, 72; 57, 73; 58, 74; 60, 75; 60, 76; 61, 77; 61, 78; 61, 79; 61, 80; 61, 81; 64, 82; 64, 83; 66, 84; 70, 85; 73, 86; 74, 87; 74, 88; 77, 89; 78, 90; 79, 91; 80, 92; 80, 93; 81, 94; 82, 95; 82, 96; 83, 97; 85, 98; 86, 99; 86, 100; 88, 101; 88, 102; 89, 103; 89, 104; 90, 105; 90, 106; 91, 107; 91, 108; 92, 109; 92, 110; 93, 111; 93, 112; 94, 113; 94, 114; 97, 115; 98, 116; 98, 117; 100, 118; 102, 119; 106, 120; 106, 121; 108, 122; 108, 123; 111, 124; 111, 125; 112, 126; 114, 127; 115, 128; 115, 129; 116, 130; 116, 131; 118, 132; 118, 133; 124, 134; 124, 135; 125, 136; 125, 137; 126, 138; 126, 139; 128, 140; 128, 141; 129, 142; 132, 143; 132, 144; 134, 145; 134, 146; 136, 147; 137, 148; 139, 149; 139, 150; 147, 151; 147, 152; 148, 153; 148, 154; 151, 155; 151, 156; 152, 157; 152, 158; 154, 159; 154, 160; 158, 161; 158, 162
def unique(s): """Return a list of the elements in s, but without duplicates. For example, unique([1,2,3,1,2,3]) is some permutation of [1,2,3], unique("abcabc") some permutation of ["a", "b", "c"], and unique(([1, 2], [2, 3], [1, 2])) some permutation of [[2, 3], [1, 2]]. For best speed, all sequence elements should be hashable. Then unique() will usually work in linear time. If not possible, the sequence elements should enjoy a total ordering, and if list(s).sort() doesn't raise TypeError it's assumed that they do enjoy a total ordering. Then unique() will usually work in O(N*log2(N)) time. If that's not possible either, the sequence elements must support equality-testing. Then unique() will usually work in quadratic time. """ n = len(s) if n == 0: return [] # Try using a dict first, as that's the fastest and will usually # work. If it doesn't work, it will usually fail quickly, so it # usually doesn't cost much to *try* it. It requires that all the # sequence elements be hashable, and support equality comparison. u = {} try: for x in s: u[x] = 1 except TypeError: pass # move on to the next method else: return list(u.keys()) del u # We can't hash all the elements. Second fastest is to sort, # which brings the equal elements together; then duplicates are # easy to weed out in a single pass. # NOTE: Python's list.sort() was designed to be efficient in the # presence of many duplicate elements. This isn't true of all # sort functions in all languages or libraries, so this approach # is more effective in Python than it may be elsewhere. try: t = sorted(s) except TypeError: pass # move on to the next method else: assert n > 0 last = t[0] lasti = i = 1 while i < n: if t[i] != last: t[lasti] = last = t[i] lasti = lasti + 1 i = i + 1 return t[:lasti] del t # Brute force is all that's left. u = [] for x in s: if x not in u: u.append(x) return u
0, module; 1, function_definition; 2, function_name:get_text_contents; 3, parameters; 4, block; 5, identifier:self; 6, expression_statement; 7, try_statement; 8, comment:"""Fetch the decoded text contents of a Unicode encoded Entry. Since this should return the text contents from the file system, we check to see into what sort of subclass we should morph this Entry."""; 9, block; 10, except_clause; 11, else_clause; 12, expression_statement; 13, attribute; 14, comment:# There was nothing on disk with which to disambiguate; 15, comment:# this entry. Leave it as an Entry, but return a null; 16, comment:# string so calls to get_text_contents() in emitters and; 17, comment:# the like (e.g. in qt.py) don't have to disambiguate by; 18, comment:# hand or catch the exception.; 19, block; 20, block; 21, assignment; 22, attribute; 23, identifier:UserError; 24, return_statement; 25, return_statement; 26, identifier:self; 27, call; 28, identifier:SCons; 29, identifier:Errors; 30, string; 31, call; 32, attribute; 33, argument_list; 34, attribute; 35, argument_list; 36, identifier:self; 37, identifier:disambiguate; 38, keyword_argument; 39, identifier:self; 40, identifier:get_text_contents; 41, identifier:must_exist; 42, integer:1
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 4, 6; 4, 7; 6, 8; 7, 9; 7, 10; 7, 11; 9, 12; 10, 13; 10, 14; 10, 15; 10, 16; 10, 17; 10, 18; 10, 19; 11, 20; 12, 21; 13, 22; 13, 23; 19, 24; 20, 25; 21, 26; 21, 27; 22, 28; 22, 29; 24, 30; 25, 31; 27, 32; 27, 33; 31, 34; 31, 35; 32, 36; 32, 37; 33, 38; 34, 39; 34, 40; 38, 41; 38, 42
def get_text_contents(self): """Fetch the decoded text contents of a Unicode encoded Entry. Since this should return the text contents from the file system, we check to see into what sort of subclass we should morph this Entry.""" try: self = self.disambiguate(must_exist=1) except SCons.Errors.UserError: # There was nothing on disk with which to disambiguate # this entry. Leave it as an Entry, but return a null # string so calls to get_text_contents() in emitters and # the like (e.g. in qt.py) don't have to disambiguate by # hand or catch the exception. return '' else: return self.get_text_contents()
0, module; 1, function_definition; 2, function_name:_order_pases; 3, parameters; 4, block; 5, identifier:self; 6, identifier:passes; 7, expression_statement; 8, expression_statement; 9, expression_statement; 10, for_statement; 11, return_statement; 12, comment:"""Topologically sort optimization passes. This ensures that the resulting passes are run in order respecting before/after constraints. Args: passes (iterable): An iterable of pass names that should be included in the optimization passes run. """; 13, assignment; 14, assignment; 15, identifier:opt; 16, identifier:passes; 17, block; 18, call; 19, identifier:passes; 20, call; 21, identifier:pass_deps; 22, dictionary; 23, expression_statement; 24, if_statement; 25, for_statement; 26, comment:# For passes that we are before, we may need to; 27, comment:# preemptively add them to the list early; 28, for_statement; 29, identifier:toposort_flatten; 30, argument_list; 31, identifier:set; 32, argument_list; 33, assignment; 34, comparison_operator:opt not in pass_deps; 35, block; 36, identifier:after_pass; 37, identifier:after; 38, block; 39, identifier:other; 40, identifier:before; 41, block; 42, identifier:pass_deps; 43, identifier:passes; 44, pattern_list; 45, subscript; 46, identifier:opt; 47, identifier:pass_deps; 48, expression_statement; 49, expression_statement; 50, if_statement; 51, if_statement; 52, expression_statement; 53, identifier:_; 54, identifier:before; 55, identifier:after; 56, attribute; 57, identifier:opt; 58, assignment; 59, call; 60, comparison_operator:other not in passes; 61, block; 62, comparison_operator:other not in pass_deps; 63, block; 64, call; 65, identifier:self; 66, identifier:_known_passes; 67, subscript; 68, call; 69, attribute; 70, argument_list; 71, identifier:other; 72, identifier:passes; 73, continue_statement; 74, identifier:other; 75, identifier:pass_deps; 76, expression_statement; 77, attribute; 78, argument_list; 79, identifier:pass_deps; 80, identifier:opt; 81, identifier:set; 82, argument_list; 83, subscript; 84, identifier:add; 85, identifier:after_pass; 86, assignment; 87, subscript; 88, identifier:add; 89, identifier:opt; 90, identifier:pass_deps; 91, identifier:opt; 92, subscript; 93, call; 94, identifier:pass_deps; 95, identifier:other; 96, identifier:pass_deps; 97, identifier:other; 98, identifier:set; 99, argument_list
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 4, 9; 4, 10; 4, 11; 7, 12; 8, 13; 9, 14; 10, 15; 10, 16; 10, 17; 11, 18; 13, 19; 13, 20; 14, 21; 14, 22; 17, 23; 17, 24; 17, 25; 17, 26; 17, 27; 17, 28; 18, 29; 18, 30; 20, 31; 20, 32; 23, 33; 24, 34; 24, 35; 25, 36; 25, 37; 25, 38; 28, 39; 28, 40; 28, 41; 30, 42; 32, 43; 33, 44; 33, 45; 34, 46; 34, 47; 35, 48; 38, 49; 41, 50; 41, 51; 41, 52; 44, 53; 44, 54; 44, 55; 45, 56; 45, 57; 48, 58; 49, 59; 50, 60; 50, 61; 51, 62; 51, 63; 52, 64; 56, 65; 56, 66; 58, 67; 58, 68; 59, 69; 59, 70; 60, 71; 60, 72; 61, 73; 62, 74; 62, 75; 63, 76; 64, 77; 64, 78; 67, 79; 67, 80; 68, 81; 68, 82; 69, 83; 69, 84; 70, 85; 76, 86; 77, 87; 77, 88; 78, 89; 83, 90; 83, 91; 86, 92; 86, 93; 87, 94; 87, 95; 92, 96; 92, 97; 93, 98; 93, 99
def _order_pases(self, passes): """Topologically sort optimization passes. This ensures that the resulting passes are run in order respecting before/after constraints. Args: passes (iterable): An iterable of pass names that should be included in the optimization passes run. """ passes = set(passes) pass_deps = {} for opt in passes: _, before, after = self._known_passes[opt] if opt not in pass_deps: pass_deps[opt] = set() for after_pass in after: pass_deps[opt].add(after_pass) # For passes that we are before, we may need to # preemptively add them to the list early for other in before: if other not in passes: continue if other not in pass_deps: pass_deps[other] = set() pass_deps[other].add(opt) return toposort_flatten(pass_deps)
0, module; 1, function_definition; 2, function_name:local_services; 3, parameters; 4, block; 5, identifier:self; 6, expression_statement; 7, if_statement; 8, try_statement; 9, comment:"""Get a list of id, name pairs for all of the known synced services. This method is safe to call outside of the background event loop without any race condition. Internally it uses a thread-safe mutex to protect the local copies of supervisor data and ensure that it cannot change while this method is iterating over it. Returns: list (id, name): A list of tuples with id and service name sorted by id from low to high """; 10, not_operator; 11, block; 12, block; 13, finally_clause; 14, call; 15, expression_statement; 16, return_statement; 17, block; 18, attribute; 19, argument_list; 20, call; 21, call; 22, if_statement; 23, attribute; 24, identifier:inside_loop; 25, attribute; 26, argument_list; 27, identifier:sorted; 28, argument_list; 29, not_operator; 30, block; 31, identifier:self; 32, identifier:_loop; 33, attribute; 34, identifier:acquire; 35, list_comprehension; 36, keyword_argument; 37, call; 38, expression_statement; 39, identifier:self; 40, identifier:_state_lock; 41, tuple; 42, for_in_clause; 43, identifier:key; 44, lambda; 45, attribute; 46, argument_list; 47, call; 48, identifier:index; 49, identifier:name; 50, pattern_list; 51, call; 52, lambda_parameters; 53, subscript; 54, attribute; 55, identifier:inside_loop; 56, attribute; 57, argument_list; 58, identifier:index; 59, identifier:name; 60, attribute; 61, argument_list; 62, identifier:element; 63, identifier:element; 64, integer:0; 65, identifier:self; 66, identifier:_loop; 67, attribute; 68, identifier:release; 69, attribute; 70, identifier:items; 71, identifier:self; 72, identifier:_state_lock; 73, identifier:self; 74, identifier:_name_map
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 4, 6; 4, 7; 4, 8; 6, 9; 7, 10; 7, 11; 8, 12; 8, 13; 10, 14; 11, 15; 12, 16; 13, 17; 14, 18; 14, 19; 15, 20; 16, 21; 17, 22; 18, 23; 18, 24; 20, 25; 20, 26; 21, 27; 21, 28; 22, 29; 22, 30; 23, 31; 23, 32; 25, 33; 25, 34; 28, 35; 28, 36; 29, 37; 30, 38; 33, 39; 33, 40; 35, 41; 35, 42; 36, 43; 36, 44; 37, 45; 37, 46; 38, 47; 41, 48; 41, 49; 42, 50; 42, 51; 44, 52; 44, 53; 45, 54; 45, 55; 47, 56; 47, 57; 50, 58; 50, 59; 51, 60; 51, 61; 52, 62; 53, 63; 53, 64; 54, 65; 54, 66; 56, 67; 56, 68; 60, 69; 60, 70; 67, 71; 67, 72; 69, 73; 69, 74
def local_services(self): """Get a list of id, name pairs for all of the known synced services. This method is safe to call outside of the background event loop without any race condition. Internally it uses a thread-safe mutex to protect the local copies of supervisor data and ensure that it cannot change while this method is iterating over it. Returns: list (id, name): A list of tuples with id and service name sorted by id from low to high """ if not self._loop.inside_loop(): self._state_lock.acquire() try: return sorted([(index, name) for index, name in self._name_map.items()], key=lambda element: element[0]) finally: if not self._loop.inside_loop(): self._state_lock.release()
0, module; 1, function_definition; 2, function_name:format_script; 3, parameters; 4, block; 5, identifier:sensor_graph; 6, expression_statement; 7, expression_statement; 8, expression_statement; 9, expression_statement; 10, expression_statement; 11, for_statement; 12, for_statement; 13, for_statement; 14, expression_statement; 15, expression_statement; 16, for_statement; 17, comment:# If we have an app tag and version set program them in; 18, expression_statement; 19, expression_statement; 20, if_statement; 21, expression_statement; 22, return_statement; 23, comment:"""Create a binary script containing this sensor graph. This function produces a repeatable script by applying a known sorting order to all constants and config variables when iterating over those dictionaries. Args: sensor_graph (SensorGraph): the sensor graph that we want to format Returns: bytearray: The binary script data. """; 24, assignment; 25, call; 26, call; 27, call; 28, identifier:node; 29, attribute; 30, block; 31, identifier:streamer; 32, attribute; 33, block; 34, pattern_list; 35, call; 36, block; 37, call; 38, call; 39, identifier:slot; 40, call; 41, block; 42, assignment; 43, assignment; 44, comparison_operator:app_tag is not None; 45, block; 46, assignment; 47, call; 48, identifier:records; 49, list; 50, attribute; 51, argument_list; 52, attribute; 53, argument_list; 54, attribute; 55, argument_list; 56, identifier:sensor_graph; 57, identifier:nodes; 58, expression_statement; 59, identifier:sensor_graph; 60, identifier:streamers; 61, expression_statement; 62, identifier:stream; 63, identifier:value; 64, identifier:sorted; 65, argument_list; 66, expression_statement; 67, attribute; 68, argument_list; 69, attribute; 70, argument_list; 71, identifier:sorted; 72, argument_list; 73, for_statement; 74, identifier:app_tag; 75, call; 76, identifier:app_version; 77, call; 78, identifier:app_tag; 79, None; 80, expression_statement; 81, identifier:script; 82, call; 83, attribute; 84, argument_list; 85, identifier:records; 86, identifier:append; 87, call; 88, identifier:records; 89, identifier:append; 90, call; 91, identifier:records; 92, identifier:append; 93, call; 94, call; 95, call; 96, call; 97, keyword_argument; 98, call; 99, identifier:records; 100, identifier:append; 101, call; 102, identifier:records; 103, identifier:append; 104, call; 105, attribute; 106, keyword_argument; 107, identifier:config_id; 108, call; 109, block; 110, attribute; 111, argument_list; 112, attribute; 113, argument_list; 114, call; 115, identifier:UpdateScript; 116, argument_list; 117, identifier:script; 118, identifier:encode; 119, identifier:SetGraphOnlineRecord; 120, argument_list; 121, identifier:ClearDataRecord; 122, argument_list; 123, identifier:ResetGraphRecord; 124, argument_list; 125, attribute; 126, argument_list; 127, attribute; 128, argument_list; 129, attribute; 130, argument_list; 131, identifier:key; 132, lambda; 133, attribute; 134, argument_list; 135, identifier:PersistGraphRecord; 136, argument_list; 137, identifier:ClearConfigVariablesRecord; 138, argument_list; 139, identifier:sensor_graph; 140, identifier:config_database; 141, identifier:key; 142, lambda; 143, identifier:sorted; 144, argument_list; 145, expression_statement; 146, expression_statement; 147, expression_statement; 148, attribute; 149, identifier:get; 150, string; 151, attribute; 152, identifier:get; 153, string; 154, attribute; 155, argument_list; 156, identifier:records; 157, False; 158, keyword_argument; 159, keyword_argument; 160, keyword_argument; 161, identifier:records; 162, identifier:append; 163, call; 164, identifier:records; 165, identifier:append; 166, call; 167, attribute; 168, identifier:items; 169, lambda_parameters; 170, call; 171, identifier:records; 172, identifier:append; 173, call; 174, keyword_argument; 175, lambda_parameters; 176, call; 177, subscript; 178, assignment; 179, assignment; 180, call; 181, identifier:sensor_graph; 182, identifier:metadata_database; 183, string_content:app_tag; 184, identifier:sensor_graph; 185, identifier:metadata_database; 186, string_content:app_version; 187, identifier:records; 188, identifier:append; 189, call; 190, identifier:address; 191, integer:8; 192, identifier:address; 193, integer:8; 194, identifier:address; 195, integer:8; 196, identifier:AddNodeRecord; 197, argument_list; 198, identifier:AddStreamerRecord; 199, argument_list; 200, identifier:sensor_graph; 201, identifier:constant_database; 202, identifier:x; 203, attribute; 204, argument_list; 205, identifier:SetConstantRecord; 206, argument_list; 207, identifier:address; 208, integer:8; 209, identifier:x; 210, attribute; 211, argument_list; 212, attribute; 213, identifier:slot; 214, pattern_list; 215, subscript; 216, identifier:byte_value; 217, call; 218, attribute; 219, argument_list; 220, identifier:SetDeviceTagRecord; 221, argument_list; 222, call; 223, keyword_argument; 224, identifier:streamer; 225, keyword_argument; 226, subscript; 227, identifier:encode; 228, identifier:stream; 229, identifier:value; 230, keyword_argument; 231, identifier:x; 232, identifier:encode; 233, identifier:sensor_graph; 234, identifier:config_database; 235, identifier:config_type; 236, identifier:value; 237, subscript; 238, identifier:config_id; 239, identifier:_convert_to_bytes; 240, argument_list; 241, identifier:records; 242, identifier:append; 243, call; 244, keyword_argument; 245, keyword_argument; 246, identifier:str; 247, argument_list; 248, identifier:address; 249, integer:8; 250, identifier:address; 251, integer:8; 252, identifier:x; 253, integer:0; 254, identifier:address; 255, integer:8; 256, attribute; 257, identifier:slot; 258, identifier:config_type; 259, identifier:value; 260, identifier:SetConfigRecord; 261, argument_list; 262, identifier:app_tag; 263, identifier:app_tag; 264, identifier:app_version; 265, identifier:app_version; 266, identifier:node; 267, identifier:sensor_graph; 268, identifier:config_database; 269, identifier:slot; 270, identifier:config_id; 271, identifier:byte_value
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 4, 6; 4, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 4, 14; 4, 15; 4, 16; 4, 17; 4, 18; 4, 19; 4, 20; 4, 21; 4, 22; 6, 23; 7, 24; 8, 25; 9, 26; 10, 27; 11, 28; 11, 29; 11, 30; 12, 31; 12, 32; 12, 33; 13, 34; 13, 35; 13, 36; 14, 37; 15, 38; 16, 39; 16, 40; 16, 41; 18, 42; 19, 43; 20, 44; 20, 45; 21, 46; 22, 47; 24, 48; 24, 49; 25, 50; 25, 51; 26, 52; 26, 53; 27, 54; 27, 55; 29, 56; 29, 57; 30, 58; 32, 59; 32, 60; 33, 61; 34, 62; 34, 63; 35, 64; 35, 65; 36, 66; 37, 67; 37, 68; 38, 69; 38, 70; 40, 71; 40, 72; 41, 73; 42, 74; 42, 75; 43, 76; 43, 77; 44, 78; 44, 79; 45, 80; 46, 81; 46, 82; 47, 83; 47, 84; 50, 85; 50, 86; 51, 87; 52, 88; 52, 89; 53, 90; 54, 91; 54, 92; 55, 93; 58, 94; 61, 95; 65, 96; 65, 97; 66, 98; 67, 99; 67, 100; 68, 101; 69, 102; 69, 103; 70, 104; 72, 105; 72, 106; 73, 107; 73, 108; 73, 109; 75, 110; 75, 111; 77, 112; 77, 113; 80, 114; 82, 115; 82, 116; 83, 117; 83, 118; 87, 119; 87, 120; 90, 121; 90, 122; 93, 123; 93, 124; 94, 125; 94, 126; 95, 127; 95, 128; 96, 129; 96, 130; 97, 131; 97, 132; 98, 133; 98, 134; 101, 135; 101, 136; 104, 137; 104, 138; 105, 139; 105, 140; 106, 141; 106, 142; 108, 143; 108, 144; 109, 145; 109, 146; 109, 147; 110, 148; 110, 149; 111, 150; 112, 151; 112, 152; 113, 153; 114, 154; 114, 155; 116, 156; 120, 157; 120, 158; 122, 159; 124, 160; 125, 161; 125, 162; 126, 163; 127, 164; 127, 165; 128, 166; 129, 167; 129, 168; 132, 169; 132, 170; 133, 171; 133, 172; 134, 173; 136, 174; 142, 175; 142, 176; 144, 177; 145, 178; 146, 179; 147, 180; 148, 181; 148, 182; 150, 183; 151, 184; 151, 185; 153, 186; 154, 187; 154, 188; 155, 189; 158, 190; 158, 191; 159, 192; 159, 193; 160, 194; 160, 195; 163, 196; 163, 197; 166, 198; 166, 199; 167, 200; 167, 201; 169, 202; 170, 203; 170, 204; 173, 205; 173, 206; 174, 207; 174, 208; 175, 209; 176, 210; 176, 211; 177, 212; 177, 213; 178, 214; 178, 215; 179, 216; 179, 217; 180, 218; 180, 219; 189, 220; 189, 221; 197, 222; 197, 223; 199, 224; 199, 225; 203, 226; 203, 227; 206, 228; 206, 229; 206, 230; 210, 231; 210, 232; 212, 233; 212, 234; 214, 235; 214, 236; 215, 237; 215, 238; 217, 239; 217, 240; 218, 241; 218, 242; 219, 243; 221, 244; 221, 245; 222, 246; 222, 247; 223, 248; 223, 249; 225, 250; 225, 251; 226, 252; 226, 253; 230, 254; 230, 255; 237, 256; 237, 257; 240, 258; 240, 259; 243, 260; 243, 261; 244, 262; 244, 263; 245, 264; 245, 265; 247, 266; 256, 267; 256, 268; 261, 269; 261, 270; 261, 271
def format_script(sensor_graph): """Create a binary script containing this sensor graph. This function produces a repeatable script by applying a known sorting order to all constants and config variables when iterating over those dictionaries. Args: sensor_graph (SensorGraph): the sensor graph that we want to format Returns: bytearray: The binary script data. """ records = [] records.append(SetGraphOnlineRecord(False, address=8)) records.append(ClearDataRecord(address=8)) records.append(ResetGraphRecord(address=8)) for node in sensor_graph.nodes: records.append(AddNodeRecord(str(node), address=8)) for streamer in sensor_graph.streamers: records.append(AddStreamerRecord(streamer, address=8)) for stream, value in sorted(sensor_graph.constant_database.items(), key=lambda x: x[0].encode()): records.append(SetConstantRecord(stream, value, address=8)) records.append(PersistGraphRecord(address=8)) records.append(ClearConfigVariablesRecord()) for slot in sorted(sensor_graph.config_database, key=lambda x: x.encode()): for config_id in sorted(sensor_graph.config_database[slot]): config_type, value = sensor_graph.config_database[slot][config_id] byte_value = _convert_to_bytes(config_type, value) records.append(SetConfigRecord(slot, config_id, byte_value)) # If we have an app tag and version set program them in app_tag = sensor_graph.metadata_database.get('app_tag') app_version = sensor_graph.metadata_database.get('app_version') if app_tag is not None: records.append(SetDeviceTagRecord(app_tag=app_tag, app_version=app_version)) script = UpdateScript(records) return script.encode()
0, module; 1, function_definition; 2, function_name:msregularize; 3, parameters; 4, block; 5, identifier:msname; 6, identifier:newname; 7, expression_statement; 8, comment:# Find out all baselines.; 9, expression_statement; 10, expression_statement; 11, expression_statement; 12, comment:# Now iterate in time,band over the MS.; 13, for_statement; 14, comment:# Combine the existing table and new table.; 15, if_statement; 16, expression_statement; 17, expression_statement; 18, if_statement; 19, comment:""" Regularize an MS The output MS will be such that it has the same number of baselines for each time stamp. Where needed fully flagged rows are added. Possibly missing rows are written into a separate MS <newname>-add. It is concatenated with the original MS and sorted in order of TIME, DATADESC_ID, ANTENNA1,ANTENNA2 to form a new regular MS. Note that the new MS references the input MS (it does not copy the data). It means that changes made in the new MS are also made in the input MS. If no rows were missing, the new MS is still created referencing the input MS. """; 20, assignment; 21, assignment; 22, assignment; 23, identifier:tsub; 24, call; 25, block; 26, comparison_operator:nadded > 0; 27, comment:# First initialize data and flags in the added rows.; 28, block; 29, else_clause; 30, call; 31, call; 32, comparison_operator:nadded > 0; 33, block; 34, else_clause; 35, identifier:t; 36, call; 37, identifier:t1; 38, call; 39, identifier:nadded; 40, integer:0; 41, attribute; 42, argument_list; 43, expression_statement; 44, if_statement; 45, if_statement; 46, identifier:nadded; 47, integer:0; 48, expression_statement; 49, expression_statement; 50, expression_statement; 51, expression_statement; 52, expression_statement; 53, block; 54, attribute; 55, argument_list; 56, attribute; 57, argument_list; 58, identifier:nadded; 59, integer:0; 60, expression_statement; 61, block; 62, identifier:table; 63, argument_list; 64, attribute; 65, argument_list; 66, identifier:t; 67, identifier:iter; 68, list; 69, assignment; 70, comparison_operator:nmissing < 0; 71, block; 72, comparison_operator:nmissing > 0; 73, comment:# Rows needs to be added for the missing baselines.; 74, block; 75, call; 76, call; 77, assignment; 78, call; 79, assignment; 80, expression_statement; 81, identifier:tcombs; 82, identifier:rename; 83, identifier:newname; 84, identifier:six; 85, identifier:print_; 86, identifier:newname; 87, string; 88, call; 89, expression_statement; 90, identifier:msname; 91, identifier:t; 92, identifier:sort; 93, string; 94, string; 95, string; 96, identifier:nmissing; 97, binary_operator:t1.nrows() - tsub.nrows(); 98, identifier:nmissing; 99, integer:0; 100, raise_statement; 101, identifier:nmissing; 102, integer:0; 103, expression_statement; 104, expression_statement; 105, expression_statement; 106, expression_statement; 107, expression_statement; 108, expression_statement; 109, if_statement; 110, comment:# If nothing added yet, create a new table.; 111, comment:# (which has to be reopened for read/write).; 112, comment:# Otherwise append to that new table.; 113, if_statement; 114, comment:# Set the correct time and band in the new rows.; 115, expression_statement; 116, expression_statement; 117, expression_statement; 118, identifier:taql; 119, argument_list; 120, identifier:taql; 121, argument_list; 122, identifier:tcomb; 123, call; 124, attribute; 125, argument_list; 126, identifier:tcombs; 127, call; 128, assignment; 129, string_content:has been created; it references the original MS; 130, attribute; 131, argument_list; 132, call; 133, string_content:unique ANTENNA1,ANTENNA2; 134, string_content:TIME; 135, string_content:DATA_DESC_ID; 136, call; 137, call; 138, call; 139, assignment; 140, assignment; 141, assignment; 142, assignment; 143, assignment; 144, call; 145, comparison_operator:t2.nrows() != nmissing; 146, block; 147, comparison_operator:nadded == 0; 148, block; 149, else_clause; 150, call; 151, call; 152, augmented_assignment; 153, string; 154, string; 155, identifier:table; 156, argument_list; 157, identifier:tcomb; 158, identifier:rename; 159, binary_operator:newname + '_adds'; 160, attribute; 161, argument_list; 162, identifier:tcombs; 163, call; 164, identifier:six; 165, identifier:print_; 166, string; 167, binary_operator:newname + '_adds'; 168, string; 169, identifier:nadded; 170, string; 171, attribute; 172, argument_list; 173, attribute; 174, argument_list; 175, attribute; 176, argument_list; 177, identifier:ValueError; 178, argument_list; 179, identifier:ant1; 180, call; 181, identifier:ant2; 182, call; 183, identifier:ant1; 184, call; 185, identifier:ant2; 186, call; 187, identifier:t2; 188, call; 189, attribute; 190, argument_list; 191, call; 192, identifier:nmissing; 193, raise_statement; 194, identifier:nadded; 195, integer:0; 196, expression_statement; 197, expression_statement; 198, block; 199, attribute; 200, argument_list; 201, attribute; 202, argument_list; 203, identifier:nadded; 204, identifier:nmissing; 205, string_content:update $tnew set DATA=0+0i; 206, string_content:update $tnew set FLAG=True; 207, list; 208, identifier:newname; 209, string; 210, identifier:tcomb; 211, identifier:sort; 212, string; 213, attribute; 214, argument_list; 215, string_content:and; 216, identifier:newname; 217, string; 218, string_content:containing; 219, string_content:new rows; 220, identifier:six; 221, identifier:print_; 222, string; 223, identifier:t1; 224, identifier:nrows; 225, identifier:tsub; 226, identifier:nrows; 227, string:"A time/band chunk has too many rows"; 228, attribute; 229, argument_list; 230, attribute; 231, argument_list; 232, attribute; 233, argument_list; 234, attribute; 235, argument_list; 236, identifier:taql; 237, argument_list; 238, identifier:six; 239, identifier:print_; 240, identifier:nmissing; 241, call; 242, call; 243, call; 244, attribute; 245, argument_list; 246, call; 247, assignment; 248, assignment; 249, expression_statement; 250, identifier:tnew; 251, identifier:putcell; 252, string; 253, call; 254, call; 255, identifier:tnew; 256, identifier:putcell; 257, string; 258, call; 259, call; 260, identifier:t; 261, identifier:tnew; 262, string_content:_adds; 263, string_content:TIME,DATA_DESC_ID,ANTENNA1,ANTENNA2; 264, identifier:t; 265, identifier:query; 266, keyword_argument; 267, string_content:_adds; 268, string_content:no rows needed to be added; 269, call; 270, identifier:replace; 271, string; 272, string; 273, call; 274, identifier:replace; 275, string; 276, string; 277, identifier:tsub; 278, identifier:getcol; 279, string; 280, identifier:tsub; 281, identifier:getcol; 282, string; 283, binary_operator:'select from $t1 where !any(ANTENNA1 == $ant1 &&' + ' ANTENNA2 == $ant2)'; 284, attribute; 285, argument_list; 286, attribute; 287, argument_list; 288, attribute; 289, argument_list; 290, identifier:t2; 291, identifier:nrows; 292, identifier:ValueError; 293, argument_list; 294, identifier:tnew; 295, call; 296, identifier:tnew; 297, call; 298, call; 299, string_content:TIME; 300, identifier:range; 301, argument_list; 302, attribute; 303, argument_list; 304, string_content:DATA_DESC_ID; 305, identifier:range; 306, argument_list; 307, attribute; 308, argument_list; 309, identifier:offset; 310, integer:0; 311, identifier:str; 312, argument_list; 313, string_content:; 314, string_content:,; 315, identifier:str; 316, argument_list; 317, string_content:; 318, string_content:,; 319, string_content:ANTENNA1; 320, string_content:ANTENNA2; 321, string; 322, string; 323, identifier:t1; 324, identifier:nrows; 325, identifier:tsub; 326, identifier:nrows; 327, identifier:t2; 328, identifier:nrows; 329, string:"A time/band chunk behaves strangely"; 330, attribute; 331, argument_list; 332, identifier:table; 333, argument_list; 334, attribute; 335, argument_list; 336, identifier:nadded; 337, binary_operator:nadded + nmissing; 338, identifier:tsub; 339, identifier:getcell; 340, string; 341, integer:0; 342, identifier:nadded; 343, binary_operator:nadded + nmissing; 344, identifier:tsub; 345, identifier:getcell; 346, string; 347, integer:0; 348, call; 349, call; 350, string_content:select from $t1 where !any(ANTENNA1 == $ant1 &&; 351, string_content:ANTENNA2 == $ant2); 352, identifier:t2; 353, identifier:copy; 354, binary_operator:newname + "_add"; 355, keyword_argument; 356, binary_operator:newname + "_add"; 357, keyword_argument; 358, identifier:t2; 359, identifier:copyrows; 360, identifier:tnew; 361, identifier:nadded; 362, identifier:nmissing; 363, string_content:TIME; 364, identifier:nadded; 365, identifier:nmissing; 366, string_content:DATA_DESC_ID; 367, attribute; 368, argument_list; 369, attribute; 370, argument_list; 371, identifier:newname; 372, string:"_add"; 373, identifier:deep; 374, True; 375, identifier:newname; 376, string:"_add"; 377, identifier:readonly; 378, False; 379, identifier:t1; 380, identifier:getcol; 381, string; 382, identifier:t1; 383, identifier:getcol; 384, string; 385, string_content:ANTENNA1; 386, string_content:ANTENNA2
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 4, 7; 4, 8; 4, 9; 4, 10; 4, 11; 4, 12; 4, 13; 4, 14; 4, 15; 4, 16; 4, 17; 4, 18; 7, 19; 9, 20; 10, 21; 11, 22; 13, 23; 13, 24; 13, 25; 15, 26; 15, 27; 15, 28; 15, 29; 16, 30; 17, 31; 18, 32; 18, 33; 18, 34; 20, 35; 20, 36; 21, 37; 21, 38; 22, 39; 22, 40; 24, 41; 24, 42; 25, 43; 25, 44; 25, 45; 26, 46; 26, 47; 28, 48; 28, 49; 28, 50; 28, 51; 28, 52; 29, 53; 30, 54; 30, 55; 31, 56; 31, 57; 32, 58; 32, 59; 33, 60; 34, 61; 36, 62; 36, 63; 38, 64; 38, 65; 41, 66; 41, 67; 42, 68; 43, 69; 44, 70; 44, 71; 45, 72; 45, 73; 45, 74; 48, 75; 49, 76; 50, 77; 51, 78; 52, 79; 53, 80; 54, 81; 54, 82; 55, 83; 56, 84; 56, 85; 57, 86; 57, 87; 60, 88; 61, 89; 63, 90; 64, 91; 64, 92; 65, 93; 68, 94; 68, 95; 69, 96; 69, 97; 70, 98; 70, 99; 71, 100; 72, 101; 72, 102; 74, 103; 74, 104; 74, 105; 74, 106; 74, 107; 74, 108; 74, 109; 74, 110; 74, 111; 74, 112; 74, 113; 74, 114; 74, 115; 74, 116; 74, 117; 75, 118; 75, 119; 76, 120; 76, 121; 77, 122; 77, 123; 78, 124; 78, 125; 79, 126; 79, 127; 80, 128; 87, 129; 88, 130; 88, 131; 89, 132; 93, 133; 94, 134; 95, 135; 97, 136; 97, 137; 100, 138; 103, 139; 104, 140; 105, 141; 106, 142; 107, 143; 108, 144; 109, 145; 109, 146; 113, 147; 113, 148; 113, 149; 115, 150; 116, 151; 117, 152; 119, 153; 121, 154; 123, 155; 123, 156; 124, 157; 124, 158; 125, 159; 127, 160; 127, 161; 128, 162; 128, 163; 130, 164; 130, 165; 131, 166; 131, 167; 131, 168; 131, 169; 131, 170; 132, 171; 132, 172; 136, 173; 136, 174; 137, 175; 137, 176; 138, 177; 138, 178; 139, 179; 139, 180; 140, 181; 140, 182; 141, 183; 141, 184; 142, 185; 142, 186; 143, 187; 143, 188; 144, 189; 144, 190; 145, 191; 145, 192; 146, 193; 147, 194; 147, 195; 148, 196; 148, 197; 149, 198; 150, 199; 150, 200; 151, 201; 151, 202; 152, 203; 152, 204; 153, 205; 154, 206; 156, 207; 159, 208; 159, 209; 160, 210; 160, 211; 161, 212; 163, 213; 163, 214; 166, 215; 167, 216; 167, 217; 168, 218; 170, 219; 171, 220; 171, 221; 172, 222; 173, 223; 173, 224; 175, 225; 175, 226; 178, 227; 180, 228; 180, 229; 182, 230; 182, 231; 184, 232; 184, 233; 186, 234; 186, 235; 188, 236; 188, 237; 189, 238; 189, 239; 190, 240; 190, 241; 190, 242; 190, 243; 191, 244; 191, 245; 193, 246; 196, 247; 197, 248; 198, 249; 199, 250; 199, 251; 200, 252; 200, 253; 200, 254; 201, 255; 201, 256; 202, 257; 202, 258; 202, 259; 207, 260; 207, 261; 209, 262; 212, 263; 213, 264; 213, 265; 214, 266; 217, 267; 222, 268; 228, 269; 228, 270; 229, 271; 229, 272; 230, 273; 230, 274; 231, 275; 231, 276; 232, 277; 232, 278; 233, 279; 234, 280; 234, 281; 235, 282; 237, 283; 241, 284; 241, 285; 242, 286; 242, 287; 243, 288; 243, 289; 244, 290; 244, 291; 246, 292; 246, 293; 247, 294; 247, 295; 248, 296; 248, 297; 249, 298; 252, 299; 253, 300; 253, 301; 254, 302; 254, 303; 257, 304; 258, 305; 258, 306; 259, 307; 259, 308; 266, 309; 266, 310; 269, 311; 269, 312; 271, 313; 272, 314; 273, 315; 273, 316; 275, 317; 276, 318; 279, 319; 282, 320; 283, 321; 283, 322; 284, 323; 284, 324; 286, 325; 286, 326; 288, 327; 288, 328; 293, 329; 295, 330; 295, 331; 297, 332; 297, 333; 298, 334; 298, 335; 301, 336; 301, 337; 302, 338; 302, 339; 303, 340; 303, 341; 306, 342; 306, 343; 307, 344; 307, 345; 308, 346; 308, 347; 312, 348; 316, 349; 321, 350; 322, 351; 330, 352; 330, 353; 331, 354; 331, 355; 333, 356; 333, 357; 334, 358; 334, 359; 335, 360; 337, 361; 337, 362; 340, 363; 343, 364; 343, 365; 346, 366; 348, 367; 348, 368; 349, 369; 349, 370; 354, 371; 354, 372; 355, 373; 355, 374; 356, 375; 356, 376; 357, 377; 357, 378; 367, 379; 367, 380; 368, 381; 369, 382; 369, 383; 370, 384; 381, 385; 384, 386
def msregularize(msname, newname): """ Regularize an MS The output MS will be such that it has the same number of baselines for each time stamp. Where needed fully flagged rows are added. Possibly missing rows are written into a separate MS <newname>-add. It is concatenated with the original MS and sorted in order of TIME, DATADESC_ID, ANTENNA1,ANTENNA2 to form a new regular MS. Note that the new MS references the input MS (it does not copy the data). It means that changes made in the new MS are also made in the input MS. If no rows were missing, the new MS is still created referencing the input MS. """ # Find out all baselines. t = table(msname) t1 = t.sort('unique ANTENNA1,ANTENNA2') nadded = 0 # Now iterate in time,band over the MS. for tsub in t.iter(['TIME', 'DATA_DESC_ID']): nmissing = t1.nrows() - tsub.nrows() if nmissing < 0: raise ValueError("A time/band chunk has too many rows") if nmissing > 0: # Rows needs to be added for the missing baselines. ant1 = str(t1.getcol('ANTENNA1')).replace(' ', ',') ant2 = str(t1.getcol('ANTENNA2')).replace(' ', ',') ant1 = tsub.getcol('ANTENNA1') ant2 = tsub.getcol('ANTENNA2') t2 = taql('select from $t1 where !any(ANTENNA1 == $ant1 &&' + ' ANTENNA2 == $ant2)') six.print_(nmissing, t1.nrows(), tsub.nrows(), t2.nrows()) if t2.nrows() != nmissing: raise ValueError("A time/band chunk behaves strangely") # If nothing added yet, create a new table. # (which has to be reopened for read/write). # Otherwise append to that new table. if nadded == 0: tnew = t2.copy(newname + "_add", deep=True) tnew = table(newname + "_add", readonly=False) else: t2.copyrows(tnew) # Set the correct time and band in the new rows. tnew.putcell('TIME', range(nadded, nadded + nmissing), tsub.getcell('TIME', 0)) tnew.putcell('DATA_DESC_ID', range(nadded, nadded + nmissing), tsub.getcell('DATA_DESC_ID', 0)) nadded += nmissing # Combine the existing table and new table. if nadded > 0: # First initialize data and flags in the added rows. taql('update $tnew set DATA=0+0i') taql('update $tnew set FLAG=True') tcomb = table([t, tnew]) tcomb.rename(newname + '_adds') tcombs = tcomb.sort('TIME,DATA_DESC_ID,ANTENNA1,ANTENNA2') else: tcombs = t.query(offset=0) tcombs.rename(newname) six.print_(newname, 'has been created; it references the original MS') if nadded > 0: six.print_(' and', newname + '_adds', 'containing', nadded, 'new rows') else: six.print_(' no rows needed to be added')
0, module; 1, function_definition; 2, function_name:iter; 3, parameters; 4, block; 5, identifier:self; 6, identifier:columnnames; 7, default_parameter; 8, default_parameter; 9, expression_statement; 10, import_from_statement; 11, return_statement; 12, identifier:order; 13, string; 14, identifier:sort; 15, True; 16, comment:"""Return a tableiter object. :class:`tableiter` lets one iterate over a table by returning in each iteration step a reference table containing equal values for the given columns. By default a sort is done on the given columns to get the correct iteration order. `order` | 'ascending' is iterate in ascending order (is the default). | 'descending' is iterate in descending order. `sort=False` do not sort (because table is already in correct order). For example, iterate by time through a measurementset table:: t = table('3c343.MS') for ts in t.iter('TIME'): print ts.nrows() """; 17, relative_import; 18, dotted_name; 19, call; 20, import_prefix; 21, dotted_name; 22, identifier:tableiter; 23, identifier:tableiter; 24, argument_list; 25, identifier:tableiter; 26, identifier:self; 27, identifier:columnnames; 28, identifier:order; 29, identifier:sort
0, 1; 1, 2; 1, 3; 1, 4; 3, 5; 3, 6; 3, 7; 3, 8; 4, 9; 4, 10; 4, 11; 7, 12; 7, 13; 8, 14; 8, 15; 9, 16; 10, 17; 10, 18; 11, 19; 17, 20; 17, 21; 18, 22; 19, 23; 19, 24; 21, 25; 24, 26; 24, 27; 24, 28; 24, 29
def iter(self, columnnames, order='', sort=True): """Return a tableiter object. :class:`tableiter` lets one iterate over a table by returning in each iteration step a reference table containing equal values for the given columns. By default a sort is done on the given columns to get the correct iteration order. `order` | 'ascending' is iterate in ascending order (is the default). | 'descending' is iterate in descending order. `sort=False` do not sort (because table is already in correct order). For example, iterate by time through a measurementset table:: t = table('3c343.MS') for ts in t.iter('TIME'): print ts.nrows() """ from .tableiter import tableiter return tableiter(self, columnnames, order, sort)