def validate_openssl():
    try:
        open_ssl_exe = which("openssl")
        if not open_ssl_exe:
            raise Exception("No openssl exe found in path")
        try:
            # execute an invalid command to get output listing the available options,
            # since openssl unfortunately does not have a --help option
            execute_command([open_ssl_exe, "s_client", "invalidDummyCommand"])
        except subprocess.CalledProcessError as e:
            if "fallback_scsv" not in e.output:
                raise Exception("openssl does not support TLS_FALLBACK_SCSV")
    except Exception as e:
        raise MongoctlException("Unsupported OpenSSL. %s" % e)
Validates that OpenSSL supports TLS_FALLBACK_SCSV
def validate_against_current_config(self, current_rs_conf): # if rs is not configured yet then there is nothing to validate if not current_rs_conf: return my_host = self.get_host() current_member_confs = current_rs_conf['members'] err = None for curr_mem_conf in current_member_confs: if (self.id and self.id == curr_mem_conf['_id'] and not is_same_address(my_host, curr_mem_conf['host'])): err = ("Member config is not consistent with current rs " "config. \n%s\n. Both have the sam _id but addresses" " '%s' and '%s' do not resolve to the same host." % (document_pretty_string(curr_mem_conf), my_host, curr_mem_conf['host'] )) elif (is_same_address(my_host, curr_mem_conf['host']) and self.id and self.id != curr_mem_conf['_id']): err = ("Member config is not consistent with current rs " "config. \n%s\n. Both addresses" " '%s' and '%s' resolve to the same host but _ids '%s'" " and '%s' are not equal." % (document_pretty_string(curr_mem_conf), my_host, curr_mem_conf['host'], self.id, curr_mem_conf['_id'])) if err: raise MongoctlException("Invalid member configuration:\n%s \n%s" % (self, err))
Validates the member document against the current rs config. 1- If there is a member in the current config whose _id equals my _id, then ensure both host addresses resolve to the same host. 2- If there is a member in the current config whose host resolves to my host, then ensure that if my _id is set, it equals that member's _id.
def get_dump_best_secondary(self, max_repl_lag=None): secondary_lag_tuples = [] primary_member = self.get_primary_member() if not primary_member: raise MongoctlException("Unable to determine primary member for" " cluster '%s'" % self.id) master_status = primary_member.get_server().get_member_rs_status() if not master_status: raise MongoctlException("Unable to determine replicaset status for" " primary member '%s'" % primary_member.get_server().id) for member in self.get_members(): if member.get_server().is_secondary(): repl_lag = member.get_server().get_repl_lag(master_status) if max_repl_lag and repl_lag > max_repl_lag: log_info("Excluding member '%s' because it's repl lag " "(in seconds)%s is more than max %s. " % (member.get_server().id, repl_lag, max_repl_lag)) continue secondary_lag_tuples.append((member,repl_lag)) def best_secondary_comp(x, y): x_mem, x_lag = x y_mem, y_lag = y if x_mem.is_passive(): if y_mem.is_passive(): return x_lag - y_lag else: return -1 elif y_mem.is_passive(): return 1 else: return x_lag - y_lag if secondary_lag_tuples: secondary_lag_tuples.sort(best_secondary_comp) return secondary_lag_tuples[0][0]
Returns the best secondary member to be used for dumping. Best = the passive member with the least lag; if there are no passives, then the member with the least lag.
def is_replicaset_initialized(self): # it's possible isMaster returns an "incomplete" result if we # query a replica set member while it's loading the replica set config # https://jira.mongodb.org/browse/SERVER-13458 # let's try to detect this state before proceeding # seems like if the "secondary" field is present, but "setName" isn't, # it's a good indicator that we just need to wait a bit # add an uptime check in for good measure for member in self.get_members(): server = member.get_server() if server.has_joined_replica(): return True return False
Iterates over all members and checks whether any of them has joined the replica set.
def match_member_id(self, member_conf, current_member_confs): if current_member_confs is None: return None for curr_mem_conf in current_member_confs: if is_same_address(member_conf['host'], curr_mem_conf['host']): return curr_mem_conf['_id'] return None
Attempts to find an _id for member_conf among the current member configs. Returns the _id of the element of current_member_confs whose host is equal to, or resolves to the same host as, member_conf's host.
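For illustration, a small sketch of the lookup (hostnames are made up, and is_same_address is assumed to treat identical host strings as equivalent):

current_member_confs = [
    {"_id": 0, "host": "db1.example.com:27017"},
    {"_id": 1, "host": "db2.example.com:27017"},
]
member_conf = {"host": "db2.example.com:27017"}
# match_member_id(member_conf, current_member_confs) would return 1,
# because the second entry's host resolves to the same address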
def setup_server_users(server): """if not should_seed_users(server): log_verbose("Not seeding users for server '%s'" % server.id) return""" log_info("Checking if there are any users that need to be added for " "server '%s'..." % server.id) seed_users = server.get_seed_users() count_new_users = 0 # Note: If server member of a replica then don't setup admin # users because primary server will do that at replinit # Now create admin ones if not server.is_slave(): count_new_users += setup_server_admin_users(server) for dbname, db_seed_users in seed_users.items(): # create the admin ones last so we won't have an auth issue if dbname in ["admin", "local"]: continue count_new_users += setup_server_db_users(server, dbname, db_seed_users) if count_new_users > 0: log_info("Added %s users." % count_new_users) else: log_verbose("Did not add any new users.")
Seeds all users returned by get_seed_users() if no users have been seeded yet, i.e. the system.users collection is empty.
def prepend_global_admin_user(other_users, server): cred0 = get_global_login_user(server, "admin") if cred0 and cred0["username"] and cred0["password"]: log_verbose("Seeding : CRED0 to the front of the line!") return [cred0] + other_users if other_users else [cred0] else: return other_users
When making lists of administrative users -- e.g., seeding a new server -- it's useful to put the credentials supplied on the command line at the head of the queue.
def get_os_dist_info(): distribution = platform.dist() dist_name = distribution[0].lower() dist_version_str = distribution[1] if dist_name and dist_version_str: return dist_name, dist_version_str else: return None, None
Returns the distribution info
def export_cmd_options(self, options_override=None, standalone=False): cmd_options = super(MongosServer, self).export_cmd_options( options_override=options_override) # Add configServers arg cluster = self.get_validate_cluster() cmd_options["configdb"] = cluster.get_config_db_address() return cmd_options
Override! :return:
def get_mongo_version(self): if self._mongo_version: return self._mongo_version mongo_version = self.read_current_mongo_version() if not mongo_version: mongo_version = self.get_configured_mongo_version() self._mongo_version = mongo_version return self._mongo_version
Gets mongo version of the server if it is running. Otherwise return version configured in mongoVersion property
def get_server_build_info(self): if self.is_online(): try: return self.get_mongo_client().server_info() except OperationFailure, ofe: log_exception(ofe) if "there are no users authenticated" in str(ofe): # this is a pymongo 3.6.1 regression where the buildinfo command fails on non authenticated client # fall-back to an authenticated client admin_db = self.get_db("admin", no_auth=False) return admin_db.command("buildinfo") except Exception, e: log_exception(e) return None
issues a buildinfo command
def authenticate_db(self, db, dbname, retry=True): log_verbose("Server '%s' attempting to authenticate to db '%s'" % (self.id, dbname)) login_user = self.get_login_user(dbname) username = None password = None auth_success = False if login_user: username = login_user["username"] if "password" in login_user: password = login_user["password"] # have three attempts to authenticate no_tries = 0 while not auth_success and no_tries < 3: if not username: username = read_username(dbname) if not password: password = self.lookup_password(dbname, username) if not password: password = read_password("Enter password for user '%s\%s'"% (dbname, username)) # if auth success then exit loop and memoize login try: auth_success = db.authenticate(username, password) log_verbose("Authentication attempt #%s to db '%s' result: %s" % (no_tries, dbname, auth_success)) except OperationFailure, ofe: if "auth fails" in str(ofe): auth_success = False if auth_success or not retry: break else: log_error("Invalid login!") username = None password = None no_tries += 1 if auth_success: self.set_login_user(dbname, username, password) log_verbose("Authentication Succeeded!") else: log_verbose("Authentication failed") return auth_success
Returns True if we manage to auth to the given db, else False.
def get_working_login(self, database, username=None, password=None): login_user = None # this will authenticate and update login user self.get_db(database, username=username, password=password, never_auth_with_admin=True) login_user = self.get_login_user(database) if login_user: username = login_user["username"] password = (login_user["password"] if "password" in login_user else None) return username, password
Authenticates to the specified database, starting with the specified username/password (if present); tries to return a successful login within 3 attempts.
def needs_to_auth(self, dbname): log_debug("Checking if server '%s' needs to auth on db '%s'...." % (self.id, dbname)) try: client = self.get_mongo_client() db = client.get_database(dbname) db.collection_names() result = False except (RuntimeError,Exception), e: log_exception(e) # updated for to handle auth failures from mongodb 3.6 result = "authorized" in str(e) or "there are no users authenticated" in str(e) log_debug("needs_to_auth check for server '%s' on db '%s' : %s" % (self.id, dbname, result)) return result
Determines if the server needs to authenticate to the database. NOTE: we stopped depending on is_auth() since it is only a configuration setting and may not be accurate.
def needs_repl_key(self): cluster = self.get_cluster() return (self.supports_repl_key() and cluster is not None and cluster.get_repl_key() is not None)
A repl key is needed if the server uses auth, is a cluster member, and its version is None or >= 2.0.0.
def exact_or_minor_exe_version_match(executable_name, exe_version_tuples, version):
    exe = exact_exe_version_match(executable_name, exe_version_tuples, version)
    if not exe:
        exe = minor_exe_version_match(executable_name, exe_version_tuples, version)
    return exe
IF there is an exact match then use it OTHERWISE try to find a minor version match
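A minimal standalone sketch of that matching order; the exact_exe_version_match / minor_exe_version_match helpers are replaced by inline comprehensions and the executable paths are made up:

exe_version_tuples = [
    ("/usr/bin/mongod-2.0.6", "2.0.6"),
    ("/usr/bin/mongod-2.2.0", "2.2.0"),
]

def pick(version):
    exact = [exe for exe, v in exe_version_tuples if v == version]
    if exact:
        return exact[0]
    # fall back to any executable sharing the same major.minor
    minor = [exe for exe, v in exe_version_tuples
             if v.split(".")[:2] == version.split(".")[:2]]
    return minor[0] if minor else None

assert pick("2.2.0") == "/usr/bin/mongod-2.2.0"   # exact match
assert pick("2.0.4") == "/usr/bin/mongod-2.0.6"   # minor-version match only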
def is_server_or_cluster_db_address(value):
    # check if value is an id string
    id_path = value.split("/")
    id = id_path[0]
    return len(id_path) <= 2 and (repository.lookup_server(id) or
                                  repository.lookup_cluster(id))
checks if the specified value is in the form of [server or cluster id][/database]
def until(time): end = time # Convert datetime to unix timestamp and adjust for locality if isinstance(time, datetime): zoneDiff = pytime.time() - (datetime.now()- datetime(1970, 1, 1)).total_seconds() end = (time - datetime(1970, 1, 1)).total_seconds() + zoneDiff # Type check if not isinstance(end, (int, float)): raise Exception('The time parameter is not a number or datetime object') # Now we wait while True: now = pytime.time() diff = end - now # # Time is up! # if diff <= 0: break else: # 'logarithmic' sleeping to minimize loop iterations sleep(diff / 2)
Pause your program until a specific end time. 'time' is either a valid datetime object or unix timestamp in seconds (i.e. seconds since Unix epoch)
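A minimal usage sketch of the 'logarithmic' waiting loop described above, using only unix timestamps (the datetime conversion step is omitted):

import time as pytime

def wait_until(end_ts):
    # sleep half of the remaining time on each iteration to minimize loop iterations
    while True:
        diff = end_ts - pytime.time()
        if diff <= 0:
            break
        pytime.sleep(diff / 2)

# pause for roughly two seconds
wait_until(pytime.time() + 2)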
def seconds(num):
    now = pytime.time()
    end = now + num
    until(end)
Pause for this many seconds
def _pre_mongod_server_start(server, options_override=None): lock_file_path = server.get_lock_file_path() no_journal = (server.get_cmd_option("nojournal") or (options_override and "nojournal" in options_override)) if (os.path.exists(lock_file_path) and server.is_arbiter_server() and no_journal): log_warning("WARNING: Detected a lock file ('%s') for your server '%s'" " ; since this server is an arbiter, there is no need for" " repair or other action. Deleting mongod.lock and" " proceeding..." % (lock_file_path, server.id)) try: os.remove(lock_file_path) except Exception, e: log_exception(e) raise MongoctlException("Error while trying to delete '%s'. " "Cause: %s" % (lock_file_path, e))
Does necessary work before starting a server. 1- An efficiency step for arbiters running with --nojournal: * there is a lock file ==> the server must not have exited cleanly from its last run, and does not know how to auto-recover (as a journalled server would) * however, this is an arbiter, therefore there is no need to repair data files in any way ==> we can remove the lock file and start the server
def prepare_mongod_server(server): log_info("Preparing server '%s' for use as configured..." % server.id) cluster = server.get_cluster() # setup the local users if server supports that if server.supports_local_users(): users.setup_server_local_users(server) if not server.is_cluster_member() or server.is_standalone_config_server(): users.setup_server_users(server) if cluster and server.is_primary(): users.setup_cluster_users(cluster, server)
Contains post start server operations
def _rlimit_min(one_val, nother_val):
    if one_val < 0 or nother_val < 0:
        return max(one_val, nother_val)
    else:
        return min(one_val, nother_val)
Returns the more stringent rlimit value. -1 means no limit.
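A quick sketch of that behavior; the function name is copied from above and the limit values are arbitrary:

def rlimit_min(one_val, nother_val):
    # a negative value (conventionally -1) means "unlimited", so the other value is stricter
    if one_val < 0 or nother_val < 0:
        return max(one_val, nother_val)
    return min(one_val, nother_val)

assert rlimit_min(1024, 4096) == 1024   # both finite: the smaller limit is stricter
assert rlimit_min(-1, 4096) == 4096     # -1 is unlimited, so 4096 wins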
def generate_start_command(server, options_override=None, standalone=False): command = [] if mongod_needs_numactl(): log_info("Running on a NUMA machine...") command = apply_numactl(command) # append the mongod executable command.append(get_server_executable(server)) # create the command args cmd_options = server.export_cmd_options(options_override=options_override, standalone=standalone) command.extend(options_to_command_args(cmd_options)) return command
Generates the start command for the server. Also checks if we need to use numactl when running on a NUMA box; 10gen recommends using numactl on NUMA. For more info, see http://www.mongodb.org/display/DOCS/NUMA
def get_server_home(self): home_dir = super(MongodServer, self).get_server_home() if not home_dir: home_dir = self.get_db_path() return home_dir
Override! :return:
def export_cmd_options(self, options_override=None, standalone=False): cmd_options = super(MongodServer, self).export_cmd_options( options_override=options_override) # reset some props to exporting vals cmd_options['dbpath'] = self.get_db_path() if 'repairpath' in cmd_options: cmd_options['repairpath'] = resolve_path(cmd_options['repairpath']) # Add ReplicaSet args if a cluster is configured repl_cluster = self.get_replicaset_cluster() if repl_cluster is not None: if "replSet" not in cmd_options: cmd_options["replSet"] = repl_cluster.id # apply standalone if specified if standalone: if "replSet" in cmd_options: del cmd_options["replSet"] if "keyFile" in cmd_options: del cmd_options["keyFile"] # add configsvr as needed if self.is_config_server(): cmd_options["configsvr"] = True # add shardsvr as needed if self.is_shard_server(): cmd_options["shardsvr"] = True # remove wiredTigerCacheSizeGB if its not an int since we set it in runtime parameter # wiredTigerEngineRuntimeConfig in this case if "wiredTigerCacheSizeGB" in cmd_options and not isinstance(self.get_cmd_option("wiredTigerCacheSizeGB"), int): del cmd_options["wiredTigerCacheSizeGB"] return cmd_options
Override! :return:
def get_seed_users(self): seed_users = super(MongodServer, self).get_seed_users() # exempt database users for config servers if seed_users and self.is_config_server(): for dbname in seed_users.keys(): if dbname not in ["admin", "local", "config"]: del seed_users[dbname] return seed_users
Override! :return:
def get_repl_lag(self, master_status): member_status = self.get_member_rs_status() if not member_status: raise MongoctlException("Unable to determine replicaset status for" " member '%s'" % self.id) return get_member_repl_lag(member_status, master_status)
Given two 'members' elements from rs.status(), return lag between their optimes (in secs).
def mongo_client(*args, **kwargs): kwargs = kwargs or {} connection_timeout_ms = kwargs.get("connectTimeoutMS") or CONN_TIMEOUT_MS kwargs.update({ "socketTimeoutMS": connection_timeout_ms, "connectTimeoutMS": connection_timeout_ms, "maxPoolSize": 1 }) if pymongo.get_version_string().startswith("3.2"): if kwargs and kwargs.get("serverSelectionTimeoutMS") is None: kwargs["connect"] = True kwargs["serverSelectionTimeoutMS"] = connection_timeout_ms return _mongo_client(*args, **kwargs)
wrapper around mongo client :param args: :param kwargs: :return:
def parse(self, data): graph = self._init_graph() # ensure is NetJSON NetworkGraph object if 'type' not in data or data['type'] != 'NetworkGraph': raise ParserError('Parse error, not a NetworkGraph object') # ensure required keys are present required_keys = ['protocol', 'version', 'metric', 'nodes', 'links'] for key in required_keys: if key not in data: raise ParserError('Parse error, "{0}" key not found'.format(key)) # store metadata self.protocol = data['protocol'] self.version = data['version'] self.revision = data.get('revision') # optional self.metric = data['metric'] # create graph for node in data['nodes']: graph.add_node(node['id'], label=node['label'] if 'label' in node else None, local_addresses=node.get('local_addresses', []), **node.get('properties', {})) for link in data['links']: try: source = link["source"] dest = link["target"] cost = link["cost"] except KeyError as e: raise ParserError('Parse error, "%s" key not found' % e) properties = link.get('properties', {}) graph.add_edge(source, dest, weight=cost, **properties) return graph
Converts a NetJSON 'NetworkGraph' object to a NetworkX Graph object, which is then returned. Additionally checks for protocol, version, revision and metric.
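For reference, a minimal NetworkGraph document that would satisfy the required-key checks above (all values are illustrative):

data = {
    "type": "NetworkGraph",
    "protocol": "olsr",
    "version": "0.6.6",
    "metric": "etx",
    "nodes": [{"id": "10.0.0.1"}, {"id": "10.0.0.2", "label": "node2"}],
    "links": [{"source": "10.0.0.1", "target": "10.0.0.2", "cost": 1.0}],
}
# parse(data) would yield a two-node graph with a single edge of weight 1.0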
def parse(self, data): # initialize graph and list of aggregated nodes graph = self._init_graph() server = self._server_common_name # add server (central node) to graph graph.add_node(server) # data may be empty if data is None: clients = [] links = [] else: clients = data.client_list.values() links = data.routing_table.values() # add clients in graph as nodes for client in clients: if client.common_name == 'UNDEF': continue client_properties = { 'label': client.common_name, 'real_address': str(client.real_address.host), 'port': int(client.real_address.port), 'connected_since': client.connected_since.strftime('%Y-%m-%dT%H:%M:%SZ'), 'bytes_received': int(client.bytes_received), 'bytes_sent': int(client.bytes_sent) } local_addresses = [ str(route.virtual_address) for route in data.routing_table.values() if route.real_address == client.real_address ] if local_addresses: client_properties['local_addresses'] = local_addresses graph.add_node(str(client.real_address.host), **client_properties) # add links in routing table to graph for link in links: if link.common_name == 'UNDEF': continue graph.add_edge(server, str(link.real_address.host), weight=1) return graph
Converts an OpenVPN JSON to a NetworkX Graph object, which is then returned.
def to_python(self, data): try: return super(BatmanParser, self).to_python(data) except ConversionException as e: return self._txtinfo_to_python(e.data)
Adds support for txtinfo format
def _txtinfo_to_python(self, data): self._format = 'txtinfo' # find interesting section lines = data.split('\n') try: start = lines.index('Table: Topology') + 2 except ValueError: raise ParserError('Unrecognized format') topology_lines = [line for line in lines[start:] if line] # convert to python list parsed_lines = [] for line in topology_lines: values = line.split(' ') parsed_lines.append({ 'source': values[0], 'target': values[1], 'cost': float(values[4]) }) return parsed_lines
Converts txtinfo format to python
def _get_primary_address(self, mac_address, node_list):
    for local_addresses in node_list:
        if mac_address in local_addresses:
            return local_addresses[0]
    return mac_address
Uses the _get_aggregated_node_list structure to find the primary mac address associated with a secondary one; if none is found, returns the address itself.
def _get_aggregated_node_list(self, data):
    node_list = []
    for node in data:
        local_addresses = [node['primary']]
        if 'secondary' in node:
            local_addresses += node['secondary']
        node_list.append(local_addresses)
    return node_list
Returns a list of main and secondary mac addresses.
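A small sketch of how the two helpers above work together; the mac addresses are made up and the method is reduced to a plain function:

node_list = [
    ["aa:aa:aa:aa:aa:01", "aa:aa:aa:aa:aa:02"],   # primary first, then secondaries
    ["bb:bb:bb:bb:bb:01"],
]

def primary_address(mac, node_list):
    for local_addresses in node_list:
        if mac in local_addresses:
            return local_addresses[0]
    return mac

assert primary_address("aa:aa:aa:aa:aa:02", node_list) == "aa:aa:aa:aa:aa:01"
assert primary_address("cc:cc:cc:cc:cc:01", node_list) == "cc:cc:cc:cc:cc:01"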
def parse(self, data): method = getattr(self, '_parse_{0}'.format(self._format)) return method(data)
Calls the right method depending on the format, which can be one of the following: * alfred_vis * txtinfo
def _parse_alfred_vis(self, data): # initialize graph and list of aggregated nodes graph = self._init_graph() if 'source_version' in data: self.version = data['source_version'] if 'vis' not in data: raise ParserError('Parse error, "vis" key not found') node_list = self._get_aggregated_node_list(data['vis']) # loop over topology section and create networkx graph for node in data["vis"]: for neigh in node["neighbors"]: graph.add_node(node['primary'], **{ 'local_addresses': node.get('secondary', []), 'clients': node.get('clients', []) }) primary_neigh = self._get_primary_address(neigh['neighbor'], node_list) # networkx automatically ignores duplicated edges graph.add_edge(node['primary'], primary_neigh, weight=float(neigh['metric'])) return graph
Converts an alfred-vis JSON object to a NetworkX Graph object, which is then returned. Additionally checks for "source_version" to determine the batman-adv version.
def _parse_txtinfo(self, data): graph = self._init_graph() for link in data: graph.add_edge(link['source'], link['target'], weight=link['cost']) return graph
Converts the python list returned by self._txtinfo_to_python() to a NetworkX Graph object, which is then returned.
def to_python(self, data): if isinstance(data, dict): return data elif isinstance(data, six.string_types): # assuming is JSON try: return json.loads(data) except ValueError: pass raise ConversionException('Could not recognize format', data=data)
Parses the input data and converts it into a Python data structure Input data might be: * a path which points to a JSON file * a URL which points to a JSON file (supported schemes: http, https, telnet) * a JSON formatted string * a dict representing a JSON structure
def json(self, dict=False, **kwargs): try: graph = self.graph except AttributeError: raise NotImplementedError() return _netjson_networkgraph(self.protocol, self.version, self.revision, self.metric, graph.nodes(data=True), graph.edges(data=True), dict, **kwargs)
Outputs NetJSON format
def diff(old, new): protocol = new.protocol version = new.version revision = new.revision metric = new.metric # calculate differences in_both = _find_unchanged(old.graph, new.graph) added_nodes, added_edges = _make_diff(old.graph, new.graph, in_both) removed_nodes, removed_edges = _make_diff(new.graph, old.graph, in_both) changed_edges = _find_changed(old.graph, new.graph, in_both) # create netjson objects # or assign None if no changes if added_nodes.nodes() or added_edges.edges(): added = _netjson_networkgraph(protocol, version, revision, metric, added_nodes.nodes(data=True), added_edges.edges(data=True), dict=True) else: added = None if removed_nodes.nodes() or removed_edges.edges(): removed = _netjson_networkgraph(protocol, version, revision, metric, removed_nodes.nodes(data=True), removed_edges.edges(data=True), dict=True) else: removed = None if changed_edges: changed = _netjson_networkgraph(protocol, version, revision, metric, [], changed_edges, dict=True) else: changed = None return OrderedDict(( ('added', added), ('removed', removed), ('changed', changed) ))
Returns differences of two network topologies old and new in NetJSON NetworkGraph compatible format
def _make_diff(old, new, both): # make a copy of old topology to avoid tampering with it diff_edges = new.copy() not_different = [tuple(edge) for edge in both] diff_edges.remove_edges_from(not_different) # repeat operation with nodes diff_nodes = new.copy() not_different = [] for new_node in new.nodes(): if new_node in old.nodes(): not_different.append(new_node) diff_nodes.remove_nodes_from(not_different) # return tuple with modified graphs # one for nodes and one for links return diff_nodes, diff_edges
Calculates differences between topologies 'old' and 'new'. Returns a tuple with two network graph objects: the first graph contains the added nodes, the second contains the added links.
def _find_unchanged(old, new): edges = [] old_edges = [set(edge) for edge in old.edges()] new_edges = [set(edge) for edge in new.edges()] for old_edge in old_edges: if old_edge in new_edges: edges.append(set(old_edge)) return edges
returns edges that are in both old and new
def _find_changed(old, new, both): # create two list of sets of old and new edges including cost old_edges = [] for edge in old.edges(data=True): # skip links that are not in both if set((edge[0], edge[1])) not in both: continue # wrap cost in tuple so it will be recognizable cost = (edge[2]['weight'],) old_edges.append(set((edge[0], edge[1], cost))) new_edges = [] for edge in new.edges(data=True): # skip links that are not in both if set((edge[0], edge[1])) not in both: continue # wrap cost in tuple so it will be recognizable cost = (edge[2]['weight'],) new_edges.append(set((edge[0], edge[1], cost))) # find out which edge changed changed = [] for new_edge in new_edges: if new_edge not in old_edges: # new_edge is a set, convert it to list new_edge = list(new_edge) for item in new_edge: if isinstance(item, tuple): # unwrap cost from tuple and put it in a dict cost = {'weight': item[0]} new_edge.remove(item) changed.append((new_edge[0], new_edge[1], cost)) return changed
returns links that have changed cost
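A tiny illustration of the set-based comparison used above (node names and costs are arbitrary):

# wrapping the cost in a tuple keeps it distinguishable from the node names inside the set
old_edge = set(("A", "B", (1.0,)))
new_edge = set(("A", "B", (2.0,)))
assert old_edge != new_edge                                  # a cost change is detected
assert set(("A", "B", (1.0,))) == set(("B", "A", (1.0,)))    # link direction is ignored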
def parse(self, data):
    # initialize graph and list of aggregated nodes
    graph = self._init_graph()
    if len(data) != 0:
        if "links" not in data[0]:
            raise ParserError('Parse error, "links" key not found')
    # loop over topology section and create networkx graph;
    # the link cost is the average of the tx and rx rates
    for node in data:
        for link in node['links']:
            cost = (link['txRate'] + link['rxRate']) / 2.0
            graph.add_edge(node['name'], link['name'],
                           weight=cost,
                           tx_rate=link['txRate'],
                           rx_rate=link['rxRate'])
    return graph
Converts a BMX6 b6m JSON to a NetworkX Graph object which is then returned.
def parse(self, data): graph = self._init_graph() # loop over links and create networkx graph # Add only working nodes with working links for link in data.get_inner_links(): if link.status != libcnml.libcnml.Status.WORKING: continue interface_a, interface_b = link.getLinkedInterfaces() source = interface_a.ipv4 dest = interface_b.ipv4 # add link to Graph graph.add_edge(source, dest, weight=1) return graph
Converts a CNML structure to a NetworkX Graph object which is then returned.
def to_python(self, data): try: return super(OlsrParser, self).to_python(data) except ConversionException as e: return self._txtinfo_to_jsoninfo(e.data)
Adds support for txtinfo format
def parse(self, data): graph = self._init_graph() if 'topology' not in data: raise ParserError('Parse error, "topology" key not found') elif 'mid' not in data: raise ParserError('Parse error, "mid" key not found') # determine version and revision if 'config' in data: version_info = data['config']['olsrdVersion'].replace(' ', '').split('-') self.version = version_info[1] # try to get only the git hash if 'hash_' in version_info[-1]: version_info[-1] = version_info[-1].split('hash_')[-1] self.revision = version_info[-1] # process alias list alias_dict = {} for node in data['mid']: local_addresses = [alias['ipAddress'] for alias in node['aliases']] alias_dict[node['ipAddress']] = local_addresses # loop over topology section and create networkx graph for link in data['topology']: try: source = link['lastHopIP'] target = link['destinationIP'] cost = link['tcEdgeCost'] properties = { 'link_quality': link['linkQuality'], 'neighbor_link_quality': link['neighborLinkQuality'] } except KeyError as e: raise ParserError('Parse error, "%s" key not found' % e) # add nodes with their local_addresses for node in [source, target]: if node not in alias_dict: continue graph.add_node(node, local_addresses=alias_dict[node]) # skip links with infinite cost if cost == float('inf'): continue # original olsrd cost (jsoninfo multiplies by 1024) cost = float(cost) / 1024.0 # add link to Graph graph.add_edge(source, target, weight=cost, **properties) return graph
Converts a dict representing an OLSR 0.6.x topology to a NetworkX Graph object, which is then returned. Additionally checks for "config" data in order to determine version and revision.
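The cost normalization mentioned in the code above is simple arithmetic; a quick check with an illustrative value:

cost = 2048            # tcEdgeCost as reported by jsoninfo (olsrd cost multiplied by 1024)
weight = float(cost) / 1024.0
assert weight == 2.0   # original olsrd cost, used as the edge weight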
def _txtinfo_to_jsoninfo(self, data): # replace INFINITE with inf, which is convertible to float data = data.replace('INFINITE', 'inf') # find interesting section lines = data.split('\n') # process links in topology section try: start = lines.index('Table: Topology') + 2 end = lines[start:].index('') + start except ValueError: raise ParserError('Unrecognized format') topology_lines = lines[start:end] # convert topology section to jsoninfo format topology = [] for line in topology_lines: values = line.split('\t') topology.append({ 'destinationIP': values[0], 'lastHopIP': values[1], 'linkQuality': float(values[2]), 'neighborLinkQuality': float(values[3]), 'tcEdgeCost': float(values[4]) * 1024.0 }) # process alias (MID) section try: start = lines.index('Table: MID') + 2 end = lines[start:].index('') + start except ValueError: raise ParserError('Unrecognized format') mid_lines = lines[start:end] # convert mid section to jsoninfo format mid = [] for line in mid_lines: values = line.split('\t') node = values[0] aliases = values[1].split(';') mid.append({ 'ipAddress': node, 'aliases': [{'ipAddress': alias} for alias in aliases] }) return { 'topology': topology, 'mid': mid }
converts olsr 1 txtinfo format to jsoninfo
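A minimal sketch of the section-slicing logic used by the converter, on an illustrative txtinfo fragment (column values are made up):

sample = "\n".join([
    "Table: Topology",
    "Dest. IP\tLast hop IP\tLQ\tNLQ\tCost",
    "10.0.0.2\t10.0.0.1\t1.000\t1.000\t1024.000",
    "",
])
lines = sample.split("\n")
start = lines.index("Table: Topology") + 2    # skip the table title and the header row
end = lines[start:].index("") + start         # stop at the first blank line
print(lines[start:end])                       # ['10.0.0.2\t10.0.0.1\t1.000\t1.000\t1024.000']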
def create_issue_link(self, link_type, inwardissue, outwardissue, comment=None): self.jira.create_issue_link(type=link_type, inwardIssue=str(inwardissue), outwardIssue=str(outwardissue))
Create a link between two issues. Arguments: | link_type (string) | The type of link | | inwardissue (string) | The issue to link from | | outwardissue (string) | The issue to link to | | comment (string) | (Optional) A comment to add when joining issues | Example: | *Keyword* | *Parameters* | | | | connect to jira | asimmons | options= {'http://devjira01'} | | | ${issue} | create issue | ${issue_field_dict} | True | | create issue link | relates to | ${issue} | PROJ-385 |
def assign_user_to_issue(self, issue, JIRAUsername): # TODO: Review docs self.jira.assign_issue(issue=issue, assignee=JIRAUsername)
Assigns a user to a specified issue. Arguments: | issue (string) | A JIRA Issue that a user needs to be assigned to, can be an issue ID or Key | | JIRAUsername (string) | A JIRA Username to assign to the issue | Example: | *Keyword* | *Parameters* | | | connect to jira | asimmons | options= {'http://devjira01'} | | ${issue} | create issue | ${issue_field_dict} | | assign user to issue | ${issue} | aSample |
def add_watcher_to_issue(self, issue, JIRAUsername): self.jira.add_watcher(issue=issue, watcher=JIRAUsername)
Adds a user to a specified issue's watcher list. Arguments: | issue (string) | A JIRA Issue that a watcher needs added to, can be an issue ID or Key | | JIRAUsername (string) | A JIRA Username to add as a watcher to an issue | Example: | *Keyword* | *Parameters* | | | | connect to jira | asimmons | options= {'http://devjira01'} | | | ${issue} | create issue | ${issue_field_dict} | True | | add watcher to issue | ${issue} | aSample | |
def add_comment_to_issue(self, issue, comment, visibility=None): self.jira.add_comment(issue=issue, body=comment)
Adds a comment to a specified issue from the current user. Arguments: | issue (string) | A JIRA Issue that a watcher needs added to, can be an issue ID or Key | | comment (string) | A body of text to add as a comment to an issue | | visibility (string) | (Optional) | Example: | *Keyword* | *Parameters* | | | | connect to jira | asimmons | options= {'http://devjira01'} | | | ${issue} | create issue | ${issue_field_dict} | True | | add comment to issue | ${issue} | Starting work on this issue | |
def add_attachment_to_issue(self, issue, attachment, filename=None): self.jira.add_attachment(issue=issue, attachment=attachment, filename=filename)
Uploads and attaches a file to a specified issue. (Note: include the file extension when using the 'filename' option or this will change the file type.) Arguments: | issue (string) | A JIRA Issue that the file will be attached to, can be an issue ID or Key | | attachment (string) | A string pointing to a file location to upload and attach to the issue | | filename (string) | (Optional) A string to rename the file to upon attaching to the issue | Example: | *Keyword* | *Parameters* | | | | connect to jira | asimmons | options= {'http://devjira01'} | | | ${issue} | create issue | ${issue_field_dict} | True | | add attachment to issue | ${issue} | ./logfile.text | LogInformation.txt |
def format(self, record): super(HtmlFormatter, self).format(record) if record.funcName: record.funcName = escape_html(str(record.funcName)) if record.name: record.name = escape_html(str(record.name)) if record.msg: record.msg = escape_html(record.getMessage()) if self.use_emoji: if record.levelno == logging.DEBUG: record.levelname += ' ' + EMOJI.WHITE_CIRCLE elif record.levelno == logging.INFO: record.levelname += ' ' + EMOJI.BLUE_CIRCLE else: record.levelname += ' ' + EMOJI.RED_CIRCLE if hasattr(self, '_style'): return self._style.format(record) else: # py2.7 branch return self._fmt % record.__dict__
:param logging.LogRecord record:
def get_releases(data, **kwargs):
    if "versions" in data:
        return sorted(data["versions"].keys(), key=lambda v: parse(v), reverse=True)
    return []
Gets all releases from pypi meta data. :param data: dict, meta data :return: list, str releases
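A usage sketch of that sort; the `parse` used above is assumed to be a version parser such as packaging.version.parse, and the meta data is illustrative:

from packaging.version import parse

data = {"versions": {"1.9.0": {}, "1.10.0": {}, "0.9.1": {}}}
releases = sorted(data["versions"].keys(), key=lambda v: parse(v), reverse=True)
assert releases == ["1.10.0", "1.9.0", "0.9.1"]   # newest first, with proper semver ordering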
def get_urls(session, name, data, find_changelogs_fn, **kwargs): # if this package has valid meta data, build up a list of URL candidates we can possibly # search for changelogs on if "versions" in data: candidates = set() for version, item in data["versions"].items(): if "homepage" in item and item["homepage"] is not None: if isinstance(item["homepage"], list): candidates.add(*item["homepage"]) else: candidates.add(item["homepage"]) if "repository" in item and item["repository"] is not None: if "url" in item["repository"]: repo = item["repository"]["url"] elif "path" in item["repository"]: repo = item["repository"]["path"] else: continue repo = repo.replace("git://", "https://").replace(".git", "") candidates.add(repo) return find_changelogs_fn(session=session, name=name, candidates=candidates) return set(), set()
Gets URLs to changelogs. :param session: requests Session instance :param name: str, package name :param data: dict, meta data :param find_changelogs_fn: function, find_changelogs :return: tuple, (set(changelog URLs), set(repo URLs))
def parse(name, content, releases, get_head_fn): changelog = {} releases = frozenset(releases) head = False for line in content.splitlines(): new_head = get_head_fn(name=name, line=line, releases=releases) if new_head: head = new_head changelog[head] = "" continue if not head: continue line = line.replace("@", "") line = line.replace("#", "") changelog[head] += line + "\n" return changelog
Parses the given content for a valid changelog :param name: str, package name :param content: str, content :param releases: list, releases :param get_head_fn: function :return: dict, changelog
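A sketch of the inputs the parser expects; the changelog text and the head-matching callback are hypothetical, and the expected result is shown in a comment:

content = "\n".join([
    "1.2.0",
    "- added feature X",
    "1.1.0",
    "- fixed bug #42",
])
releases = ["1.1.0", "1.2.0"]

def get_head(name, line, releases):
    # treat a line as a new section head when it exactly matches a known release
    stripped = line.strip()
    return stripped if stripped in releases else None

# parse("example", content, releases, get_head) would yield roughly:
# {"1.2.0": "- added feature X\n", "1.1.0": "- fixed bug 42\n"}   ('#' and '@' are stripped)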
def parse_commit_log(name, content, releases, get_head_fn): log = "" raw_log = "" for path, _ in content: log += "\n".join(changelog(repository=GitRepos(path), tag_filter_regexp=r"v?\d+\.\d+(\.\d+)?")) raw_log += "\n" + subprocess.check_output( ["git", "-C", path, "--no-pager", "log", "--decorate"]).decode("utf-8") shutil.rmtree(path) log = parse(name, log, releases, get_head_fn) return log, raw_log
Parses the given commit log :param name: str, package name :param content: list, directory paths :param releases: list, releases :param get_head_fn: function :return: dict, changelog
def _load_custom_functions(vendor, name): functions = {} # Some packages have dash in their name, replace them with underscore # E.g. python-ldap to python_ldap filename = "{}.py".format(name.replace("-", "_").lower()) path = os.path.join( os.path.dirname(os.path.realpath(__file__)), # current working dir "custom", # /dir/parser vendor, # /dir/parser/pypi filename # /dir/parser/pypi/django.py ) if os.path.isfile(path): module_name = "parser.{vendor}.{name}".format(vendor=vendor, name=name) module = imp.load_source(module_name, path) functions = dict( (function, getattr(module, function, None)) for function in ALLOWED_CUSTOM_FUNCTIONS if hasattr(module, function) ) return functions
Loads custom functions from custom/{vendor}/{name}.py. This allows to quickly override any function that is used to retrieve and parse the changelog. :param name: str, package name :param vendor: str, vendor :return: dict, functions
def check_for_launchpad(old_vendor, name, urls): if old_vendor != "pypi": # XXX This might work for other starting vendors # XXX but I didn't check. For now only allow # XXX pypi -> launchpad. return '' for url in urls: try: return re.match(r"https?://launchpad.net/([\w.\-]+)", url).groups()[0] except AttributeError: continue return ''
Check if the project is hosted on launchpad. :param name: str, name of the project :param urls: set, urls to check. :return: the name of the project on launchpad, or an empty string.
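The launchpad detection boils down to the regex used above; a quick check with an illustrative URL:

import re

url = "https://launchpad.net/pyopenssl"
match = re.match(r"https?://launchpad.net/([\w.\-]+)", url)
assert match.groups()[0] == "pyopenssl"   # the captured group is the launchpad project name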
def check_switch_vendor(old_vendor, name, urls, _depth=0): if _depth > 3: # Protect against recursive things vendors here. return "" new_name = check_for_launchpad(old_vendor, name, urls) if new_name: return "launchpad", new_name return "", ""
Check if the project should switch vendors. E.g project pushed on pypi, but changelog on launchpad. :param name: str, name of the project :param urls: set, urls to check. :return: tuple, (str(new vendor name), str(new project name))
def get(name, vendor="pypi", functions={}, _depth=0): fns = _bootstrap_functions(name=name, vendor=vendor, functions=functions) session = Session() # get meta data for the given package and use this metadata to # find urls pointing to a possible changelog data = fns["get_metadata"](session=session, name=name) releases = fns["get_releases"](name=name, data=data) urls, repos = fns["get_urls"]( session=session, name=name, data=data, releases=releases, find_changelogs_fn=fns["find_changelogs"] ) # load the content from the given urls and parse the changelog content = fns["get_content"](session=session, urls=urls) changelog = fns["parse"]( name=name, content=content, releases=releases, get_head_fn=fns["get_head"] ) del fns if changelog: return changelog # We could not find any changelogs. # Check to see if we can switch vendors. new_vendor, new_name = check_switch_vendor(vendor, name, repos, _depth=_depth) if new_vendor and new_vendor != vendor: return get(new_name, vendor=new_vendor, functions=functions, _depth=_depth+1) return {}
Tries to find a changelog for the given package. :param name: str, package name :param vendor: str, vendor :param functions: dict, custom functions :return: dict, changelog
def get_commit_log(name, vendor='pypi', functions={}, _depth=0): if "find_changelogs" not in functions: from .finder import find_git_repo functions["find_changelogs"] = find_git_repo if "get_content" not in functions: functions["get_content"] = clone_repo if "parse" not in functions: from .parser import parse_commit_log functions["parse"] = parse_commit_log return get( name=name, vendor=vendor, functions=functions )
Tries to parse a changelog from the raw commit log. :param name: str, package name :param vendor: str, vendor :param functions: dict, custom functions :return: tuple, (dict -> commit log, str -> raw git log)
def get_content(session, urls): content = "" for url in urls: try: logger.debug("GET changelog from {url}".format(url=url)) if "https://api.github.com" in url and url.endswith("releases"): # this is a github API release page, fetch it if token is set if not GITHUB_API_TOKEN: logger.warning("Fetching release pages requires CHANGELOGS_GITHUB_API_TOKEN " "to be set") continue resp = session.get(url, headers={ "Authorization": "token {}".format(GITHUB_API_TOKEN) }) if resp.status_code == 200: for item in resp.json(): content += "\n\n{}\n{}".format(item['tag_name'], item["body"]) else: resp = session.get(url) if resp.status_code == 200: content += "\n\n" + resp.text except requests.ConnectionError: pass return content
Loads the content from URLs, ignoring connection errors. :param session: requests Session instance :param urls: list, str URLs :return: str, content
def clone_repo(session, urls): repos = [] for url in urls: dir = mkdtemp() call = ["git", "clone", url, dir] subprocess.call(call) repos.append((dir, url)) return repos
Clones the given repos in temp directories :param session: requests Session instance :param urls: list, str URLs :return: tuple, (str -> directory, str -> URL)
def unknown(*args, **kwargs): name = kwargs.get('name', '') return "%s(%s)" % (name, ', '.join(str(a) for a in args))
Unknown SCSS function handler. Simply returns 'funcname(args)'.
def check_pil(func): def __wrapper(*args, **kwargs): root = kwargs.get('root') if not Image: if root and root.get_opt('warn'): warn("Images manipulation require PIL") return 'none' return func(*args, **kwargs) return __wrapper
PIL module checking decorator.
def _rgba(r, g, b, a, **kwargs):
    return ColorValue((float(r), float(g), float(b), float(a)))
Converts an rgba(red, green, blue, alpha) quadruplet into a color.
def _mix(color1, color2, weight=0.5, **kwargs): weight = float(weight) c1 = color1.value c2 = color2.value p = 0.0 if weight < 0 else 1.0 if weight > 1 else weight w = p * 2 - 1 a = c1[3] - c2[3] w1 = ((w if (w * a == -1) else (w + a) / (1 + w * a)) + 1) / 2.0 w2 = 1 - w1 q = [w1, w1, w1, p] r = [w2, w2, w2, 1 - p] return ColorValue([c1[i] * q[i] + c2[i] * r[i] for i in range(4)])
Mixes two colors together.
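Walking through the weighting arithmetic above with plain tuples instead of ColorValue (colors and weight are illustrative):

# mixing pure red and pure blue with the default weight of 0.5
c1 = (255.0, 0.0, 0.0, 1.0)
c2 = (0.0, 0.0, 255.0, 1.0)
p = 0.5
w = p * 2 - 1                              # 0.0
a = c1[3] - c2[3]                          # 0.0, the alphas are equal
w1 = ((w + a) / (1 + w * a) + 1) / 2.0     # 0.5
w2 = 1 - w1
q = [w1, w1, w1, p]
r = [w2, w2, w2, 1 - p]
mixed = [c1[i] * q[i] + c2[i] * r[i] for i in range(4)]
assert mixed == [127.5, 0.0, 127.5, 1.0]   # an even purple, opacity preserved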
def _hsla(h, s, l, a, **kwargs): res = colorsys.hls_to_rgb(float(h), float(l), float(s)) return ColorValue([x * 255.0 for x in res] + [float(a)])
HSL with alpha channel color value.
def _hue(color, **kwargs): h = colorsys.rgb_to_hls(*[x / 255.0 for x in color.value[:3]])[0] return NumberValue(h * 360.0)
Get hue value of HSL color.
def _lightness(color, **kwargs): l = colorsys.rgb_to_hls(*[x / 255.0 for x in color.value[:3]])[1] return NumberValue((l * 100, '%'))
Get lightness value of HSL color.
def _saturation(color, **kwargs):
    s = colorsys.rgb_to_hls(*[x / 255.0 for x in color.value[:3]])[2]
    return NumberValue((s * 100, '%'))
Get saturation value of HSL color.
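The three HSL getters above all lean on colorsys.rgb_to_hls; a quick standalone check of the scaling they apply (pure red as the sample color):

import colorsys

h, l, s = colorsys.rgb_to_hls(1.0, 0.0, 0.0)   # channels already scaled to 0..1
assert h * 360 == 0.0     # hue in degrees
assert l * 100 == 50.0    # lightness in percent
assert s * 100 == 100.0   # saturation in percent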
def _invert(color, **kwargs): col = ColorValue(color) args = [ 255.0 - col.value[0], 255.0 - col.value[1], 255.0 - col.value[2], col.value[3], ] inverted = ColorValue(args) return inverted
Returns the inverse (negative) of a color. The red, green, and blue values are inverted, while the opacity is left alone.
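The inversion is per-channel subtraction from 255 with the alpha left alone; a quick check with plain tuples (the sample color is arbitrary):

rgba = (0.0, 0.0, 255.0, 1.0)        # blue, fully opaque
inverted = (255.0 - rgba[0], 255.0 - rgba[1], 255.0 - rgba[2], rgba[3])
assert inverted == (255.0, 255.0, 0.0, 1.0)   # yellow, opacity unchanged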
def load(path, cache=None, precache=False): parser = Stylesheet(cache) return parser.load(path, precache=precache)
Parse from file.
def parse(self, target): if isinstance(target, ContentNode): if target.name: self.parent = target self.name.parse(self) self.name += target.name target.ruleset.append(self) self.root.cache['rset'][str(self.name).split()[0]].add(self) super(Ruleset, self).parse(target)
Parse nested rulesets and save it in cache.
def parse(self, target): if not isinstance(target, Node): parent = ContentNode(None, None, []) parent.parse(target) target = parent super(Declaration, self).parse(target) self.name = str(self.data[0]) while isinstance(target, Declaration): self.name = '-'.join((str(target.data[0]), self.name)) target = target.parent self.expr = ' '.join(str (n) for n in self.data [2:] if not isinstance(n, Declaration)) if self.expr: target.declareset.append(self)
Parse nested declaration.
def parse(self, target): super(VarDefinition, self).parse(target) if isinstance(self.parent, ParseNode): self.parent.ctx.update({self.name: self.expression.value}) self.root.set_var(self)
Update root and parent context.
def set_var(self, vardef): if not(vardef.default and self.cache['ctx'].get(vardef.name)): self.cache['ctx'][vardef.name] = vardef.expression.value
Set variable to global stylesheet context.
def set_opt(self, name, value): self.cache['opts'][name] = value if name == 'compress': self.cache['delims'] = self.def_delims if not value else ( '', '', '')
Set option.
def update(self, cache): self.cache['delims'] = cache.get('delims') self.cache['opts'].update(cache.get('opts')) self.cache['rset'].update(cache.get('rset')) self.cache['mix'].update(cache.get('mix')) map(self.set_var, cache['ctx'].values())
Update self cache from other.
def scan(src): assert isinstance(src, (unicode_, bytes_)) try: nodes = STYLESHEET.parseString(src, parseAll=True) return nodes except ParseBaseException: err = sys.exc_info()[1] print(err.line, file=sys.stderr) print(" " * (err.column - 1) + "^", file=sys.stderr) print(err, file=sys.stderr) sys.exit(1)
Scan scss from string and return nodes.
def loads(self, src): assert isinstance(src, (unicode_, bytes_)) nodes = self.scan(src.strip()) self.parse(nodes) return ''.join(map(str, nodes))
Compile css from scss string.
def load(self, f, precache=None): precache = precache or self.get_opt('cache') or False nodes = None if isinstance(f, file_): path = os.path.abspath(f.name) else: path = os.path.abspath(f) f = open(f) cache_path = os.path.splitext(path)[0] + '.ccss' if precache and os.path.exists(cache_path): ptime = os.path.getmtime(cache_path) ttime = os.path.getmtime(path) if ptime > ttime: dump = open(cache_path, 'rb').read() nodes = pickle.loads(dump) if not nodes: src = f.read() nodes = self.scan(src.strip()) if precache: f = open(cache_path, 'wb') pickle.dump(nodes, f, protocol=1) self.parse(nodes) return ''.join(map(str, nodes))
Compile scss from file. f is either a string path or a file object.
def load_config(filename, filepath=''): FILE = path.join(filepath, filename) try: cfg.read(FILE) global _loaded _loaded = True except: print("configfile not found.")
Loads config file Parameters ---------- filename: str Filename of config file (incl. file extension) filepath: str Absolute path to directory of desired config file
def get_metadata(session, name): resp = session.get( "https://api.launchpad.net/1.0/{}/releases".format(name)) if resp.status_code == 200: return resp.json() return {}
Gets meta data from launchpad for the given package. :param session: requests Session instance :param name: str, package :return: dict, meta data
def get_content(session, urls): for url in urls: resp = session.get(url) if resp.ok: return resp.json() return {}
Loads the content from URLs, returning the first successful JSON response. :param session: requests Session instance :param urls: list, str URLs :return: dict, content
def parse(name, content, releases, get_head_fn): try: return {e["version"]: e["changelog"] for e in content["entries"] if e["changelog"]} except KeyError: return {}
Parses the given content for a valid changelog :param name: str, package name :param content: str, content :param releases: list, releases :param get_head_fn: function :return: dict, changelog
def init_app(self, app, add_context_processor=True): # Check if login manager has been initialized if not hasattr(app, 'login_manager'): self.login_manager.init_app( app, add_context_processor=add_context_processor) # Clear flashed messages since we redirect to auth immediately self.login_manager.login_message = None self.login_manager.needs_refresh_message = None # Set default unauthorized callback self.login_manager.unauthorized_handler(self.unauthorized_callback)
Initialize with app configuration
def login_url(self, params=None, **kwargs): kwargs.setdefault('response_type', 'code') kwargs.setdefault('access_type', 'online') if 'prompt' not in kwargs: kwargs.setdefault('approval_prompt', 'auto') scopes = kwargs.pop('scopes', self.scopes.split(',')) if USERINFO_PROFILE_SCOPE not in scopes: scopes.append(USERINFO_PROFILE_SCOPE) redirect_uri = kwargs.pop('redirect_uri', self.redirect_uri) state = self.sign_params(params or {}) return GOOGLE_OAUTH2_AUTH_URL + '?' + urlencode( dict(client_id=self.client_id, scope=' '.join(scopes), redirect_uri=redirect_uri, state=state, **kwargs))
Return login url with params encoded in state Available Google auth server params: response_type: code, token prompt: none, select_account, consent approval_prompt: force, auto access_type: online, offline scopes: string (separated with commas) or list redirect_uri: string login_hint: string
def unauthorized_callback(self): return redirect(self.login_url(params=dict(next=request.url)))
Redirect to login url with next param set as request.url
def exchange_code(self, code, redirect_uri): token = requests.post(GOOGLE_OAUTH2_TOKEN_URL, data=dict( code=code, redirect_uri=redirect_uri, grant_type='authorization_code', client_id=self.client_id, client_secret=self.client_secret, )).json() if not token or token.get('error'): abort(400) return token
Exchanges code for token/s
def get_access_token(self, refresh_token): token = requests.post(GOOGLE_OAUTH2_TOKEN_URL, data=dict( refresh_token=refresh_token, grant_type='refresh_token', client_id=self.client_id, client_secret=self.client_secret, )).json() if not token or token.get('error'): return return token
Use a refresh token to obtain a new access token
def oauth2callback(self, view_func): @wraps(view_func) def decorated(*args, **kwargs): params = {} # Check sig if 'state' in request.args: params.update(**self.parse_state(request.args.get('state'))) if params.pop('sig', None) != make_secure_token(**params): return self.login_manager.unauthorized() code = request.args.get('code') # Web server flow if code: token = self.exchange_code( code, url_for( request.endpoint, _external=True, _scheme=self.redirect_scheme, ), ) userinfo = self.get_userinfo(token['access_token']) params.update(token=token, userinfo=userinfo) # Browser flow else: if params: params.update(dict(request.args.items())) else: return ''' <script> window.onload = function() { location.href = '?' + window.location.hash.substr(1); }; </script> ''' return view_func(**params) return decorated
Decorator for OAuth2 callback. Calls `GoogleLogin.login` then passes results to `view_func`.
def parse(name, content, releases, get_head_fn): changelog = {} releases = frozenset(releases) head = False date_line = None for line in content.splitlines(): if DATE_RE.match(line): date_line = line continue if line.strip().startswith("PyAudio"): try: head = line.strip().split()[1] except: continue changelog[head] = date_line + "\n" continue if not head: continue line = line.replace("@", "") line = line.replace("#", "") changelog[head] += line + "\n" return changelog
Parses the given content for a valid changelog :param name: str, package name :param content: str, content :param releases: list, releases :param get_head_fn: function :return: dict, changelog
def validate_url(url):
    if validators.url(url):
        return url
    elif validators.domain(url):
        return "http://{}".format(url)
    return ""
Validates the URL :param url: :return:
def validate_repo_url(url): try: if "github.com" in url: return re.findall(r"https?://w?w?w?.?github.com/[\w\-]+/[\w.-]+", url)[0] elif "bitbucket.org" in url: return re.findall(r"https?://bitbucket.org/[\w.-]+/[\w.-]+", url)[0] + "/src/" elif "launchpad.net" in url: return re.findall(r"https?://launchpad.net/[\w.-]+", url)[0] elif "sourceforge.net" in url: mo = re.match(r"https?://sourceforge.net/projects/" r"([\w.-]+)/", url, re.I) template = "https://sourceforge.net/p/{}/code/HEAD/tree/trunk/src/" return template.format(mo.groups()[0]) except (IndexError, AttributeError): pass return ""
Validates and formats `url` to be a valid URL pointing to a repo on github.com, bitbucket.org, launchpad.net or sourceforge.net :param url: str, URL :return: str, valid URL if a valid repo, empty string otherwise
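A quick check of the github branch of that normalization, using the regex from the code above on an illustrative URL:

import re

url = "https://github.com/requests/requests/issues"
repo = re.findall(r"https?://w?w?w?.?github.com/[\w\-]+/[\w.-]+", url)[0]
assert repo == "https://github.com/requests/requests"   # trailing path segments are dropped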
def contains_project_name(name, link):
    def unclutter(string):
        # strip out all python references and remove all excessive characters
        string = string.lower().replace("_", "-").replace(".", "-")
        for replace in ["python-", "py-", "-py", "-python"]:
            string = string.replace(replace, "")
        return re.sub("[^0123456789 a-zA-Z]", "", string).strip()

    return unclutter(name) in unclutter(link)
Checks if the given link `somewhat` contains the project name. :param name: str, project name :param link: str, link :return: bool, True if the link contains the project name
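Assuming the function above is importable, two illustrative calls (the package names and URLs are examples, not taken from real metadata):

assert contains_project_name("python-ldap", "https://github.com/python-ldap/python-ldap")
assert not contains_project_name("requests", "https://github.com/kennethreitz/records")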
def find_repo_urls(session, name, candidates): for _url in candidates: if validate_url(_url): try: resp = session.get(_url) if resp.status_code == 200: tree = etree.HTML(resp.content) if tree: for link in frozenset([str(l) for l in tree.xpath("//a/@href")]): # check if the link 1) is to github.com / bitbucket.org AND 2) somewhat # contains the project name if ("github.com" in link or "bitbucket.org" in link or "sourceforge.net" in link) \ and contains_project_name(name, link): link = validate_url(validate_repo_url(url=link)) if link: logger.debug("Found repo URL {}".format(link)) yield link except ConnectionError: # we really don't care about connection errors here. a lot of project pages are simply # down because the project is no longer maintained pass except etree.XMLSyntaxError: # unable to parse HTML pass except UnicodeEncodeError: pass
Visits the given URL candidates and searches the page for valid links to a repository. :param session: requests Session instance :param name: str, project name :param candidates: list, list of URL candidates :return: str, URL to a repo
def filter_repo_urls(candidates): # first, we are going to filter down the URL candidates to be all valid urls candidates = set(url for url in [validate_url(_url) for _url in candidates] if url) logger.info("Got repo candidates {}".format(candidates)) repos = set(url for url in [validate_repo_url(_url) for _url in candidates] if url) logger.info("Filtered initial candidates down to {}".format(repos)) return repos
Filters down a list of URL candidates :param candidates: list, URL candidates :return: set, Repo URLs