Columns: docstring (string, lengths 52–499) · function (string, lengths 67–35.2k) · __index_level_0__ (int64, 52.6k–1.16M)
Set attributes to scroll the buffer correctly. Args: lines: An int, number of lines to scroll. If None, scrolls by the terminal length.
def _Scroll(self, lines=None):
    if lines is None:
        lines = self._cli_lines
    if lines < 0:
        self._displayed -= self._cli_lines
        self._displayed += lines
        if self._displayed < 0:
            self._displayed = 0
        self._lines_to_show = self._cli_lines
    else:
        self._lines_to_show = lines
    self._lastscroll = lines
222,064
Embeds scikit-plot instance methods in an sklearn classifier. Args: clf: Scikit-learn classifier instance Returns: The same scikit-learn classifier instance passed in **clf** with embedded scikit-plot instance methods. Raises: TypeError: If **clf** does not contain the instance methods necessary for scikit-plot instance methods.
def classifier_factory(clf):
    required_methods = ['fit', 'score', 'predict']
    for method in required_methods:
        if not hasattr(clf, method):
            raise TypeError('"{}" is not in clf. Did you pass a '
                            'classifier instance?'.format(method))
    optional_methods = ['predict_proba']
    for method in optional_methods:
        if not hasattr(clf, method):
            warnings.warn('{} not in clf. Some plots may '
                          'not be possible to generate.'.format(method))
    additional_methods = {
        'plot_learning_curve': plot_learning_curve,
        'plot_confusion_matrix': plot_confusion_matrix_with_cv,
        'plot_roc_curve': plot_roc_curve_with_cv,
        'plot_ks_statistic': plot_ks_statistic_with_cv,
        'plot_precision_recall_curve': plot_precision_recall_curve_with_cv,
        'plot_feature_importances': plot_feature_importances
    }
    for key, fn in six.iteritems(additional_methods):
        if hasattr(clf, key):
            warnings.warn('"{}" method already in clf. '
                          'Overriding anyway. This may '
                          'result in unintended behavior.'.format(key))
        setattr(clf, key, types.MethodType(fn, clf))
    return clf
222,307
Embeds scikit-plot plotting methods in an sklearn clusterer instance. Args: clf: Scikit-learn clusterer instance Returns: The same scikit-learn clusterer instance passed in **clf** with embedded scikit-plot instance methods. Raises: TypeError: If **clf** does not contain the instance methods necessary for scikit-plot instance methods.
def clustering_factory(clf):
    required_methods = ['fit', 'fit_predict']
    for method in required_methods:
        if not hasattr(clf, method):
            raise TypeError('"{}" is not in clf. Did you '
                            'pass a clusterer instance?'.format(method))
    additional_methods = {
        'plot_silhouette': plot_silhouette,
        'plot_elbow_curve': plot_elbow_curve
    }
    for key, fn in six.iteritems(additional_methods):
        if hasattr(clf, key):
            warnings.warn('"{}" method already in clf. '
                          'Overriding anyway. This may '
                          'result in unintended behavior.'.format(key))
        setattr(clf, key, types.MethodType(fn, clf))
    return clf
222,325
Generator for exponential decay. Args: base: The mathematical base of the exponentiation operation. factor: Factor to multiply the exponentiation by. max_value: The maximum value to yield. Once the value in the true exponential sequence exceeds this, the value of max_value will forever after be yielded.
def expo(base=2, factor=1, max_value=None):
    n = 0
    while True:
        a = factor * base ** n
        if max_value is None or a < max_value:
            yield a
            n += 1
        else:
            yield max_value
224,699
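A quick usage sketch for the generator above, using the default base=2 and factor=1: once the true sequence would exceed max_value, the cap is yielded forever.

import itertools

gen = expo(max_value=10)
print(list(itertools.islice(gen, 6)))  # [1, 2, 4, 8, 10, 10] -- 16 > 10, so the cap repeats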
Generator for Fibonacci decay. Args: max_value: The maximum value to yield. Once the value in the true Fibonacci sequence exceeds this, the value of max_value will forever after be yielded.
def fibo(max_value=None):
    a = 1
    b = 1
    while True:
        if max_value is None or a < max_value:
            yield a
            a, b = b, a + b
        else:
            yield max_value
224,700
Generator for constant intervals. Args: interval: A constant value to yield or an iterable of such values.
def constant(interval=1):
    try:
        itr = iter(interval)
    except TypeError:
        itr = itertools.repeat(interval)
    for val in itr:
        yield val
224,701
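A short usage sketch: a scalar interval repeats indefinitely, while an iterable is yielded once through and then exhausted.

import itertools

print(list(itertools.islice(constant(2), 3)))  # [2, 2, 2] -- scalar repeats forever
print(list(constant([1, 2, 3])))               # [1, 2, 3] -- iterable ends the generator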
_filename is not used Args: _filename(string)
def output(self, _filename):
    for contract in self.slither.contracts_derived:
        txt = "\nContract %s" % contract.name
        table = PrettyTable(["Function", "Modifiers"])
        for function in contract.functions:
            modifiers = function.modifiers
            for call in function.all_internal_calls():
                if isinstance(call, Function):
                    modifiers += call.modifiers
            for (_, call) in function.all_library_calls():
                if isinstance(call, Function):
                    modifiers += call.modifiers
            table.add_row([function.name, [m.name for m in set(modifiers)]])
        txt += "\n" + str(table)
        self.info(txt)
224,743
_filename is not used Args: _filename(string)
def output(self, _filename):
    for contract in self.slither.contracts_derived:
        txt = "\nContract %s" % contract.name
        table = PrettyTable(["Function", "require or assert"])
        for function in contract.functions:
            require = function.all_slithir_operations()
            require = [ir for ir in require if isinstance(ir, SolidityCall)
                       and ir.function in require_or_assert]
            require = [ir.node for ir in require]
            table.add_row([function.name,
                           self._convert([str(m.expression) for m in set(require)])])
        txt += "\n" + str(table)
        self.info(txt)
224,756
Return the nodes where the return value of a call is unused Args: f (Function) Returns: list(Node)
def detect_unused_return_values(self, f):
    values_returned = []
    nodes_origin = {}
    for n in f.nodes:
        for ir in n.irs:
            if isinstance(ir, HighLevelCall):
                # if a return value is stored in a state variable, it's ok
                if ir.lvalue and not isinstance(ir.lvalue, StateVariable):
                    values_returned.append(ir.lvalue)
                    nodes_origin[ir.lvalue] = ir
            for read in ir.read:
                if read in values_returned:
                    values_returned.remove(read)
    return [nodes_origin[value].node for value in values_returned]
224,757
Check if the code is complex. Heuristic: the code is considered complex if one function has a cyclomatic complexity > 7. Args: contract
def is_complex_code(self, contract):
    is_complex = self._is_complex_code(contract)
    result = red('Yes') if is_complex else green('No')
    return "\tComplex code? {}\n".format(result)
224,763
_filename is not used Args: _filename(string)
def output(self, _filename):
    txt = "Analysis of {}\n".format(self.slither.filename)
    txt += self.get_detectors_result()
    for contract in self.slither.contracts_derived:
        txt += "\nContract {}\n".format(contract.name)
        txt += self.is_complex_code(contract)
        is_erc20 = contract.is_erc20()
        txt += '\tNumber of functions: {}\n'.format(self._number_functions(contract))
        txt += "\tIs ERC20 token: {}\n".format(is_erc20)  # reuse the cached result
        if is_erc20:
            txt += self.get_summary_erc20(contract)
    self.info(txt)
224,764
Output the graph in filename Args: filename(string)
def output(self, filename):
    if filename == '':
        filename = 'contracts.dot'
    if not filename.endswith('.dot'):
        filename += ".dot"
    info = 'Inheritance Graph: ' + filename
    self.info(info)
    with open(filename, 'w', encoding='utf8') as f:
        f.write('digraph "" {\n')
        for c in self.contracts:
            f.write(self._summary(c))
        f.write('}')
224,771
_filename is not used Args: _filename(string)
def output(self, _filename):
    txt = ""
    for c in self.contracts:
        (name, _inheritance, _var, func_summaries, _modif_summaries) = c.get_summary()
        txt += blue("\n+ Contract %s\n" % name)
        # (c_name, f_name, visi, _, _, _, _, _) in func_summaries
        public = [(elem[0], (elem[1], elem[2])) for elem in func_summaries]
        collect = collections.defaultdict(list)
        for a, b in public:
            collect[a].append(b)
        public = list(collect.items())
        for contract, functions in public:
            txt += blue("  - From {}\n".format(contract))
            functions = sorted(functions)
            for (function, visi) in functions:
                if visi in ['external', 'public']:
                    txt += green("    - {} ({})\n".format(function, visi))
            for (function, visi) in functions:
                if visi in ['internal', 'private']:
                    txt += magenta("    - {} ({})\n".format(function, visi))
            for (function, visi) in functions:
                if visi not in ['external', 'public', 'internal', 'private']:
                    txt += "    - {} ({})\n".format(function, visi)
    self.info(txt)
224,773
_filename is not used Args: _filename(string)
def output(self, _filename):
    txt = ''
    for contract in self.slither.contracts_derived:
        txt += '\n{}:\n'.format(contract.name)
        table = PrettyTable(['Name', 'ID'])
        for function in contract.functions:
            if function.visibility in ['public', 'external']:
                table.add_row([function.full_name,
                               hex(get_function_id(function.full_name))])
        for variable in contract.state_variables:
            if variable.visibility in ['public']:
                variable_getter_args = ""
                if type(variable.type) is ArrayType:
                    length = 0
                    v = variable
                    while type(v.type) is ArrayType:
                        length += 1
                        v = v.type
                    variable_getter_args = ','.join(["uint256"] * length)
                elif type(variable.type) is MappingType:
                    variable_getter_args = variable.type.type_from
                table.add_row([f"{variable.name}({variable_getter_args})",
                               hex(get_function_id(f"{variable.name}({variable_getter_args})"))])
        txt += str(table) + '\n'
    self.info(txt)
224,788
_filename is not used Args: _filename(string)
def output(self, _filename):
    for c in self.contracts:
        (name, inheritance, var, func_summaries, modif_summaries) = c.get_summary()
        txt = "\nContract %s" % name
        txt += '\nContract vars: ' + str(var)
        txt += '\nInheritance: ' + str(inheritance)
        table = PrettyTable(["Function", "Visibility", "Modifiers", "Read", "Write",
                             "Internal Calls", "External Calls"])
        for (_c_name, f_name, visi, modifiers, read, write,
             internal_calls, external_calls) in func_summaries:
            read = self._convert(read)
            write = self._convert(write)
            internal_calls = self._convert(internal_calls)
            external_calls = self._convert(external_calls)
            table.add_row([f_name, visi, modifiers, read, write,
                           internal_calls, external_calls])
        txt += "\n\n" + str(table)
        table = PrettyTable(["Modifiers", "Visibility", "Read", "Write",
                             "Internal Calls", "External Calls"])
        for (_c_name, f_name, visi, _, read, write,
             internal_calls, external_calls) in modif_summaries:
            read = self._convert(read)
            write = self._convert(write)
            internal_calls = self._convert(internal_calls)
            external_calls = self._convert(external_calls)
            table.add_row([f_name, visi, read, write, internal_calls, external_calls])
        txt += "\n\n" + str(table)
        txt += "\n"
        self.info(txt)
224,798
Return the function id of the given signature Args: sig (str) Return: (int)
def get_function_id(sig):
    s = sha3.keccak_256()
    s.update(sig.encode('utf-8'))
    return int("0x" + s.hexdigest()[:8], 16)
224,799
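A quick usage check against a well-known selector (this assumes the pysha3 package, which provides the sha3 module used above):

print(hex(get_function_id('transfer(address,uint256)')))  # 0xa9059cbb, the ERC-20 transfer selector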
Output the graph in filename Args: filename(string)
def output(self, filename):
    if not filename.endswith('.dot'):
        filename += '.dot'
    if filename == ".dot":
        filename = "all_contracts.dot"
    with open(filename, 'w', encoding='utf8') as f:
        self.info(f'Call Graph: {filename}')
        f.write('\n'.join(['strict digraph {'] +
                          [self._process_functions(self.slither.functions)] +
                          ['}']))
    for derived_contract in self.slither.contracts_derived:
        with open(f'{derived_contract.name}.dot', 'w', encoding='utf8') as f:
            self.info(f'Call Graph: {derived_contract.name}.dot')
            f.write('\n'.join(['strict digraph {'] +
                              [self._process_functions(derived_contract.functions)] +
                              ['}']))
224,853
_filename is not used Args: _filename(string)
def output(self, _filename):
    # accumulate into txt instead of printing, so self.info() actually emits the report
    txt = ""
    for contract in self.contracts:
        txt += 'Contract {}\n'.format(contract.name)
        for function in contract.functions:
            if function.contract == contract:
                txt += '\tFunction {}\n'.format(function.full_name)
                for node in function.nodes:
                    if node.expression:
                        txt += '\t\tExpression: {}\n'.format(node.expression)
                        txt += '\t\tIRs:\n'
                        for ir in node.irs:
                            txt += '\t\t\t{}\n'.format(ir)
                    elif node.irs:
                        txt += '\t\tIRs:\n'
                        for ir in node.irs:
                            txt += '\t\t\t{}\n'.format(ir)
        for modifier in contract.modifiers:
            if modifier.contract == contract:
                txt += '\tModifier {}\n'.format(modifier.full_name)
                for node in modifier.nodes:
                    txt += '{}\n'.format(node)
                    if node.expression:
                        txt += '\t\tExpression: {}\n'.format(node.expression)
                        txt += '\t\tIRs:\n'
                        for ir in node.irs:
                            txt += '\t\t\t{}\n'.format(ir)
    self.info(txt)
224,857
_filename is not used Args: _filename(string)
def output(self, _filename):
    txt = ''
    for c in self.contracts:
        txt += "\nContract %s\n" % c.name
        table = PrettyTable(['Variable', 'Dependencies'])
        for v in c.state_variables:
            table.add_row([v.name, _get(v, c)])
        txt += str(table)
        txt += "\n"
        for f in c.functions_and_modifiers_not_inherited:
            txt += "\nFunction %s\n" % f.full_name
            table = PrettyTable(['Variable', 'Dependencies'])
            for v in f.variables:
                table.add_row([v.name, _get(v, f)])
            for v in c.state_variables:
                table.add_row([v.canonical_name, _get(v, f)])
            txt += str(table)
    self.info(txt)
224,916
Export one CFG per function as a .dot file. Args: original_filename(string): prefix for the exported .dot files
def output(self, original_filename):
    for contract in self.contracts:
        for function in contract.functions + contract.modifiers:
            filename = "{}-{}-{}.dot".format(original_filename, contract.name,
                                             function.full_name)
            self.info('Export {}'.format(filename))
            function.slithir_cfg_to_dot(filename)
224,937
Add SSA version of the IR Args: function all_state_variables_instances
def add_ssa_ir(function, all_state_variables_instances):
    if not function.is_implemented:
        return

    init_definition = dict()
    for v in function.parameters:
        if v.name:
            init_definition[v.name] = (v, function.entry_point)
            function.entry_point.add_ssa_ir(Phi(LocalIRVariable(v), set()))
    for v in function.returns:
        if v.name:
            init_definition[v.name] = (v, function.entry_point)

    # We only add a phi function for a state variable at the entry node if:
    # - the state variable is used, and
    # - the state variable is written in another function (otherwise it stays at index 0)
    for (_, variable_instance) in all_state_variables_instances.items():
        if is_used_later(function.entry_point, variable_instance):
            # rvalues are fixed in solc_parsing.declaration.function
            function.entry_point.add_ssa_ir(Phi(StateIRVariable(variable_instance), set()))

    add_phi_origins(function.entry_point, init_definition, dict())

    for node in function.nodes:
        for (variable, nodes) in node.phi_origins_local_variables.values():
            if len(nodes) < 2:
                continue
            if not is_used_later(node, variable):
                continue
            node.add_ssa_ir(Phi(LocalIRVariable(variable), nodes))
        for (variable, nodes) in node.phi_origins_state_variables.values():
            if len(nodes) < 2:
                continue
            # if not is_used_later(node, variable.name, []):
            #     continue
            node.add_ssa_ir(Phi(StateIRVariable(variable), nodes))

    init_local_variables_instances = dict()
    for v in function.parameters:
        if v.name:
            new_var = LocalIRVariable(v)
            function.add_parameter_ssa(new_var)
            if new_var.is_storage:
                fake_variable = LocalIRVariable(v)
                fake_variable.name = 'STORAGE_' + fake_variable.name
                fake_variable.set_location('reference_to_storage')
                new_var.refers_to = {fake_variable}
                init_local_variables_instances[fake_variable.name] = fake_variable
            init_local_variables_instances[v.name] = new_var
    for v in function.returns:
        if v.name:
            new_var = LocalIRVariable(v)
            function.add_return_ssa(new_var)
            if new_var.is_storage:
                fake_variable = LocalIRVariable(v)
                fake_variable.name = 'STORAGE_' + fake_variable.name
                fake_variable.set_location('reference_to_storage')
                new_var.refers_to = {fake_variable}
                init_local_variables_instances[fake_variable.name] = fake_variable
            init_local_variables_instances[v.name] = new_var

    all_init_local_variables_instances = dict(init_local_variables_instances)
    init_state_variables_instances = dict(all_state_variables_instances)

    initiate_all_local_variables_instances(function.nodes,
                                           init_local_variables_instances,
                                           all_init_local_variables_instances)

    generate_ssa_irs(function.entry_point,
                     dict(init_local_variables_instances),
                     all_init_local_variables_instances,
                     dict(init_state_variables_instances),
                     all_state_variables_instances,
                     init_local_variables_instances,
                     [])

    fix_phi_rvalues_and_storage_ref(function.entry_point,
                                    dict(init_local_variables_instances),
                                    all_init_local_variables_instances,
                                    dict(init_state_variables_instances),
                                    all_state_variables_instances,
                                    init_local_variables_instances)
224,984
Detect arbitrary send Args: contract (Contract) Returns: list((Function), (list (Node)))
def detect_arbitrary_send(self, contract):
    ret = []
    for f in [f for f in contract.functions if f.contract == contract]:
        nodes = self.arbitrary_send(f)
        if nodes:
            ret.append((f, nodes))
    return ret
225,005
Return a contract from a name Args: contract_name (str): name of the contract Returns: Contract
def get_contract_from_name(self, contract_name):
    return next((c for c in self.contracts if c.name == contract_name), None)
225,026
Output the inheritance relation. _filename is not used. Args: _filename(string)
def output(self, _filename):
    info = 'Inheritance\n'
    if not self.contracts:
        return
    info += blue('Child_Contract -> ') + green('Immediate_Base_Contracts')
    info += green(' [Not_Immediate_Base_Contracts]')
    for child in self.contracts:
        info += blue(f'\n+ {child.name}')
        if child.inheritance:
            immediate = child.immediate_inheritance
            not_immediate = [i for i in child.inheritance if i not in immediate]
            info += ' -> ' + green(", ".join(map(str, immediate)))
            if not_immediate:
                info += ", [" + green(", ".join(map(str, not_immediate))) + "]"
    info += green('\n\nBase_Contract -> ') + blue('Immediate_Child_Contracts')
    info += blue(' [Not_Immediate_Child_Contracts]')
    for base in self.contracts:
        info += green(f'\n+ {base.name}')
        children = list(self._get_child_contracts(base))
        if children:
            immediate = [child for child in children
                         if base in child.immediate_inheritance]
            not_immediate = [child for child in children if child not in immediate]
            info += ' -> ' + blue(", ".join(map(str, immediate)))
            if not_immediate:
                info += ', [' + blue(", ".join(map(str, not_immediate))) + ']'
    self.info(info)
225,049
_filename is not used Args: _filename(string)
def output(self, _filename):
    for contract in self.contracts:
        txt = "\nContract %s\n" % contract.name
        table = PrettyTable(["Function", "State variables written",
                             "Conditions on msg.sender"])
        for function in contract.functions:
            state_variables_written = [v.name for v in function.all_state_variables_written()]
            msg_sender_condition = self.get_msg_sender_checks(function)
            table.add_row([function.name, str(state_variables_written),
                           str(msg_sender_condition)])
        self.info(txt + str(table))
225,132
Remove the father node. Do nothing if the node is not a father. Args: father: the father node to remove
def remove_father(self, father):
    self._fathers = [x for x in self._fathers if x.node_id != father.node_id]
225,140
Remove the son node. Do nothing if the node is not a son. Args: son: the son node to remove
def remove_son(self, son):
    self._sons = [x for x in self._sons if x.node_id != son.node_id]
225,141
Compute the number of edges of the CFG Args: function (core.declarations.function.Function) Returns: int
def compute_number_edges(function):
    n = 0
    for node in function.nodes:
        n += len(node.sons)
    return n
225,176
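A minimal sanity check with stub objects; Node and Function here are hypothetical stand-ins assumed only to expose the nodes and sons attributes the helper reads:

class Node:
    def __init__(self):
        self.sons = []

class Function:
    def __init__(self, nodes):
        self.nodes = nodes

# Diamond CFG: entry -> (a, b) -> exit, i.e. 4 edges in total.
entry, a, b, exit_ = Node(), Node(), Node(), Node()
entry.sons = [a, b]
a.sons = [exit_]
b.sons = [exit_]
print(compute_number_edges(Function([entry, a, b, exit_])))  # 4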
Compute strongly connected components. Based on Kosaraju's algorithm; the implementation follows the Wikipedia description: https://en.wikipedia.org/wiki/Kosaraju%27s_algorithm#The_algorithm Args: function (core.declarations.function.Function) Returns: list(list(nodes))
def compute_strongly_connected_components(function):
    visited = {n: False for n in function.nodes}
    assigned = {n: False for n in function.nodes}
    components = []
    l = []

    def visit(node):
        if not visited[node]:
            visited[node] = True
            for son in node.sons:
                visit(son)
            l.append(node)

    for n in function.nodes:
        visit(n)

    def assign(node, root):
        if not assigned[node]:
            assigned[node] = True
            root.append(node)
            for father in node.fathers:
                assign(father, root)

    for n in l:
        component = []
        assign(n, component)
        if component:
            components.append(component)

    return components
225,177
Compute the cyclomatic complexity of a function Args: function (core.declarations.function.Function) Returns: int
def compute_cyclomatic_complexity(function):
    # from https://en.wikipedia.org/wiki/Cyclomatic_complexity
    # M = E - N + 2P
    # where M is the complexity
    #       E is the number of edges
    #       N is the number of nodes
    #       P is the number of connected components
    E = compute_number_edges(function)
    N = len(function.nodes)
    P = len(compute_strongly_connected_components(function))
    return E - N + 2 * P
225,178
_filename is not used Args: _filename(string)
def output(self, _filename):
    txt = ''
    for contract in self.slither.contracts_derived:
        txt += '\n{}:\n'.format(contract.name)
        table = PrettyTable(['Name', 'Type'])
        for variable in contract.state_variables:
            if not variable.is_constant:
                table.add_row([variable.name, str(variable.type)])
        txt += str(table) + '\n'
    self.info(txt)
225,184
Return the source mapping where the variable is declared Args: var (str): variable name Returns: (dict): sourceMapping
def get_source_var_declaration(self, var):
    return next((x.source_mapping for x in self.variables if x.name == var))
225,199
Return the source mapping where the event is declared Args: event (str): event name Returns: (dict): sourceMapping
def get_source_event_declaration(self, event):
    return next((x.source_mapping for x in self.events if x.name == event))
225,200
Return a function from a signature Args: function_signature (str): signature of the function (without return statement) Returns: Function
def get_function_from_signature(self, function_signature):
    return next((f for f in self.functions if f.full_name == function_signature), None)
225,201
Return a modifier from a signature Args: modifier_signature (str): signature of the modifier Returns: Modifier
def get_modifier_from_signature(self, modifier_signature):
    return next((m for m in self.modifiers if m.full_name == modifier_signature), None)
225,202
Return a state variable from a name Args: variable_name (str): name of the variable Returns: StateVariable
def get_state_variable_from_name(self, variable_name):
    return next((v for v in self.state_variables if v.name == variable_name), None)
225,203
Return a structure from a name Args: structure_name (str): name of the structure Returns: Structure
def get_structure_from_name(self, structure_name):
    return next((st for st in self.structures if st.name == structure_name), None)
225,204
Return a structure from a canonical name Args: structure_name (str): canonical name of the structure Returns: Structure
def get_structure_from_canonical_name(self, structure_name):
    return next((st for st in self.structures if st.canonical_name == structure_name), None)
225,205
Return an event from a name Args: event_name (str): name of the event Returns: Event
def get_event_from_name(self, event_name):
    return next((e for e in self.events if e.name == event_name), None)
225,206
Return an enum from a name Args: enum_name (str): name of the enum Returns: Enum
def get_enum_from_name(self, enum_name):
    return next((e for e in self.enums if e.name == enum_name), None)
225,207
Return an enum from a canonical name Args: enum_name (str): canonical name of the enum Returns: Enum
def get_enum_from_canonical_name(self, enum_name):
    return next((e for e in self.enums if e.canonical_name == enum_name), None)
225,208
Return the list of functions overridden by the function Args: function (core.Function) Returns: list(core.Function)
def get_functions_overridden_by(self, function):
    candidates = [c.functions_not_inherited for c in self.inheritance]
    candidates = [candidate for sublist in candidates for candidate in sublist]
    return [f for f in candidates if f.full_name == function.full_name]
225,209
Return a list of potential signatures. It is a list, as constant variables can be converted to int256. Args: ir (slithIR.operation) Returns: list(str)
def get_sig(ir, name):
    sig = '{}({})'
    # list of lists of arguments
    argss = convert_arguments(ir.arguments)
    return [sig.format(name, ','.join(args)) for args in argss]
225,228
Apply a visitor to all the function expressions Args: Visitor: slither.visitors Returns list(): results of the visit
def apply_visitor(self, Visitor):
    expressions = self.expressions
    v = [Visitor(e).result() for e in expressions]
    return [item for sublist in v for item in sublist]
225,284
Return a local variable from a name Args: variable_name (str): name of the variable Returns: LocalVariable
def get_local_variable_from_name(self, variable_name):
    return next((v for v in self.variables if v.name == variable_name), None)
225,285
Export the function to a dot file Args: filename (str)
def cfg_to_dot(self, filename):
    with open(filename, 'w', encoding='utf8') as f:
        f.write('digraph{\n')
        for node in self.nodes:
            f.write('{}[label="{}"];\n'.format(node.node_id, str(node)))
            for son in node.sons:
                f.write('{}->{};\n'.format(node.node_id, son.node_id))
        f.write("}\n")
225,286
Export the function to a dot file Args: filename (str)
def slithir_cfg_to_dot(self, filename):
    from slither.core.cfg.node import NodeType
    with open(filename, 'w', encoding='utf8') as f:
        f.write('digraph{\n')
        for node in self.nodes:
            label = 'Node Type: {} {}\n'.format(NodeType.str(node.type), node.node_id)
            if node.expression:
                label += '\nEXPRESSION:\n{}\n'.format(node.expression)
            if node.irs:
                label += '\nIRs:\n' + '\n'.join([str(ir) for ir in node.irs])
            f.write('{}[label="{}"];\n'.format(node.node_id, label))
            for son in node.sons:
                f.write('{}->{};\n'.format(node.node_id, son.node_id))
        f.write("}\n")
225,287
Export the dominator tree of the function to a dot file Args: filename (str)
def dominator_tree_to_dot(self, filename):
    def description(node):
        desc = '{}\n'.format(node)
        desc += 'id: {}'.format(node.node_id)
        if node.dominance_frontier:
            desc += '\ndominance frontier: {}'.format(
                [n.node_id for n in node.dominance_frontier])
        return desc

    with open(filename, 'w', encoding='utf8') as f:
        f.write('digraph{\n')
        for node in self.nodes:
            f.write('{}[label="{}"];\n'.format(node.node_id, description(node)))
            if node.immediate_dominator:
                f.write('{}->{};\n'.format(node.immediate_dominator.node_id,
                                           node.node_id))
        f.write("}\n")
225,288
Check if the function reads the variable in an IF node. Args: variable (Variable): Returns: bool: True if the variable is read
def is_reading_in_conditional_node(self, variable):
    variables_read = [n.variables_read for n in self.nodes if n.contains_if()]
    variables_read = [item for sublist in variables_read for item in sublist]
    return variable in variables_read
225,289
Check if the function reads the variable in a require or assert. Args: variable (Variable): Returns: bool: True if the variable is read
def is_reading_in_require_or_assert(self, variable):
    variables_read = [n.variables_read for n in self.nodes
                      if n.contains_require_or_assert()]
    variables_read = [item for sublist in variables_read for item in sublist]
    return variable in variables_read
225,290
Reader for the MaxMind DB file format Arguments: database -- A path to a valid MaxMind DB file such as a GeoIP2 database file.
def __init__(self, database):
    with open(database, 'rb') as db_file:
        self._buffer = mmap.mmap(
            db_file.fileno(), 0, access=mmap.ACCESS_READ)

    metadata_start = self._buffer.rfind(self._METADATA_START_MARKER,
                                        self._buffer.size() - 128 * 1024)
    if metadata_start == -1:
        raise InvalidDatabaseError('Error opening database file ({0}). '
                                   'Is this a valid MaxMind DB file?'
                                   ''.format(database))

    metadata_start += len(self._METADATA_START_MARKER)
    metadata_decoder = Decoder(self._buffer, metadata_start)
    (metadata, _) = metadata_decoder.decode(metadata_start)
    self._metadata = Metadata(**metadata)  # pylint: disable=star-args

    self._decoder = Decoder(self._buffer,
                            self._metadata.search_tree_size +
                            self._DATA_SECTION_SEPARATOR_SIZE)
225,390
Return the record for the ip_address in the MaxMind DB Arguments: ip_address -- an IP address in the standard string notation
def get(self, ip_address):
    address = ipaddress.ip_address(ip_address)

    if address.version == 6 and self._metadata.ip_version == 4:
        raise ValueError('Error looking up {0}. You attempted to look up '
                         'an IPv6 address in an IPv4-only database.'.format(
                             ip_address))
    pointer = self._find_address_in_tree(address)

    return self._resolve_data_pointer(pointer) if pointer else None
225,391
Create a Decoder for a MaxMind DB. Arguments: database_buffer -- an mmap'd MaxMind DB file. pointer_base -- the base number to use when decoding a pointer. pointer_test -- used for internal unit testing of pointer code.
def __init__(self, database_buffer, pointer_base=0, pointer_test=False):
    self._pointer_test = pointer_test
    self._buffer = database_buffer
    self._pointer_base = pointer_base
225,566
Decode a section of the data section starting at offset Arguments: offset -- the location of the data structure to decode
def decode(self, offset):
    new_offset = offset + 1
    (ctrl_byte,) = struct.unpack(b'!B', self._buffer[offset:new_offset])
    type_num = ctrl_byte >> 5
    # Extended type
    if not type_num:
        (type_num, new_offset) = self._read_extended(new_offset)

    (size, new_offset) = self._size_from_ctrl_byte(
        ctrl_byte, new_offset, type_num)
    return self._type_decoder[type_num](self, size, new_offset)
225,574
The binary representation of this address. Args: address: An integer representation of an IPv4 IP address. Returns: The binary representation of this address. Raises: ValueError: If the integer is too large to be an IPv4 IP address.
def v4_int_to_packed(address):
    if address > _BaseV4._ALL_ONES:
        raise ValueError('Address too large for IPv4')
    return Bytes(struct.pack('!I', address))
225,638
Get the number of leading bits that are same for two numbers. Args: number1: an integer. number2: another integer. bits: the maximum number of bits to compare. Returns: The number of leading bits that are the same for two numbers.
def _get_prefix_length(number1, number2, bits):
    for i in range(bits):
        if number1 >> i == number2 >> i:
            return bits - i
    return 0
225,639
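A quick numeric check of the helper above:

# 0b1100 and 0b1000 agree only on their single leading bit (out of 4).
print(_get_prefix_length(0b1100, 0b1000, 4))  # 1
# Identical numbers share all leading bits.
print(_get_prefix_length(10, 10, 4))          # 4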
Collapse a list of IP objects. Example: collapse_address_list([IPv4('1.1.0.0/24'), IPv4('1.1.1.0/24')]) -> [IPv4('1.1.0.0/23')] Args: addresses: A list of IPv4Network or IPv6Network objects. Returns: A list of IPv4Network or IPv6Network objects depending on what we were passed. Raises: TypeError: If passed a list of mixed version objects.
def collapse_address_list(addresses):
    i = 0
    addrs = []
    ips = []
    nets = []

    # split IP addresses and networks
    for ip in addresses:
        if isinstance(ip, _BaseIP):
            if ips and ips[-1]._version != ip._version:
                raise TypeError("%s and %s are not of the same version" % (
                    str(ip), str(ips[-1])))
            ips.append(ip)
        elif ip._prefixlen == ip._max_prefixlen:
            if ips and ips[-1]._version != ip._version:
                raise TypeError("%s and %s are not of the same version" % (
                    str(ip), str(ips[-1])))
            ips.append(ip.ip)
        else:
            if nets and nets[-1]._version != ip._version:
                raise TypeError("%s and %s are not of the same version" % (
                    str(ip), str(nets[-1])))
            nets.append(ip)

    # sort and dedup
    ips = sorted(set(ips))
    nets = sorted(set(nets))

    while i < len(ips):
        (first, last) = _find_address_range(ips[i:])
        i = ips.index(last) + 1
        addrs.extend(summarize_address_range(first, last))

    return _collapse_address_list_recursive(sorted(
        addrs + nets, key=_BaseNet._get_networks_key))
225,642
Return prefix length from a bitwise netmask. Args: ip_int: An integer, the netmask in expanded bitwise format. Returns: An integer, the prefix length. Raises: NetmaskValueError: If the input is not a valid netmask.
def _prefix_from_ip_int(self, ip_int):
    prefixlen = self._max_prefixlen
    while prefixlen:
        if ip_int & 1:
            break
        ip_int >>= 1
        prefixlen -= 1

    if ip_int == (1 << prefixlen) - 1:
        return prefixlen
    else:
        raise NetmaskValueError('Bit pattern does not match /1*0*/')
225,658
Turn a prefix length string into an integer. Args: prefixlen_str: A decimal string containing the prefix length. Returns: The prefix length as an integer. Raises: NetmaskValueError: If the input is malformed or out of range.
def _prefix_from_prefix_string(self, prefixlen_str):
    try:
        if not _BaseV4._DECIMAL_DIGITS.issuperset(prefixlen_str):
            raise ValueError
        prefixlen = int(prefixlen_str)
        if not (0 <= prefixlen <= self._max_prefixlen):
            raise ValueError
    except ValueError:
        raise NetmaskValueError('%s is not a valid prefix length' %
                                prefixlen_str)
    return prefixlen
225,659
Turn a netmask/hostmask string into a prefix length. Args: ip_str: A netmask or hostmask, formatted as an IP address. Returns: The prefix length as an integer. Raises: NetmaskValueError: If the input is not a netmask or hostmask.
def _prefix_from_ip_string(self, ip_str):
    # Parse the netmask/hostmask like an IP address.
    try:
        ip_int = self._ip_int_from_string(ip_str)
    except AddressValueError:
        raise NetmaskValueError('%s is not a valid netmask' % ip_str)

    # Try matching a netmask (this would be /1*0*/ as a bitwise regexp).
    # Note that the two ambiguous cases (all-ones and all-zeroes) are
    # treated as netmasks.
    try:
        return self._prefix_from_ip_int(ip_int)
    except NetmaskValueError:
        pass

    # Invert the bits, and try matching a /0+1+/ hostmask instead.
    ip_int ^= self._ALL_ONES
    try:
        return self._prefix_from_ip_int(ip_int)
    except NetmaskValueError:
        raise NetmaskValueError('%s is not a valid netmask' % ip_str)
225,660
Turn the given IP string into an integer for comparison. Args: ip_str: A string, the IP ip_str. Returns: The IP ip_str as an integer. Raises: AddressValueError: if ip_str isn't a valid IPv4 Address.
def _ip_int_from_string(self, ip_str):
    octets = ip_str.split('.')
    if len(octets) != 4:
        raise AddressValueError(ip_str)

    packed_ip = 0
    for oc in octets:
        try:
            packed_ip = (packed_ip << 8) | self._parse_octet(oc)
        except ValueError:
            raise AddressValueError(ip_str)
    return packed_ip
225,664
Convert a decimal octet into an integer. Args: octet_str: A string, the number to parse. Returns: The octet as an integer. Raises: ValueError: if the octet isn't strictly a decimal from [0..255].
def _parse_octet(self, octet_str):
    # Whitelist the characters, since int() allows a lot of bizarre stuff.
    if not self._DECIMAL_DIGITS.issuperset(octet_str):
        raise ValueError
    octet_int = int(octet_str, 10)
    # Disallow leading zeroes, because no clear standard exists on
    # whether these should be interpreted as decimal or octal.
    if octet_int > 255 or (octet_str[0] == '0' and len(octet_str) > 1):
        raise ValueError
    return octet_int
225,665
Turns a 32-bit integer into dotted decimal notation. Args: ip_int: An integer, the IP address. Returns: The IP address as a string in dotted decimal notation.
def _string_from_ip_int(self, ip_int):
    octets = []
    for _ in xrange(4):
        octets.insert(0, str(ip_int & 0xFF))
        ip_int >>= 8
    return '.'.join(octets)
225,666
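A quick check of the conversion. The method body never reads self, and the function appears here as a plain def, so under Python 2 (the file uses xrange) it can be exercised directly by passing None for self:

print(_string_from_ip_int(None, 0xC0A80001))  # '192.168.0.1'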
Turn an IPv6 ip_str into an integer. Args: ip_str: A string, the IPv6 ip_str. Returns: A long, the IPv6 ip_str. Raises: AddressValueError: if ip_str isn't a valid IPv6 Address.
def _ip_int_from_string(self, ip_str):
    parts = ip_str.split(':')

    # An IPv6 address needs at least 2 colons (3 parts).
    if len(parts) < 3:
        raise AddressValueError(ip_str)

    # If the address has an IPv4-style suffix, convert it to hexadecimal.
    if '.' in parts[-1]:
        ipv4_int = IPv4Address(parts.pop())._ip
        parts.append('%x' % ((ipv4_int >> 16) & 0xFFFF))
        parts.append('%x' % (ipv4_int & 0xFFFF))

    # An IPv6 address can't have more than 8 colons (9 parts).
    if len(parts) > self._HEXTET_COUNT + 1:
        raise AddressValueError(ip_str)

    # Disregarding the endpoints, find '::' with nothing in between.
    # This indicates that a run of zeroes has been skipped.
    try:
        skip_index, = (
            [i for i in xrange(1, len(parts) - 1) if not parts[i]] or
            [None])
    except ValueError:
        # Can't have more than one '::'
        raise AddressValueError(ip_str)

    # parts_hi is the number of parts to copy from above/before the '::'
    # parts_lo is the number of parts to copy from below/after the '::'
    if skip_index is not None:
        # If we found a '::', then check if it also covers the endpoints.
        parts_hi = skip_index
        parts_lo = len(parts) - skip_index - 1
        if not parts[0]:
            parts_hi -= 1
            if parts_hi:
                raise AddressValueError(ip_str)  # ^: requires ^::
        if not parts[-1]:
            parts_lo -= 1
            if parts_lo:
                raise AddressValueError(ip_str)  # :$ requires ::$
        parts_skipped = self._HEXTET_COUNT - (parts_hi + parts_lo)
        if parts_skipped < 1:
            raise AddressValueError(ip_str)
    else:
        # Otherwise, allocate the entire address to parts_hi. The endpoints
        # could still be empty, but _parse_hextet() will check for that.
        if len(parts) != self._HEXTET_COUNT:
            raise AddressValueError(ip_str)
        parts_hi = len(parts)
        parts_lo = 0
        parts_skipped = 0

    try:
        # Now, parse the hextets into a 128-bit integer.
        ip_int = 0L
        for i in xrange(parts_hi):
            ip_int <<= 16
            ip_int |= self._parse_hextet(parts[i])
        ip_int <<= 16 * parts_skipped
        for i in xrange(-parts_lo, 0):
            ip_int <<= 16
            ip_int |= self._parse_hextet(parts[i])
        return ip_int
    except ValueError:
        raise AddressValueError(ip_str)
225,667
Convert an IPv6 hextet string into an integer. Args: hextet_str: A string, the number to parse. Returns: The hextet as an integer. Raises: ValueError: if the input isn't strictly a hex number from [0..FFFF].
def _parse_hextet(self, hextet_str):
    # Whitelist the characters, since int() allows a lot of bizarre stuff.
    if not self._HEX_DIGITS.issuperset(hextet_str):
        raise ValueError
    if len(hextet_str) > 4:
        raise ValueError
    hextet_int = int(hextet_str, 16)
    if hextet_int > 0xFFFF:
        raise ValueError
    return hextet_int
225,668
Compresses a list of hextets. Compresses a list of strings, replacing the longest continuous sequence of "0" in the list with "" and adding empty strings at the beginning or at the end of the string such that subsequently calling ":".join(hextets) will produce the compressed version of the IPv6 address. Args: hextets: A list of strings, the hextets to compress. Returns: A list of strings.
def _compress_hextets(self, hextets):
    best_doublecolon_start = -1
    best_doublecolon_len = 0
    doublecolon_start = -1
    doublecolon_len = 0
    for index in range(len(hextets)):
        if hextets[index] == '0':
            doublecolon_len += 1
            if doublecolon_start == -1:
                # Start of a sequence of zeros.
                doublecolon_start = index
            if doublecolon_len > best_doublecolon_len:
                # This is the longest sequence of zeros so far.
                best_doublecolon_len = doublecolon_len
                best_doublecolon_start = doublecolon_start
        else:
            doublecolon_len = 0
            doublecolon_start = -1

    if best_doublecolon_len > 1:
        best_doublecolon_end = (best_doublecolon_start +
                                best_doublecolon_len)
        # For zeros at the end of the address.
        if best_doublecolon_end == len(hextets):
            hextets += ['']
        hextets[best_doublecolon_start:best_doublecolon_end] = ['']
        # For zeros at the beginning of the address.
        if best_doublecolon_start == 0:
            hextets = [''] + hextets

    return hextets
225,669
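A worked example of the compression. Since the method body never reads self and the function appears here as a plain def, it can be called with None for self:

hextets = ['2001', 'db8', '0', '0', '0', '0', '0', '1']
compressed = _compress_hextets(None, hextets)
print(compressed)            # ['2001', 'db8', '', '1'] -- the zero run became one '' marker
print(':'.join(compressed))  # 2001:db8::1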
Turns a 128-bit integer into hexadecimal notation. Args: ip_int: An integer, the IP address. Returns: A string, the hexadecimal representation of the address. Raises: ValueError: The address is bigger than 128 bits of all ones.
def _string_from_ip_int(self, ip_int=None):
    if not ip_int and ip_int != 0:
        ip_int = int(self._ip)

    if ip_int > self._ALL_ONES:
        raise ValueError('IPv6 address is too large')

    hex_str = '%032x' % ip_int
    hextets = []
    for x in range(0, 32, 4):
        hextets.append('%x' % int(hex_str[x:x + 4], 16))

    hextets = self._compress_hextets(hextets)
    return ':'.join(hextets)
225,670
Expand a shortened IPv6 address. Returns: A string, the expanded IPv6 address.
def _explode_shorthand_ip_string(self):
    if isinstance(self, _BaseNet):
        ip_str = str(self.ip)
    else:
        ip_str = str(self)

    ip_int = self._ip_int_from_string(ip_str)
    parts = []
    for i in xrange(self._HEXTET_COUNT):
        parts.append('%04x' % (ip_int & 0xFFFF))
        ip_int >>= 16
    parts.reverse()
    if isinstance(self, _BaseNet):
        return '%s/%d' % (':'.join(parts), self.prefixlen)
    return ':'.join(parts)
225,671
Apply causal discovery on observational data using CCDr. Args: data (pandas.DataFrame): DataFrame containing the data Returns: networkx.DiGraph: Solution given by the CCDR algorithm.
def create_graph_from_data(self, data, **kwargs):
    # Building setup w/ arguments.
    self.arguments['{VERBOSE}'] = str(self.verbose).upper()
    results = self._run_ccdr(data, verbose=self.verbose)
    return nx.relabel_nodes(nx.DiGraph(results),
                            {idx: i for idx, i in enumerate(data.columns)})
226,476
Execute a subprocess to check the package's availability. Args: package (str): Name of the package to be tested. Returns: bool: `True` if the package is available, `False` otherwise
def check_R_package(self, package):
    test_package = not bool(launch_R_script(
        "{}/R_templates/test_import.R".format(os.path.dirname(os.path.realpath(__file__))),
        {"{package}": package}, verbose=True))
    return test_package
226,485
Use CGNN to create a graph from scratch. All the possible structures are tested, which leads to a super exponential complexity. It would be preferable to start from a graph skeleton for large graphs. Args: data (pandas.DataFrame): Observational data on which causal discovery has to be performed. Returns: networkx.DiGraph: Solution given by CGNN.
def create_graph_from_data(self, data):
    warnings.warn("An exhaustive search of the causal structure of CGNN without"
                  " skeleton is super-exponential in the number of variables.")
    # Building all possible candidates:
    nb_vars = len(list(data.columns))
    data = scale(data.values).astype('float32')
    candidates = [np.reshape(np.array(i), (nb_vars, nb_vars))
                  for i in itertools.product([0, 1], repeat=nb_vars * nb_vars)
                  if (np.trace(np.reshape(np.array(i), (nb_vars, nb_vars))) == 0
                      and nx.is_directed_acyclic_graph(
                          nx.DiGraph(np.reshape(np.array(i), (nb_vars, nb_vars)))))]
    warnings.warn("A total of {} graphs will be evaluated.".format(len(candidates)))
    scores = [parallel_graph_evaluation(data, i, nh=self.nh, nb_runs=self.nb_runs,
                                        gpu=self.gpu, nb_jobs=self.nb_jobs, lr=self.lr,
                                        train_epochs=self.train_epochs,
                                        test_epochs=self.test_epochs,
                                        verbose=self.verbose)
              for i in candidates]
    final_candidate = candidates[scores.index(min(scores))]
    output = np.zeros(final_candidate.shape)

    # Retrieve the confidence score on each edge.
    for (i, j), x in np.ndenumerate(final_candidate):
        if x > 0:
            cand = final_candidate.copy()  # copy, so the best candidate is not mutated
            cand[i, j] = 0
            # list.index() cannot compare numpy arrays; search explicitly instead
            idx_cand = next(k for k, c in enumerate(candidates)
                            if np.array_equal(c, cand))
            output[i, j] = min(scores) - scores[idx_cand]

    return nx.relabel_nodes(nx.DiGraph(output),
                            {idx: i for idx, i in enumerate(data.columns)})
226,498
Projects the causal pair to the RKHS using the sampled kernel approximation. Args: x (np.ndarray): Variable 1 y (np.ndarray): Variable 2 Returns: np.ndarray: projected empirical distributions into a single fixed-size vector.
def featurize_row(self, x, y):
    x = x.ravel()
    y = y.ravel()
    b = np.ones(x.shape)
    dx = np.cos(np.dot(self.W2, np.vstack((x, b)))).mean(1)
    dy = np.cos(np.dot(self.W2, np.vstack((y, b)))).mean(1)
    if sum(dx) > sum(dy):
        return np.hstack((dx, dy,
                          np.cos(np.dot(self.W, np.vstack((x, y, b)))).mean(1)))
    else:
        return np.hstack((dx, dy,
                          np.cos(np.dot(self.W, np.vstack((y, x, b)))).mean(1)))
226,516
Train the model. Args: x (pd.DataFrame): CEPC format dataframe containing the pairs y (pd.DataFrame or np.ndarray): labels associated to the pairs
def fit(self, x, y):
    train = np.vstack((np.array([self.featurize_row(row.iloc[0], row.iloc[1])
                                 for idx, row in x.iterrows()]),
                       np.array([self.featurize_row(row.iloc[1], row.iloc[0])
                                 for idx, row in x.iterrows()])))
    labels = np.vstack((y, -y)).ravel()
    verbose = 1 if self.verbose else 0
    self.clf = CLF(verbose=verbose,
                   min_samples_leaf=self.L,
                   n_estimators=self.E,
                   max_depth=self.max_depth,
                   n_jobs=self.n_jobs).fit(train, labels)
226,517
Predict the causal score using a trained RCC model. Args: x (numpy.array or pandas.DataFrame or pandas.Series): First variable or dataset. y (numpy.array): second variable (optional depending on the first argument). Returns: float: Causation score (Value : 1 if a->b and -1 if b->a)
def predict_proba(self, x, y=None, **kwargs):
    if self.clf is None:
        raise ValueError("Model has to be trained before making predictions.")
    # use isinstance() rather than identity checks against the classes themselves
    if isinstance(x, pandas.Series):
        input_ = self.featurize_row(x.iloc[0], x.iloc[1]).reshape((1, -1))
    elif isinstance(x, pandas.DataFrame):
        input_ = np.array([self.featurize_row(row.iloc[0], row.iloc[1])
                           for idx, row in x.iterrows()])
    elif y is not None:
        input_ = self.featurize_row(x, y).reshape((1, -1))
    else:
        raise TypeError("DataType not understood.")
    return self.clf.predict(input_)
226,518
Build a skeleton using a pairwise independence criterion. Args: data (pandas.DataFrame): Raw data table Returns: networkx.Graph: Undirected graph representing the skeleton.
def predict_undirected_graph(self, data):
    graph = Graph()
    for idx_i, i in enumerate(data.columns):
        for idx_j, j in enumerate(data.columns[idx_i + 1:]):
            score = self.predict(data[i].values, data[j].values)
            if abs(score) > 0.001:
                graph.add_edge(i, j, weight=score)
    return graph
226,528
Run feature selection for one node: wrapper around ``self.predict_features``. Args: df_data (pandas.DataFrame): All the observational data target (str): Name of the target variable idx (int): (optional) For printing purposes Returns: list: scores of each feature relatively to the target
def run_feature_selection(self, df_data, target, idx=0, **kwargs):
    list_features = list(df_data.columns.values)
    list_features.remove(target)
    df_target = pd.DataFrame(df_data[target], columns=[target])
    df_features = df_data[list_features]
    return self.predict_features(df_features, df_target, idx=idx, **kwargs)
226,529
Predict the skeleton of the graph from raw data. Returns iteratively the feature selection algorithm on each node. Args: df_data (pandas.DataFrame): data to construct a graph from threshold (float): cutoff value for feature selection scores kwargs (dict): additional arguments for algorithms Returns: networkx.Graph: predicted skeleton of the graph.
def predict(self, df_data, threshold=0.05, **kwargs):
    nb_jobs = kwargs.get("nb_jobs", SETTINGS.NB_JOBS)
    list_nodes = list(df_data.columns.values)
    if nb_jobs != 1:
        result_feature_selection = Parallel(n_jobs=nb_jobs)(
            delayed(self.run_feature_selection)(df_data, node, idx, **kwargs)
            for idx, node in enumerate(list_nodes))
    else:
        result_feature_selection = [self.run_feature_selection(df_data, node, idx, **kwargs)
                                    for idx, node in enumerate(list_nodes)]
    for idx, i in enumerate(result_feature_selection):
        try:
            i.insert(idx, 0)
        except AttributeError:  # if results are numpy arrays
            result_feature_selection[idx] = np.insert(i, idx, 0)
    matrix_results = np.array(result_feature_selection)
    matrix_results *= matrix_results.transpose()
    np.fill_diagonal(matrix_results, 0)
    matrix_results /= 2

    graph = nx.Graph()
    for (i, j), x in np.ndenumerate(matrix_results):
        if matrix_results[i, j] > threshold:
            graph.add_edge(list_nodes[i], list_nodes[j],
                           weight=matrix_results[i, j])
    for node in list_nodes:
        if node not in graph.nodes():
            graph.add_node(node)
    return graph
226,530
Run GIES on an undirected graph. Args: data (pandas.DataFrame): DataFrame containing the data graph (networkx.Graph): Skeleton of the graph to orient Returns: networkx.DiGraph: Solution given by the GIES algorithm.
def orient_undirected_graph(self, data, graph):
    # Building setup w/ arguments.
    self.arguments['{VERBOSE}'] = str(self.verbose).upper()
    self.arguments['{SCORE}'] = self.scores[self.score]

    fe = DataFrame(nx.adj_matrix(graph, weight=None).todense())
    fg = DataFrame(1 - fe.values)

    results = self._run_gies(data, fixedGaps=fg, verbose=self.verbose)
    return nx.relabel_nodes(nx.DiGraph(results),
                            {idx: i for idx, i in enumerate(data.columns)})
226,532
Run the GIES algorithm. Args: data (pandas.DataFrame): DataFrame containing the data Returns: networkx.DiGraph: Solution given by the GIES algorithm.
def create_graph_from_data(self, data):
    # Building setup w/ arguments.
    self.arguments['{SCORE}'] = self.scores[self.score]
    self.arguments['{VERBOSE}'] = str(self.verbose).upper()
    results = self._run_gies(data, verbose=self.verbose)
    return nx.relabel_nodes(nx.DiGraph(results),
                            {idx: i for idx, i in enumerate(data.columns)})
226,533
Compute the RECI fit score Args: x (numpy.ndarray): Variable 1 y (numpy.ndarray): Variable 2 Returns: float: RECI fit score
def b_fit_score(self, x, y):
    x = np.reshape(minmax_scale(x), (-1, 1))
    y = np.reshape(minmax_scale(y), (-1, 1))
    poly = PolynomialFeatures(degree=self.degree)
    poly_x = poly.fit_transform(x)
    poly_x[:, 1] = 0
    poly_x[:, 2] = 0
    regressor = LinearRegression()
    regressor.fit(poly_x, y)
    y_predict = regressor.predict(poly_x)
    error = mean_squared_error(y_predict, y)
    return error
226,551
Infer causal relationships between 2 variables using the CDS statistic Args: a (numpy.ndarray): Variable 1 b (numpy.ndarray): Variable 2 Returns: float: Causation score (Value : 1 if a->b and -1 if b->a)
def predict_proba(self, a, b, **kwargs):
    return self.cds_score(b, a) - self.cds_score(a, b)
226,557
Computes the cds statistic from variable 1 to variable 2 Args: x_te (numpy.ndarray): Variable 1 y_te (numpy.ndarray): Variable 2 Returns: float: CDS fit score
def cds_score(self, x_te, y_te):
    if type(x_te) == np.ndarray:
        x_te, y_te = pd.Series(x_te.reshape(-1)), pd.Series(y_te.reshape(-1))
    xd, yd = discretized_sequences(x_te, y_te, self.ffactor, self.maxdev)
    cx = Counter(xd)
    cy = Counter(yd)
    yrange = sorted(cy.keys())
    ny = len(yrange)
    py = np.array([cy[i] for i in yrange], dtype=float)
    py = py / py.sum()
    pyx = []
    for a in cx:
        if cx[a] > self.minc:
            yx = y_te[xd == a]
            # if not numerical(ty):
            #     cyx = Counter(yx)
            #     pyxa = np.array([cyx[i] for i in yrange], dtype=float)
            #     pyxa.sort()
            if count_unique(y_te) > len_discretized_values(y_te, "Numerical",
                                                           self.ffactor, self.maxdev):
                yx = (yx - np.mean(yx)) / np.std(y_te)
                yx = discretized_sequence(yx, "Numerical", self.ffactor,
                                          self.maxdev, norm=False)
                cyx = Counter(yx.astype(int))
                pyxa = np.array([cyx[i] for i in discretized_values(y_te, "Numerical",
                                                                    self.ffactor,
                                                                    self.maxdev)],
                                dtype=float)
            else:
                cyx = Counter(yx)
                pyxa = [cyx[i] for i in yrange]
                pyxax = np.array([0] * (ny - 1) + pyxa + [0] * (ny - 1), dtype=float)
                xcorr = [sum(py * pyxax[i:i + ny]) for i in range(2 * ny - 1)]
                imax = xcorr.index(max(xcorr))
                pyxa = np.array([0] * (2 * ny - 2 - imax) + pyxa + [0] * imax,
                                dtype=float)
            assert pyxa.sum() == cx[a]
            pyxa = pyxa / pyxa.sum()
            pyx.append(pyxa)

    if len(pyx) == 0:
        return 0

    pyx = np.array(pyx)
    pyx = pyx - pyx.mean(axis=0)
    return np.std(pyx)
226,558
Prediction method for pairwise causal inference using the ANM model. Args: a (numpy.ndarray): Variable 1 b (numpy.ndarray): Variable 2 Returns: float: Causation score (Value : 1 if a->b and -1 if b->a)
def predict_proba(self, a, b, **kwargs):
    a = scale(a).reshape((-1, 1))
    b = scale(b).reshape((-1, 1))
    return self.anm_score(b, a) - self.anm_score(a, b)
226,563
Compute the fitness score of the ANM model in the x->y direction. Args: x (numpy.ndarray): Variable seen as cause y (numpy.ndarray): Variable seen as effect Returns: float: ANM fit score
def anm_score(self, x, y):
    gp = GaussianProcessRegressor().fit(x, y)
    y_predict = gp.predict(x)
    indepscore = normalized_hsic(y_predict - y, x)
    return indepscore
226,564
Run PC on an undirected graph. Args: data (pandas.DataFrame): DataFrame containing the data graph (networkx.Graph): Skeleton of the graph to orient Returns: networkx.DiGraph: Solution given by PC on the given skeleton.
def orient_undirected_graph(self, data, graph, **kwargs):
    # Building setup w/ arguments.
    self.arguments['{CITEST}'] = self.dir_CI_test[self.CI_test]
    self.arguments['{METHOD_INDEP}'] = self.dir_method_indep[self.method_indep]
    self.arguments['{DIRECTED}'] = 'TRUE'
    self.arguments['{ALPHA}'] = str(self.alpha)
    self.arguments['{NJOBS}'] = str(self.nb_jobs)
    self.arguments['{VERBOSE}'] = str(self.verbose).upper()

    fe = DataFrame(nx.adj_matrix(graph, weight=None).todense())
    fg = DataFrame(1 - fe.values)

    results = self._run_pc(data, fixedEdges=fe, fixedGaps=fg, verbose=self.verbose)
    return nx.relabel_nodes(nx.DiGraph(results),
                            {idx: i for idx, i in enumerate(data.columns)})
226,566
Run the PC algorithm. Args: data (pandas.DataFrame): DataFrame containing the data Returns: networkx.DiGraph: Solution given by PC on the given data.
def create_graph_from_data(self, data, **kwargs):
    # Building setup w/ arguments.
    self.arguments['{CITEST}'] = self.dir_CI_test[self.CI_test]
    self.arguments['{METHOD_INDEP}'] = self.dir_method_indep[self.method_indep]
    self.arguments['{DIRECTED}'] = 'TRUE'
    self.arguments['{ALPHA}'] = str(self.alpha)
    self.arguments['{NJOBS}'] = str(self.nb_jobs)
    self.arguments['{VERBOSE}'] = str(self.verbose).upper()
    results = self._run_pc(data, verbose=self.verbose)
    return nx.relabel_nodes(nx.DiGraph(results),
                            {idx: i for idx, i in enumerate(data.columns)})
226,567
Compute the fit score of the model in the x->y direction. Args: x (numpy.ndarray): Variable seen as cause y (numpy.ndarray): Variable seen as effect Returns: float: BF fit score
def b_fit_score(self, x, y):
    x = np.reshape(scale(x), (-1, 1))
    y = np.reshape(scale(y), (-1, 1))
    gp = GaussianProcessRegressor().fit(x, y)
    y_predict = gp.predict(x)
    error = mean_squared_error(y_predict, y)
    return error
226,569
Predict the graph skeleton. Args: data (pandas.DataFrame): observational data alpha (float): regularization parameter max_iter (int): maximum number of iterations Returns: networkx.Graph: Graph skeleton
def predict(self, data, alpha=0.01, max_iter=2000, **kwargs):
    edge_model = GraphLasso(alpha=alpha, max_iter=max_iter)
    edge_model.fit(data.values)
    return nx.relabel_nodes(nx.DiGraph(edge_model.get_precision()),
                            {idx: i for idx, i in enumerate(data.columns)})
226,570
For one variable, predict its neighbouring nodes. Args: df_features (pandas.DataFrame): df_target (pandas.Series): idx (int): (optional) for printing purposes kwargs (dict): additional options for algorithms Returns: list: scores of each feature relatively to the target
def predict_features(self, df_features, df_target, idx=0, **kwargs):
    y = np.transpose(df_target.values)
    X = np.transpose(df_features.values)
    path, beta, A, lam = hsiclasso(X, y)
    return beta
226,571
Tests the loop condition based on the new results and the parameters. Args: xy (list): list containing all the results for one set of samples yx (list): list containing all the results for the other set. Returns: bool: True if the loop has to continue, False otherwise.
def loop(self, xy, yx):
    if len(xy) > 0:
        self.iter += self.runs_per_iter
    if self.iter < 2:
        return True
    t_test, self.p_value = ttest_ind(xy, yx, equal_var=False)
    if self.p_value > self.threshold and self.iter < self.max_iter:
        return True
    else:
        return False
226,586
Compute the test statistic Args: a (array-like): Variable 1 b (array-like): Variable 2 Returns: float: test statistic
def predict(self, a, b):
    a = np.array(a).reshape((-1, 1))
    b = np.array(b).reshape((-1, 1))
    return (mutual_info_regression(a, b.reshape((-1,))) +
            mutual_info_regression(b, a.reshape((-1,)))) / 2
226,591
Compute the test statistic Args: a (array-like): Variable 1 b (array-like): Variable 2 Returns: float: test statistic
def predict(self, a, b):
    a = np.array(a).reshape((-1, 1))
    b = np.array(b).reshape((-1, 1))
    return sp.kendalltau(a, b)[0]
226,592
Compute the test statistic Args: a (array-like): Variable 1 b (array-like): Variable 2 sig (list): [0] (resp [1]) is kernel size for a(resp b) (set to median distance if -1) maxpnt (int): maximum number of points used, for computational time Returns: float: test statistic
def predict(self, a, b, sig=[-1, -1], maxpnt=500):
    a = (a - np.mean(a)) / np.std(a)
    b = (b - np.mean(b)) / np.std(b)
    return FastHsicTestGamma(a, b, sig, maxpnt)
226,593
Generic dataset prediction function. Runs the score independently on all pairs. Args: x (pandas.DataFrame): a CEPC format Dataframe. kwargs (dict): additional arguments for the algorithms Returns: pandas.DataFrame: a Dataframe with the predictions.
def predict_dataset(self, x, **kwargs):
    printout = kwargs.get("printout", None)
    pred = []
    res = []
    x.columns = ["A", "B"]
    for idx, row in x.iterrows():
        a = scale(row['A'].reshape((len(row['A']), 1)))
        b = scale(row['B'].reshape((len(row['B']), 1)))
        pred.append(self.predict_proba(a, b, idx=idx))
        if printout is not None:
            res.append([row['SampleID'], pred[-1]])
            DataFrame(res, columns=['SampleID', 'Predictions']).to_csv(
                printout, index=False)
    return pred
226,595
Run the algorithm on an undirected graph. Args: data (pandas.DataFrame): DataFrame containing the data graph (networkx.Graph): Skeleton of the graph to orient Returns: networkx.DiGraph: Solution on the given skeleton.
def orient_undirected_graph(self, data, graph):
    # Building setup w/ arguments.
    self.arguments['{VERBOSE}'] = str(self.verbose).upper()
    self.arguments['{SCORE}'] = self.score
    self.arguments['{BETA}'] = str(self.beta)
    self.arguments['{OPTIM}'] = str(self.optim).upper()
    self.arguments['{ALPHA}'] = str(self.alpha)

    whitelist = DataFrame(list(nx.edges(graph)), columns=["from", "to"])
    # .todense() (not .to_dense()) is the scipy sparse-matrix method
    blacklist = DataFrame(list(nx.edges(nx.DiGraph(DataFrame(
        -nx.adj_matrix(graph, weight=None).todense() + 1,
        columns=list(graph.nodes()),
        index=list(graph.nodes()))))), columns=["from", "to"])

    results = self._run_bnlearn(data, whitelist=whitelist,
                                blacklist=blacklist, verbose=self.verbose)
    return nx.relabel_nodes(nx.DiGraph(results),
                            {idx: i for idx, i in enumerate(data.columns)})
226,598
Run the algorithm on a directed graph. Args: data (pandas.DataFrame): DataFrame containing the data graph (networkx.DiGraph): Skeleton of the graph to orient Returns: networkx.DiGraph: Solution on the given skeleton. .. warning:: The algorithm is run on the skeleton of the given graph.
def orient_directed_graph(self, data, graph):
    warnings.warn("The algorithm is run on the skeleton of the given graph.")
    return self.orient_undirected_graph(data, nx.Graph(graph))
226,599
Run the algorithm on data. Args: data (pandas.DataFrame): DataFrame containing the data Returns: networkx.DiGraph: Solution given by the algorithm.
def create_graph_from_data(self, data):
    # Building setup w/ arguments.
    self.arguments['{SCORE}'] = self.score
    self.arguments['{VERBOSE}'] = str(self.verbose).upper()
    self.arguments['{BETA}'] = str(self.beta)
    self.arguments['{OPTIM}'] = str(self.optim).upper()
    self.arguments['{ALPHA}'] = str(self.alpha)

    results = self._run_bnlearn(data, verbose=self.verbose)
    graph = nx.DiGraph()
    graph.add_edges_from(results)
    return graph
226,600
Runs Jarfo independently on all pairs. Args: df (pandas.DataFrame): a CEPC format Dataframe. Returns: pandas.DataFrame: a Dataframe with the predictions.
def predict_dataset(self, df):
    if len(list(df.columns)) == 2:
        df.columns = ["A", "B"]
    if self.model is None:
        raise AssertionError("Model has not been trained before predictions")
    df2 = DataFrame()
    for idx, row in df.iterrows():
        df2 = df2.append(row, ignore_index=True)
        df2 = df2.append({'A': row["B"], 'B': row["A"]}, ignore_index=True)
    return predict.predict(deepcopy(df2), deepcopy(self.model))[::2]
226,640
Use Jarfo to predict the causal direction of a pair of vars. Args: a (numpy.ndarray): Variable 1 b (numpy.ndarray): Variable 2 idx (int): (optional) index number for printing purposes Returns: float: Causation score (Value : 1 if a->b and -1 if b->a)
def predict_proba(self, a, b, idx=0, **kwargs):
    return self.predict_dataset(DataFrame([[a, b]], columns=['A', 'B']))
226,641
Apply deconvolution to a networkx graph. Args: g (networkx.Graph): Graph to apply deconvolution to alg (str): Algorithm to use ('aracne', 'clr', 'nd') kwargs (dict): extra options for algorithms Returns: networkx.Graph: graph with undirected links removed.
def remove_indirect_links(g, alg="aracne", **kwargs):
    alg = {"aracne": aracne,
           "nd": network_deconvolution,
           "clr": clr}[alg]
    mat = np.array(nx.adjacency_matrix(g).todense())
    return nx.relabel_nodes(nx.DiGraph(alg(mat, **kwargs)),
                            {idx: i for idx, i in enumerate(list(g.nodes()))})
226,645