Column statistics (from the dataset viewer): code 26-870k chars; docstring 1-65.6k chars; func_name 1-194 chars; language 1 class; repo 8-68 chars; path 5-194 chars; url 46-254 chars; license 4 classes.

code | docstring | func_name | language | repo | path | url | license
---|---|---|---|---|---|---|---
def handle_abort(signal, frame) -> None:
"""Handle interrupt and abort scan.
Args:
signal: TBD
frame: TBD
"""
log = logging.getLogger(f"spiderfoot.{__name__}")
global dbh
global scanId
if scanId and dbh:
log.info(f"Aborting scan [{scanId}] ...")
dbh.scanInstanceSet(scanId, None, None, "ABORTED")
sys.exit(-1) | Handle interrupt and abort scan.
Args:
signal: TBD
frame: TBD | handle_abort | python | smicallef/spiderfoot | sf.py | https://github.com/smicallef/spiderfoot/blob/master/sf.py | MIT |
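
For context on how a handler like `handle_abort` is wired up: it is normally registered with the standard `signal` module. The sketch below is a minimal, self-contained illustration of that pattern; the logger name and the simplified handler body are stand-ins, not SpiderFoot's actual wiring.

```python
import logging
import signal
import sys

def handle_abort(signum, frame) -> None:
    """Log the interrupt and exit; a stand-in for the scan-aborting handler."""
    logging.getLogger("spiderfoot.example").info("Interrupt received, aborting ...")
    sys.exit(-1)

if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    # Register the handler so Ctrl-C (SIGINT) and `kill` (SIGTERM) abort cleanly.
    signal.signal(signal.SIGINT, handle_abort)
    signal.signal(signal.SIGTERM, handle_abort)
    signal.pause()  # block until a signal arrives (POSIX only)
```
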
def __init__(self: 'SpiderFootWebUi', web_config: dict, config: dict, loggingQueue: 'logging.handlers.QueueListener' = None) -> None:
"""Initialize web server.
Args:
web_config (dict): config settings for web interface (interface, port, root path)
config (dict): SpiderFoot config
loggingQueue: TBD
Raises:
TypeError: arg type is invalid
ValueError: arg value is invalid
"""
if not isinstance(config, dict):
raise TypeError(f"config is {type(config)}; expected dict()")
if not config:
raise ValueError("config is empty")
if not isinstance(web_config, dict):
raise TypeError(f"web_config is {type(web_config)}; expected dict()")
if not web_config:
raise ValueError("web_config is empty")
self.docroot = web_config.get('root', '/').rstrip('/')
# 'config' supplied will be the defaults, let's supplement them
# now with any configuration which may have previously been saved.
self.defaultConfig = deepcopy(config)
dbh = SpiderFootDb(self.defaultConfig, init=True)
sf = SpiderFoot(self.defaultConfig)
self.config = sf.configUnserialize(dbh.configGet(), self.defaultConfig)
# Set up logging
if loggingQueue is None:
self.loggingQueue = mp.Queue()
logListenerSetup(self.loggingQueue, self.config)
else:
self.loggingQueue = loggingQueue
logWorkerSetup(self.loggingQueue)
self.log = logging.getLogger(f"spiderfoot.{__name__}")
cherrypy.config.update({
'error_page.401': self.error_page_401,
'error_page.404': self.error_page_404,
'request.error_response': self.error_page
})
csp = (
secure.ContentSecurityPolicy()
.default_src("'self'")
.script_src("'self'", "'unsafe-inline'", "blob:")
.style_src("'self'", "'unsafe-inline'")
.base_uri("'self'")
.connect_src("'self'", "data:")
.frame_src("'self'", 'data:')
.img_src("'self'", "data:")
)
secure_headers = secure.Secure(
server=secure.Server().set("server"),
cache=secure.CacheControl().must_revalidate(),
csp=csp,
referrer=secure.ReferrerPolicy().no_referrer(),
)
cherrypy.config.update({
"tools.response_headers.on": True,
"tools.response_headers.headers": secure_headers.framework.cherrypy()
}) | Initialize web server.
Args:
web_config (dict): config settings for web interface (interface, port, root path)
config (dict): SpiderFoot config
loggingQueue: TBD
Raises:
TypeError: arg type is invalid
ValueError: arg value is invalid | __init__ | python | smicallef/spiderfoot | sfwebui.py | https://github.com/smicallef/spiderfoot/blob/master/sfwebui.py | MIT |
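
The `tools.response_headers` settings above are CherryPy's standard mechanism for attaching headers to every response; SpiderFoot feeds it the header list built by the `secure` package. A minimal standalone sketch, assuming hard-coded header tuples instead of `secure`:

```python
import cherrypy

class HelloApp:
    @cherrypy.expose
    def index(self) -> str:
        return "hello"

if __name__ == "__main__":
    # Attach static security headers to every response via the
    # response_headers tool; SpiderFoot builds these dynamically instead.
    cherrypy.config.update({
        "tools.response_headers.on": True,
        "tools.response_headers.headers": [
            ("X-Frame-Options", "DENY"),
            ("Referrer-Policy", "no-referrer"),
        ],
    })
    cherrypy.quickstart(HelloApp(), "/")
```
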
def error_page(self: 'SpiderFootWebUi') -> None:
"""Error page."""
cherrypy.response.status = 500
if self.config.get('_debug'):
cherrypy.response.body = _cperror.get_error_page(status=500, traceback=_cperror.format_exc())
else:
cherrypy.response.body = b"<html><body>Error</body></html>" | Error page. | error_page | python | smicallef/spiderfoot | sfwebui.py | https://github.com/smicallef/spiderfoot/blob/master/sfwebui.py | MIT |
def error_page_401(self: 'SpiderFootWebUi', status: str, message: str, traceback: str, version: str) -> str:
"""Unauthorized access HTTP 401 error page.
Args:
status (str): HTTP response status code and message
message (str): Error message
traceback (str): Error stack trace
version (str): CherryPy version
Returns:
str: HTML response
"""
return "" | Unauthorized access HTTP 401 error page.
Args:
status (str): HTTP response status code and message
message (str): Error message
traceback (str): Error stack trace
version (str): CherryPy version
Returns:
str: HTML response | error_page_401 | python | smicallef/spiderfoot | sfwebui.py | https://github.com/smicallef/spiderfoot/blob/master/sfwebui.py | MIT |
def error_page_404(self: 'SpiderFootWebUi', status: str, message: str, traceback: str, version: str) -> str:
"""Not found error page 404.
Args:
status (str): HTTP response status code and message
message (str): Error message
traceback (str): Error stack trace
version (str): CherryPy version
Returns:
str: HTTP response template
"""
templ = Template(filename='spiderfoot/templates/error.tmpl', lookup=self.lookup)
return templ.render(message='Not Found', docroot=self.docroot, status=status, version=__version__) | Not found error page 404.
Args:
status (str): HTTP response status code and message
message (str): Error message
traceback (str): Error stack trace
version (str): CherryPy version
Returns:
str: HTTP response template | error_page_404 | python | smicallef/spiderfoot | sfwebui.py | https://github.com/smicallef/spiderfoot/blob/master/sfwebui.py | MIT |
def jsonify_error(self: 'SpiderFootWebUi', status: str, message: str) -> dict:
"""Jsonify error response.
Args:
status (str): HTTP response status code and message
message (str): Error message
Returns:
dict: HTTP error response template
"""
cherrypy.response.headers['Content-Type'] = 'application/json'
cherrypy.response.status = status
return {
'error': {
'http_status': status,
'message': message,
}
} | Jsonify error response.
Args:
status (str): HTTP response status code and message
message (str): Error message
Returns:
dict: HTTP error response template | jsonify_error | python | smicallef/spiderfoot | sfwebui.py | https://github.com/smicallef/spiderfoot/blob/master/sfwebui.py | MIT |
def error(self: 'SpiderFootWebUi', message: str) -> None:
"""Show generic error page with error message.
Args:
message (str): error message
Returns:
None
"""
templ = Template(filename='spiderfoot/templates/error.tmpl', lookup=self.lookup)
return templ.render(message=message, docroot=self.docroot, version=__version__) | Show generic error page with error message.
Args:
message (str): error message
Returns:
None | error | python | smicallef/spiderfoot | sfwebui.py | https://github.com/smicallef/spiderfoot/blob/master/sfwebui.py | MIT |
def cleanUserInput(self: 'SpiderFootWebUi', inputList: list) -> list:
"""Convert data to HTML entities; except quotes and ampersands.
Args:
inputList (list): list of strings to sanitize
Returns:
list: sanitized input
Raises:
TypeError: inputList type was invalid
Todo:
Review all uses of this function, then remove it.
Use of this function is overloaded.
"""
if not isinstance(inputList, list):
raise TypeError(f"inputList is {type(inputList)}; expected list()")
ret = list()
for item in inputList:
if not item:
ret.append('')
continue
c = html.escape(item, True)
# Decode '&amp;' and '&quot;' HTML entities back to '&' and '"'
c = c.replace("&amp;", "&").replace("&quot;", '"')
ret.append(c)
return ret | Convert data to HTML entities; except quotes and ampersands.
Args:
inputList (list): list of strings to sanitize
Returns:
list: sanitized input
Raises:
TypeError: inputList type was invalid
Todo:
Review all uses of this function, then remove it.
Use of this function is overloaded. | cleanUserInput | python | smicallef/spiderfoot | sfwebui.py | https://github.com/smicallef/spiderfoot/blob/master/sfwebui.py | MIT |
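
The escaping that `cleanUserInput` relies on is standard-library behaviour: `html.escape(value, True)` encodes angle brackets, ampersands and quotes, and the two `replace` calls then restore `&` and `"`. A small sketch of that round trip, with an invented input string:

```python
import html

def sanitize(items: list) -> list:
    """Escape HTML but keep literal ampersands and double quotes (sketch)."""
    out = []
    for item in items:
        if not item:
            out.append("")
            continue
        escaped = html.escape(item, True)          # '<' -> '&lt;', '"' -> '&quot;', ...
        escaped = escaped.replace("&amp;", "&")    # restore ampersands
        escaped = escaped.replace("&quot;", '"')   # restore double quotes
        out.append(escaped)
    return out

print(sanitize(['<script>alert("x & y")</script>']))
# ['&lt;script&gt;alert("x & y")&lt;/script&gt;']
```
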
def searchBase(self: 'SpiderFootWebUi', id: str = None, eventType: str = None, value: str = None) -> list:
"""Search.
Args:
id (str): scan ID
eventType (str): TBD
value (str): TBD
Returns:
list: search results
"""
retdata = []
if not id and not eventType and not value:
return retdata
if not value:
value = ''
regex = ""
if value.startswith("/") and value.endswith("/"):
regex = value[1:len(value) - 1]
value = ""
value = value.replace('*', '%')
if value in [None, ""] and regex in [None, ""]:
value = "%"
regex = ""
dbh = SpiderFootDb(self.config)
criteria = {
'scan_id': id or '',
'type': eventType or '',
'value': value or '',
'regex': regex or '',
}
try:
data = dbh.search(criteria)
except Exception:
return retdata
for row in data:
lastseen = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(row[0]))
escapeddata = html.escape(row[1])
escapedsrc = html.escape(row[2])
retdata.append([lastseen, escapeddata, escapedsrc,
row[3], row[5], row[6], row[7], row[8], row[10],
row[11], row[4], row[13], row[14]])
return retdata | Search.
Args:
id (str): scan ID
eventType (str): TBD
value (str): TBD
Returns:
list: search results | searchBase | python | smicallef/spiderfoot | sfwebui.py | https://github.com/smicallef/spiderfoot/blob/master/sfwebui.py | MIT |
def buildExcel(self: 'SpiderFootWebUi', data: list, columnNames: list, sheetNameIndex: int = 0) -> str:
"""Convert supplied raw data into an Excel workbook.
Args:
data (list): Scan result as list
columnNames (list): column names
sheetNameIndex (int): TBD
Returns:
str: Excel workbook
"""
rowNums = dict()
workbook = openpyxl.Workbook()
defaultSheet = workbook.active
columnNames.pop(sheetNameIndex)
allowed_sheet_chars = string.ascii_uppercase + string.digits + '_'
for row in data:
sheetName = "".join([c for c in str(row.pop(sheetNameIndex)) if c.upper() in allowed_sheet_chars])
try:
sheet = workbook[sheetName]
except KeyError:
# Create sheet
workbook.create_sheet(sheetName)
sheet = workbook[sheetName]
# Write headers
for col_num, column_title in enumerate(columnNames, 1):
cell = sheet.cell(row=1, column=col_num)
cell.value = column_title
rowNums[sheetName] = 2
# Write row
for col_num, cell_value in enumerate(row, 1):
cell = sheet.cell(row=rowNums[sheetName], column=col_num)
cell.value = cell_value
rowNums[sheetName] += 1
if rowNums:
workbook.remove(defaultSheet)
# Sort sheets alphabetically
workbook._sheets.sort(key=lambda ws: ws.title)
# Save workbook
with BytesIO() as f:
workbook.save(f)
f.seek(0)
return f.read() | Convert supplied raw data into an Excel workbook.
Args:
data (list): Scan result as list
columnNames (list): column names
sheetNameIndex (int): TBD
Returns:
str: Excel workbook | buildExcel | python | smicallef/spiderfoot | sfwebui.py | https://github.com/smicallef/spiderfoot/blob/master/sfwebui.py | MIT |
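
The in-memory Excel technique in `buildExcel` (populate an openpyxl workbook, save it into a `BytesIO`, return the raw bytes) can be shown in isolation. A reduced sketch with an invented header row and data; it uses `Worksheet.append` rather than per-cell writes, which is equivalent for this purpose:

```python
from io import BytesIO
import openpyxl

def rows_to_xlsx(rows: list, headers: list) -> bytes:
    """Build a single-sheet workbook in memory and return it as bytes (sketch)."""
    workbook = openpyxl.Workbook()
    sheet = workbook.active
    sheet.title = "Results"
    sheet.append(headers)          # header row
    for row in rows:
        sheet.append(row)          # data rows
    with BytesIO() as buf:
        workbook.save(buf)
        buf.seek(0)
        return buf.read()

payload = rows_to_xlsx([["example.com", "INTERNET_NAME"]], ["Data", "Type"])
print(len(payload), "bytes of xlsx")
```
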
def scanexportlogs(self: 'SpiderFootWebUi', id: str, dialect: str = "excel") -> bytes:
"""Get scan log
Args:
id (str): scan ID
dialect (str): CSV dialect (default: excel)
Returns:
bytes: scan logs in CSV format
"""
dbh = SpiderFootDb(self.config)
try:
data = dbh.scanLogs(id, None, None, True)
except Exception:
return self.error("Scan ID not found.")
if not data:
return self.error("Scan ID not found.")
fileobj = StringIO()
parser = csv.writer(fileobj, dialect=dialect)
parser.writerow(["Date", "Component", "Type", "Event", "Event ID"])
for row in data:
parser.writerow([
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(row[0] / 1000)),
str(row[1]),
str(row[2]),
str(row[3]),
row[4]
])
cherrypy.response.headers['Content-Disposition'] = f"attachment; filename=SpiderFoot-{id}.log.csv"
cherrypy.response.headers['Content-Type'] = "application/csv"
cherrypy.response.headers['Pragma'] = "no-cache"
return fileobj.getvalue().encode('utf-8') | Get scan log
Args:
id (str): scan ID
dialect (str): CSV dialect (default: excel)
Returns:
bytes: scan logs in CSV format | scanexportlogs | python | smicallef/spiderfoot | sfwebui.py | https://github.com/smicallef/spiderfoot/blob/master/sfwebui.py | MIT |
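
The export pattern in `scanexportlogs` is plain standard library: write rows through `csv.writer` into a `StringIO`, then encode the result. A trimmed sketch without the CherryPy headers, using an invented log row (millisecond timestamps, as above):

```python
import csv
import time
from io import StringIO

def logs_to_csv(rows: list, dialect: str = "excel") -> bytes:
    """Render (timestamp_ms, component, type, event, event_id) rows as CSV bytes."""
    buf = StringIO()
    writer = csv.writer(buf, dialect=dialect)
    writer.writerow(["Date", "Component", "Type", "Event", "Event ID"])
    for ts_ms, component, etype, event, event_id in rows:
        writer.writerow([
            time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(ts_ms / 1000)),
            component, etype, event, event_id,
        ])
    return buf.getvalue().encode("utf-8")

print(logs_to_csv([(1700000000000, "sfp_dns", "INFO", "resolved host", 1)]).decode())
```
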
def scancorrelationsexport(self: 'SpiderFootWebUi', id: str, filetype: str = "csv", dialect: str = "excel") -> str:
"""Get scan correlation data in CSV or Excel format.
Args:
id (str): scan ID
filetype (str): type of file ("xlsx|excel" or "csv")
dialect (str): CSV dialect (default: excel)
Returns:
str: results in CSV or Excel format
"""
dbh = SpiderFootDb(self.config)
try:
scaninfo = dbh.scanInstanceGet(id)
scan_name = scaninfo[0]
except Exception:
return json.dumps(["ERROR", "Could not retrieve info for scan."]).encode('utf-8')
try:
correlations = dbh.scanCorrelationList(id)
except Exception:
return json.dumps(["ERROR", "Could not retrieve correlations for scan."]).encode('utf-8')
headings = ["Rule Name", "Correlation", "Risk", "Description"]
if filetype.lower() in ["xlsx", "excel"]:
rows = []
for row in correlations:
correlation = row[1]
rule_name = row[2]
rule_risk = row[3]
rule_description = row[5]
rows.append([rule_name, correlation, rule_risk, rule_description])
if scan_name:
fname = f"{scan_name}-SpiderFoot-correlations.xlsx"
else:
fname = "SpiderFoot-correlations.xlsx"
cherrypy.response.headers['Content-Disposition'] = f"attachment; filename={fname}"
cherrypy.response.headers['Content-Type'] = "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
cherrypy.response.headers['Pragma'] = "no-cache"
return self.buildExcel(rows, headings, sheetNameIndex=0)
if filetype.lower() == 'csv':
fileobj = StringIO()
parser = csv.writer(fileobj, dialect=dialect)
parser.writerow(headings)
for row in correlations:
correlation = row[1]
rule_name = row[2]
rule_risk = row[3]
rule_description = row[5]
parser.writerow([rule_name, correlation, rule_risk, rule_description])
if scan_name:
fname = f"{scan_name}-SpiderFoot-correlations.csv"
else:
fname = "SpiderFoot-correlations.csv"
cherrypy.response.headers['Content-Disposition'] = f"attachment; filename={fname}"
cherrypy.response.headers['Content-Type'] = "application/csv"
cherrypy.response.headers['Pragma'] = "no-cache"
return fileobj.getvalue().encode('utf-8')
return self.error("Invalid export filetype.") | Get scan correlation data in CSV or Excel format.
Args:
id (str): scan ID
filetype (str): type of file ("xlsx|excel" or "csv")
dialect (str): CSV dialect (default: excel)
Returns:
str: results in CSV or Excel format | scancorrelationsexport | python | smicallef/spiderfoot | sfwebui.py | https://github.com/smicallef/spiderfoot/blob/master/sfwebui.py | MIT |
def scaneventresultexport(self: 'SpiderFootWebUi', id: str, type: str, filetype: str = "csv", dialect: str = "excel") -> str:
"""Get scan event result data in CSV or Excel format
Args:
id (str): scan ID
type (str): TBD
filetype (str): type of file ("xlsx|excel" or "csv")
dialect (str): CSV dialect (default: excel)
Returns:
str: results in CSV or Excel format
"""
dbh = SpiderFootDb(self.config)
data = dbh.scanResultEvent(id, type)
if filetype.lower() in ["xlsx", "excel"]:
rows = []
for row in data:
if row[4] == "ROOT":
continue
lastseen = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(row[0]))
datafield = str(row[1]).replace("<SFURL>", "").replace("</SFURL>", "")
rows.append([lastseen, str(row[4]), str(row[3]), str(row[2]), row[13], datafield])
fname = "SpiderFoot.xlsx"
cherrypy.response.headers['Content-Disposition'] = f"attachment; filename={fname}"
cherrypy.response.headers['Content-Type'] = "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
cherrypy.response.headers['Pragma'] = "no-cache"
return self.buildExcel(rows, ["Updated", "Type", "Module", "Source",
"F/P", "Data"], sheetNameIndex=1)
if filetype.lower() == 'csv':
fileobj = StringIO()
parser = csv.writer(fileobj, dialect=dialect)
parser.writerow(["Updated", "Type", "Module", "Source", "F/P", "Data"])
for row in data:
if row[4] == "ROOT":
continue
lastseen = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(row[0]))
datafield = str(row[1]).replace("<SFURL>", "").replace("</SFURL>", "")
parser.writerow([lastseen, str(row[4]), str(row[3]), str(row[2]), row[13], datafield])
fname = "SpiderFoot.csv"
cherrypy.response.headers['Content-Disposition'] = f"attachment; filename={fname}"
cherrypy.response.headers['Content-Type'] = "application/csv"
cherrypy.response.headers['Pragma'] = "no-cache"
return fileobj.getvalue().encode('utf-8')
return self.error("Invalid export filetype.") | Get scan event result data in CSV or Excel format
Args:
id (str): scan ID
type (str): TBD
filetype (str): type of file ("xlsx|excel" or "csv")
dialect (str): CSV dialect (default: excel)
Returns:
str: results in CSV or Excel format | scaneventresultexport | python | smicallef/spiderfoot | sfwebui.py | https://github.com/smicallef/spiderfoot/blob/master/sfwebui.py | MIT |
def scaneventresultexportmulti(self: 'SpiderFootWebUi', ids: str, filetype: str = "csv", dialect: str = "excel") -> str:
"""Get scan event result data in CSV or Excel format for multiple scans
Args:
ids (str): comma separated list of scan IDs
filetype (str): type of file ("xlsx|excel" or "csv")
dialect (str): CSV dialect (default: excel)
Returns:
str: results in CSV or Excel format
"""
dbh = SpiderFootDb(self.config)
scaninfo = dict()
data = list()
scan_name = ""
for id in ids.split(','):
scaninfo[id] = dbh.scanInstanceGet(id)
if scaninfo[id] is None:
continue
scan_name = scaninfo[id][0]
data = data + dbh.scanResultEvent(id)
if not data:
return None
if filetype.lower() in ["xlsx", "excel"]:
rows = []
for row in data:
if row[4] == "ROOT":
continue
lastseen = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(row[0]))
datafield = str(row[1]).replace("<SFURL>", "").replace("</SFURL>", "")
rows.append([scaninfo[row[12]][0], lastseen, str(row[4]), str(row[3]),
str(row[2]), row[13], datafield])
if len(ids.split(',')) > 1 or scan_name == "":
fname = "SpiderFoot.xlsx"
else:
fname = scan_name + "-SpiderFoot.xlsx"
cherrypy.response.headers['Content-Disposition'] = f"attachment; filename={fname}"
cherrypy.response.headers['Content-Type'] = "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
cherrypy.response.headers['Pragma'] = "no-cache"
return self.buildExcel(rows, ["Scan Name", "Updated", "Type", "Module",
"Source", "F/P", "Data"], sheetNameIndex=2)
if filetype.lower() == 'csv':
fileobj = StringIO()
parser = csv.writer(fileobj, dialect=dialect)
parser.writerow(["Scan Name", "Updated", "Type", "Module", "Source", "F/P", "Data"])
for row in data:
if row[4] == "ROOT":
continue
lastseen = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(row[0]))
datafield = str(row[1]).replace("<SFURL>", "").replace("</SFURL>", "")
parser.writerow([scaninfo[row[12]][0], lastseen, str(row[4]), str(row[3]),
str(row[2]), row[13], datafield])
if len(ids.split(',')) > 1 or scan_name == "":
fname = "SpiderFoot.csv"
else:
fname = scan_name + "-SpiderFoot.csv"
cherrypy.response.headers['Content-Disposition'] = f"attachment; filename={fname}"
cherrypy.response.headers['Content-Type'] = "application/csv"
cherrypy.response.headers['Pragma'] = "no-cache"
return fileobj.getvalue().encode('utf-8')
return self.error("Invalid export filetype.") | Get scan event result data in CSV or Excel format for multiple scans
Args:
ids (str): comma separated list of scan IDs
filetype (str): type of file ("xlsx|excel" or "csv")
dialect (str): CSV dialect (default: excel)
Returns:
str: results in CSV or Excel format | scaneventresultexportmulti | python | smicallef/spiderfoot | sfwebui.py | https://github.com/smicallef/spiderfoot/blob/master/sfwebui.py | MIT |
def scansearchresultexport(self: 'SpiderFootWebUi', id: str, eventType: str = None, value: str = None, filetype: str = "csv", dialect: str = "excel") -> str:
"""Get search result data in CSV or Excel format
Args:
id (str): scan ID
eventType (str): TBD
value (str): TBD
filetype (str): type of file ("xlsx|excel" or "csv")
dialect (str): CSV dialect (default: excel)
Returns:
str: results in CSV or Excel format
"""
data = self.searchBase(id, eventType, value)
if not data:
return None
if filetype.lower() in ["xlsx", "excel"]:
rows = []
for row in data:
if row[10] == "ROOT":
continue
datafield = str(row[1]).replace("<SFURL>", "").replace("</SFURL>", "")
rows.append([row[0], str(row[10]), str(row[3]), str(row[2]), row[11], datafield])
cherrypy.response.headers['Content-Disposition'] = "attachment; filename=SpiderFoot.xlsx"
cherrypy.response.headers['Content-Type'] = "application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
cherrypy.response.headers['Pragma'] = "no-cache"
return self.buildExcel(rows, ["Updated", "Type", "Module", "Source",
"F/P", "Data"], sheetNameIndex=1)
if filetype.lower() == 'csv':
fileobj = StringIO()
parser = csv.writer(fileobj, dialect=dialect)
parser.writerow(["Updated", "Type", "Module", "Source", "F/P", "Data"])
for row in data:
if row[10] == "ROOT":
continue
datafield = str(row[1]).replace("<SFURL>", "").replace("</SFURL>", "")
parser.writerow([row[0], str(row[10]), str(row[3]), str(row[2]), row[11], datafield])
cherrypy.response.headers['Content-Disposition'] = "attachment; filename=SpiderFoot.csv"
cherrypy.response.headers['Content-Type'] = "application/csv"
cherrypy.response.headers['Pragma'] = "no-cache"
return fileobj.getvalue().encode('utf-8')
return self.error("Invalid export filetype.") | Get search result data in CSV or Excel format
Args:
id (str): scan ID
eventType (str): TBD
value (str): TBD
filetype (str): type of file ("xlsx|excel" or "csv")
dialect (str): CSV dialect (default: excel)
Returns:
str: results in CSV or Excel format | scansearchresultexport | python | smicallef/spiderfoot | sfwebui.py | https://github.com/smicallef/spiderfoot/blob/master/sfwebui.py | MIT |
def scanexportjsonmulti(self: 'SpiderFootWebUi', ids: str) -> str:
"""Get scan event result data in JSON format for multiple scans.
Args:
ids (str): comma separated list of scan IDs
Returns:
str: results in JSON format
"""
dbh = SpiderFootDb(self.config)
scaninfo = list()
scan_name = ""
for id in ids.split(','):
scan = dbh.scanInstanceGet(id)
if scan is None:
continue
scan_name = scan[0]
for row in dbh.scanResultEvent(id):
lastseen = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(row[0]))
event_data = str(row[1]).replace("<SFURL>", "").replace("</SFURL>", "")
source_data = str(row[2])
source_module = str(row[3])
event_type = row[4]
false_positive = row[13]
if event_type == "ROOT":
continue
scaninfo.append({
"data": event_data,
"event_type": event_type,
"module": source_module,
"source_data": source_data,
"false_positive": false_positive,
"last_seen": lastseen,
"scan_name": scan_name,
"scan_target": scan[1]
})
if len(ids.split(',')) > 1 or scan_name == "":
fname = "SpiderFoot.json"
else:
fname = scan_name + "-SpiderFoot.json"
cherrypy.response.headers['Content-Disposition'] = f"attachment; filename={fname}"
cherrypy.response.headers['Content-Type'] = "application/json; charset=utf-8"
cherrypy.response.headers['Pragma'] = "no-cache"
return json.dumps(scaninfo).encode('utf-8') | Get scan event result data in JSON format for multiple scans.
Args:
ids (str): comma separated list of scan IDs
Returns:
str: results in JSON format | scanexportjsonmulti | python | smicallef/spiderfoot | sfwebui.py | https://github.com/smicallef/spiderfoot/blob/master/sfwebui.py | MIT |
def scanviz(self: 'SpiderFootWebUi', id: str, gexf: str = "0") -> str:
"""Export entities from scan results for visualising.
Args:
id (str): scan ID
gexf (str): TBD
Returns:
str: GEXF data
"""
if not id:
return None
dbh = SpiderFootDb(self.config)
data = dbh.scanResultEvent(id, filterFp=True)
scan = dbh.scanInstanceGet(id)
if not scan:
return None
scan_name = scan[0]
root = scan[1]
if gexf == "0":
return SpiderFootHelpers.buildGraphJson([root], data)
if not scan_name:
fname = "SpiderFoot.gexf"
else:
fname = scan_name + "-SpiderFoot.gexf"
cherrypy.response.headers['Content-Disposition'] = f"attachment; filename={fname}"
cherrypy.response.headers['Content-Type'] = "application/gexf"
cherrypy.response.headers['Pragma'] = "no-cache"
return SpiderFootHelpers.buildGraphGexf([root], "SpiderFoot Export", data) | Export entities from scan results for visualising.
Args:
id (str): scan ID
gexf (str): TBD
Returns:
str: GEXF data | scanviz | python | smicallef/spiderfoot | sfwebui.py | https://github.com/smicallef/spiderfoot/blob/master/sfwebui.py | MIT |
def scanvizmulti(self: 'SpiderFootWebUi', ids: str, gexf: str = "1") -> str:
"""Export entities results from multiple scans in GEXF format.
Args:
ids (str): scan IDs
gexf (str): TBD
Returns:
str: GEXF data
"""
dbh = SpiderFootDb(self.config)
data = list()
roots = list()
scan_name = ""
if not ids:
return None
for id in ids.split(','):
scan = dbh.scanInstanceGet(id)
if not scan:
continue
data = data + dbh.scanResultEvent(id, filterFp=True)
roots.append(scan[1])
scan_name = scan[0]
if not data:
return None
if gexf == "0":
# Not implemented yet
return None
if len(ids.split(',')) > 1 or scan_name == "":
fname = "SpiderFoot.gexf"
else:
fname = scan_name + "-SpiderFoot.gexf"
cherrypy.response.headers['Content-Disposition'] = f"attachment; filename={fname}"
cherrypy.response.headers['Content-Type'] = "application/gexf"
cherrypy.response.headers['Pragma'] = "no-cache"
return SpiderFootHelpers.buildGraphGexf(roots, "SpiderFoot Export", data) | Export entities results from multiple scans in GEXF format.
Args:
ids (str): scan IDs
gexf (str): TBD
Returns:
str: GEXF data | scanvizmulti | python | smicallef/spiderfoot | sfwebui.py | https://github.com/smicallef/spiderfoot/blob/master/sfwebui.py | MIT |
def scanopts(self: 'SpiderFootWebUi', id: str) -> dict:
"""Return configuration used for the specified scan as JSON.
Args:
id: scan ID
Returns:
dict: scan options for the specified scan
"""
dbh = SpiderFootDb(self.config)
ret = dict()
meta = dbh.scanInstanceGet(id)
if not meta:
return ret
if meta[3] != 0:
started = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(meta[3]))
else:
started = "Not yet"
if meta[4] != 0:
finished = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(meta[4]))
else:
finished = "Not yet"
ret['meta'] = [meta[0], meta[1], meta[2], started, finished, meta[5]]
ret['config'] = dbh.scanConfigGet(id)
ret['configdesc'] = dict()
for key in list(ret['config'].keys()):
if ':' not in key:
globaloptdescs = self.config['__globaloptdescs__']
if globaloptdescs:
ret['configdesc'][key] = globaloptdescs.get(key, f"{key} (legacy)")
else:
[modName, modOpt] = key.split(':')
if modName not in list(self.config['__modules__'].keys()):
continue
if modOpt not in list(self.config['__modules__'][modName]['optdescs'].keys()):
continue
ret['configdesc'][key] = self.config['__modules__'][modName]['optdescs'][modOpt]
return ret | Return configuration used for the specified scan as JSON.
Args:
id: scan ID
Returns:
dict: scan options for the specified scan | scanopts | python | smicallef/spiderfoot | sfwebui.py | https://github.com/smicallef/spiderfoot/blob/master/sfwebui.py | MIT |
def rerunscan(self: 'SpiderFootWebUi', id: str) -> None:
"""Rerun a scan.
Args:
id (str): scan ID
Returns:
None
Raises:
HTTPRedirect: redirect to info page for new scan
"""
# Snapshot the current configuration to be used by the scan
cfg = deepcopy(self.config)
modlist = list()
dbh = SpiderFootDb(cfg)
info = dbh.scanInstanceGet(id)
if not info:
return self.error("Invalid scan ID.")
scanname = info[0]
scantarget = info[1]
scanconfig = dbh.scanConfigGet(id)
if not scanconfig:
return self.error(f"Error loading config from scan: {id}")
modlist = scanconfig['_modulesenabled'].split(',')
if "sfp__stor_stdout" in modlist:
modlist.remove("sfp__stor_stdout")
targetType = SpiderFootHelpers.targetTypeFromString(scantarget)
if not targetType:
# It must then be a name, as a re-run scan should always have a clean
# target. Put quotes around the target value and try to determine the
# target type again.
targetType = SpiderFootHelpers.targetTypeFromString(f'"{scantarget}"')
if targetType not in ["HUMAN_NAME", "BITCOIN_ADDRESS"]:
scantarget = scantarget.lower()
# Start running a new scan
scanId = SpiderFootHelpers.genScanInstanceId()
try:
p = mp.Process(target=startSpiderFootScanner, args=(self.loggingQueue, scanname, scanId, scantarget, targetType, modlist, cfg))
p.daemon = True
p.start()
except Exception as e:
self.log.error(f"[-] Scan [{scanId}] failed: {e}")
return self.error(f"[-] Scan [{scanId}] failed: {e}")
# Wait until the scan has initialized
while dbh.scanInstanceGet(scanId) is None:
self.log.info("Waiting for the scan to initialize...")
time.sleep(1)
raise cherrypy.HTTPRedirect(f"{self.docroot}/scaninfo?id={scanId}", status=302) | Rerun a scan.
Args:
id (str): scan ID
Returns:
None
Raises:
HTTPRedirect: redirect to info page for new scan | rerunscan | python | smicallef/spiderfoot | sfwebui.py | https://github.com/smicallef/spiderfoot/blob/master/sfwebui.py | MIT |
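
The launch pattern in `rerunscan` (spawn a daemon `multiprocessing.Process`, then poll until the worker is up) is generic enough to show in isolation; the worker function and the readiness check below are placeholders rather than SpiderFoot APIs, which poll the scan database instead:

```python
import multiprocessing as mp
import time

def run_scan(scan_id: str) -> None:
    """Placeholder worker standing in for startSpiderFootScanner."""
    print(f"scan {scan_id} running")
    time.sleep(1)

if __name__ == "__main__":
    scan_id = "example-scan"
    p = mp.Process(target=run_scan, args=(scan_id,))
    p.daemon = True          # worker dies with the parent process
    p.start()
    # SpiderFoot loops on dbh.scanInstanceGet(scanId) here; polling the
    # process handle is the closest stand-in in a self-contained example.
    while not p.is_alive() and p.exitcode is None:
        time.sleep(0.1)
    p.join()
```
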
def rerunscanmulti(self: 'SpiderFootWebUi', ids: str) -> str:
"""Rerun scans.
Args:
ids (str): comma separated list of scan IDs
Returns:
str: Scan list page HTML
"""
# Snapshot the current configuration to be used by the scan
cfg = deepcopy(self.config)
modlist = list()
dbh = SpiderFootDb(cfg)
for id in ids.split(","):
info = dbh.scanInstanceGet(id)
if not info:
return self.error("Invalid scan ID.")
scanconfig = dbh.scanConfigGet(id)
scanname = info[0]
scantarget = info[1]
targetType = None
if len(scanconfig) == 0:
return self.error("Something went wrong internally.")
modlist = scanconfig['_modulesenabled'].split(',')
if "sfp__stor_stdout" in modlist:
modlist.remove("sfp__stor_stdout")
targetType = SpiderFootHelpers.targetTypeFromString(scantarget)
if targetType is None:
# Should never be triggered for a re-run scan..
return self.error("Invalid target type. Could not recognize it as a target SpiderFoot supports.")
# Start running a new scan
scanId = SpiderFootHelpers.genScanInstanceId()
try:
p = mp.Process(target=startSpiderFootScanner, args=(self.loggingQueue, scanname, scanId, scantarget, targetType, modlist, cfg))
p.daemon = True
p.start()
except Exception as e:
self.log.error(f"[-] Scan [{scanId}] failed: {e}")
return self.error(f"[-] Scan [{scanId}] failed: {e}")
# Wait until the scan has initialized
while dbh.scanInstanceGet(scanId) is None:
self.log.info("Waiting for the scan to initialize...")
time.sleep(1)
templ = Template(filename='spiderfoot/templates/scanlist.tmpl', lookup=self.lookup)
return templ.render(rerunscans=True, docroot=self.docroot, pageid="SCANLIST", version=__version__) | Rerun scans.
Args:
ids (str): comma separated list of scan IDs
Returns:
str: Scan list page HTML | rerunscanmulti | python | smicallef/spiderfoot | sfwebui.py | https://github.com/smicallef/spiderfoot/blob/master/sfwebui.py | MIT |
def newscan(self: 'SpiderFootWebUi') -> str:
"""Configure a new scan.
Returns:
str: New scan page HTML
"""
dbh = SpiderFootDb(self.config)
types = dbh.eventTypes()
templ = Template(filename='spiderfoot/templates/newscan.tmpl', lookup=self.lookup)
return templ.render(pageid='NEWSCAN', types=types, docroot=self.docroot,
modules=self.config['__modules__'], scanname="",
selectedmods="", scantarget="", version=__version__) | Configure a new scan.
Returns:
str: New scan page HTML | newscan | python | smicallef/spiderfoot | sfwebui.py | https://github.com/smicallef/spiderfoot/blob/master/sfwebui.py | MIT |
def clonescan(self: 'SpiderFootWebUi', id: str) -> str:
"""Clone an existing scan (pre-selected options in the newscan page).
Args:
id (str): scan ID to clone
Returns:
str: New scan page HTML pre-populated with options from cloned scan.
"""
dbh = SpiderFootDb(self.config)
types = dbh.eventTypes()
info = dbh.scanInstanceGet(id)
if not info:
return self.error("Invalid scan ID.")
scanconfig = dbh.scanConfigGet(id)
scanname = info[0]
scantarget = info[1]
targetType = None
if scanname == "" or scantarget == "" or len(scanconfig) == 0:
return self.error("Something went wrong internally.")
targetType = SpiderFootHelpers.targetTypeFromString(scantarget)
if targetType is None:
# It must be a name, so wrap quotes around it
scantarget = "\"" + scantarget + "\""
modlist = scanconfig['_modulesenabled'].split(',')
templ = Template(filename='spiderfoot/templates/newscan.tmpl', lookup=self.lookup)
return templ.render(pageid='NEWSCAN', types=types, docroot=self.docroot,
modules=self.config['__modules__'], selectedmods=modlist,
scanname=str(scanname),
scantarget=str(scantarget), version=__version__) | Clone an existing scan (pre-selected options in the newscan page).
Args:
id (str): scan ID to clone
Returns:
str: New scan page HTML pre-populated with options from cloned scan. | clonescan | python | smicallef/spiderfoot | sfwebui.py | https://github.com/smicallef/spiderfoot/blob/master/sfwebui.py | MIT |
def index(self: 'SpiderFootWebUi') -> str:
"""Show scan list page.
Returns:
str: Scan list page HTML
"""
templ = Template(filename='spiderfoot/templates/scanlist.tmpl', lookup=self.lookup)
return templ.render(pageid='SCANLIST', docroot=self.docroot, version=__version__) | Show scan list page.
Returns:
str: Scan list page HTML | index | python | smicallef/spiderfoot | sfwebui.py | https://github.com/smicallef/spiderfoot/blob/master/sfwebui.py | MIT |
def scaninfo(self: 'SpiderFootWebUi', id: str) -> str:
"""Information about a selected scan.
Args:
id (str): scan id
Returns:
str: scan info page HTML
"""
dbh = SpiderFootDb(self.config)
res = dbh.scanInstanceGet(id)
if res is None:
return self.error("Scan ID not found.")
templ = Template(filename='spiderfoot/templates/scaninfo.tmpl', lookup=self.lookup, input_encoding='utf-8')
return templ.render(id=id, name=html.escape(res[0]), status=res[5], docroot=self.docroot, version=__version__,
pageid="SCANLIST") | Information about a selected scan.
Args:
id (str): scan id
Returns:
str: scan info page HTML | scaninfo | python | smicallef/spiderfoot | sfwebui.py | https://github.com/smicallef/spiderfoot/blob/master/sfwebui.py | MIT |
def opts(self: 'SpiderFootWebUi', updated: str = None) -> str:
"""Show module and global settings page.
Args:
updated (str): scan options were updated successfully
Returns:
str: scan options page HTML
"""
templ = Template(filename='spiderfoot/templates/opts.tmpl', lookup=self.lookup)
self.token = random.SystemRandom().randint(0, 99999999)
return templ.render(opts=self.config, pageid='SETTINGS', token=self.token, version=__version__,
updated=updated, docroot=self.docroot) | Show module and global settings page.
Args:
updated (str): scan options were updated successfully
Returns:
str: scan options page HTML | opts | python | smicallef/spiderfoot | sfwebui.py | https://github.com/smicallef/spiderfoot/blob/master/sfwebui.py | MIT |
def optsexport(self: 'SpiderFootWebUi', pattern: str = None) -> str:
"""Export configuration.
Args:
pattern (str): TBD
Returns:
str: Configuration settings
"""
sf = SpiderFoot(self.config)
conf = sf.configSerialize(self.config)
content = ""
for opt in sorted(conf):
if ":_" in opt or opt.startswith("_"):
continue
if pattern:
if pattern in opt:
content += f"{opt}={conf[opt]}\n"
else:
content += f"{opt}={conf[opt]}\n"
cherrypy.response.headers['Content-Disposition'] = 'attachment; filename="SpiderFoot.cfg"'
cherrypy.response.headers['Content-Type'] = "text/plain"
return content | Export configuration.
Args:
pattern (str): TBD
Returns:
str: Configuration settings | optsexport | python | smicallef/spiderfoot | sfwebui.py | https://github.com/smicallef/spiderfoot/blob/master/sfwebui.py | MIT |
def optsraw(self: 'SpiderFootWebUi') -> str:
"""Return global and module settings as json.
Returns:
str: settings as JSON
"""
ret = dict()
self.token = random.SystemRandom().randint(0, 99999999)
for opt in self.config:
if not opt.startswith('__'):
ret["global." + opt] = self.config[opt]
continue
if opt == '__modules__':
for mod in sorted(self.config['__modules__'].keys()):
for mo in sorted(self.config['__modules__'][mod]['opts'].keys()):
if mo.startswith("_"):
continue
ret["module." + mod + "." + mo] = self.config['__modules__'][mod]['opts'][mo]
return ['SUCCESS', {'token': self.token, 'data': ret}] | Return global and module settings as json.
Returns:
str: settings as JSON | optsraw | python | smicallef/spiderfoot | sfwebui.py | https://github.com/smicallef/spiderfoot/blob/master/sfwebui.py | MIT |
def scandelete(self: 'SpiderFootWebUi', id: str) -> str:
"""Delete scan(s).
Args:
id (str): comma separated list of scan IDs
Returns:
str: JSON response
"""
if not id:
return self.jsonify_error('404', "No scan specified")
dbh = SpiderFootDb(self.config)
ids = id.split(',')
for scan_id in ids:
res = dbh.scanInstanceGet(scan_id)
if not res:
return self.jsonify_error('404', f"Scan {scan_id} does not exist")
if res[5] in ["RUNNING", "STARTING", "STARTED"]:
return self.jsonify_error('400', f"Scan {scan_id} is {res[5]}. You cannot delete running scans.")
for scan_id in ids:
dbh.scanInstanceDelete(scan_id)
return "" | Delete scan(s).
Args:
id (str): comma separated list of scan IDs
Returns:
str: JSON response | scandelete | python | smicallef/spiderfoot | sfwebui.py | https://github.com/smicallef/spiderfoot/blob/master/sfwebui.py | MIT |
def savesettings(self: 'SpiderFootWebUi', allopts: str, token: str, configFile: 'cherrypy._cpreqbody.Part' = None) -> None:
"""Save settings, also used to completely reset them to default.
Args:
allopts: TBD
token (str): CSRF token
configFile (cherrypy._cpreqbody.Part): TBD
Returns:
None
Raises:
HTTPRedirect: redirect to scan settings
"""
if str(token) != str(self.token):
return self.error(f"Invalid token ({token})")
# configFile seems to get set even if a file isn't uploaded
if configFile and configFile.file:
try:
contents = configFile.file.read()
if isinstance(contents, bytes):
contents = contents.decode('utf-8')
tmp = dict()
for line in contents.split("\n"):
if "=" not in line:
continue
opt_array = line.strip().split("=")
if len(opt_array) == 1:
opt_array.append("")
tmp[opt_array[0]] = '='.join(opt_array[1:])
allopts = json.dumps(tmp).encode('utf-8')
except Exception as e:
return self.error(f"Failed to parse input file. Was it generated from SpiderFoot? ({e})")
# Reset config to default
if allopts == "RESET":
if self.reset_settings():
raise cherrypy.HTTPRedirect(f"{self.docroot}/opts?updated=1")
return self.error("Failed to reset settings")
# Save settings
try:
dbh = SpiderFootDb(self.config)
useropts = json.loads(allopts)
cleanopts = dict()
for opt in list(useropts.keys()):
cleanopts[opt] = self.cleanUserInput([useropts[opt]])[0]
currentopts = deepcopy(self.config)
# Make a new config where the user options override
# the current system config.
sf = SpiderFoot(self.config)
self.config = sf.configUnserialize(cleanopts, currentopts)
dbh.configSet(sf.configSerialize(self.config))
except Exception as e:
return self.error(f"Processing one or more of your inputs failed: {e}")
raise cherrypy.HTTPRedirect(f"{self.docroot}/opts?updated=1") | Save settings, also used to completely reset them to default.
Args:
allopts: TBD
token (str): CSRF token
configFile (cherrypy._cpreqbody.Part): TBD
Returns:
None
Raises:
HTTPRedirect: redirect to scan settings | savesettings | python | smicallef/spiderfoot | sfwebui.py | https://github.com/smicallef/spiderfoot/blob/master/sfwebui.py | MIT |
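
The uploaded-file branch of `savesettings` is a simple key=value reader in which everything after the first '=' belongs to the value. A standalone sketch of that parsing step, with invented file contents:

```python
import json

def parse_config_text(contents: str) -> str:
    """Turn 'key=value' lines into a JSON options blob (sketch of the upload path)."""
    opts = {}
    for line in contents.splitlines():
        if "=" not in line:
            continue                      # skip comments / blank lines
        key, _, value = line.strip().partition("=")
        opts[key] = value                 # value may itself contain '=' characters
    return json.dumps(opts)

sample = "sfp_dns:timeout=30\n_socks1type=\n# comment line\n"
print(parse_config_text(sample))
# {"sfp_dns:timeout": "30", "_socks1type": ""}
```
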
def savesettingsraw(self: 'SpiderFootWebUi', allopts: str, token: str) -> str:
"""Save settings, also used to completely reset them to default.
Args:
allopts: TBD
token (str): CSRF token
Returns:
str: save success as JSON
"""
cherrypy.response.headers['Content-Type'] = "application/json; charset=utf-8"
if str(token) != str(self.token):
return json.dumps(["ERROR", f"Invalid token ({token})."]).encode('utf-8')
# Reset config to default
if allopts == "RESET":
if self.reset_settings():
return json.dumps(["SUCCESS", ""]).encode('utf-8')
return json.dumps(["ERROR", "Failed to reset settings"]).encode('utf-8')
# Save settings
try:
dbh = SpiderFootDb(self.config)
useropts = json.loads(allopts)
cleanopts = dict()
for opt in list(useropts.keys()):
cleanopts[opt] = self.cleanUserInput([useropts[opt]])[0]
currentopts = deepcopy(self.config)
# Make a new config where the user options override
# the current system config.
sf = SpiderFoot(self.config)
self.config = sf.configUnserialize(cleanopts, currentopts)
dbh.configSet(sf.configSerialize(self.config))
except Exception as e:
return json.dumps(["ERROR", f"Processing one or more of your inputs failed: {e}"]).encode('utf-8')
return json.dumps(["SUCCESS", ""]).encode('utf-8') | Save settings, also used to completely reset them to default.
Args:
allopts: TBD
token (str): CSRF token
Returns:
str: save success as JSON | savesettingsraw | python | smicallef/spiderfoot | sfwebui.py | https://github.com/smicallef/spiderfoot/blob/master/sfwebui.py | MIT |
def reset_settings(self: 'SpiderFootWebUi') -> bool:
"""Reset settings to default.
Returns:
bool: success
"""
try:
dbh = SpiderFootDb(self.config)
dbh.configClear() # Clear it in the DB
self.config = deepcopy(self.defaultConfig) # Clear in memory
except Exception:
return False
return True | Reset settings to default.
Returns:
bool: success | reset_settings | python | smicallef/spiderfoot | sfwebui.py | https://github.com/smicallef/spiderfoot/blob/master/sfwebui.py | MIT |
def resultsetfp(self: 'SpiderFootWebUi', id: str, resultids: str, fp: str) -> str:
"""Set a bunch of results (hashes) as false positive.
Args:
id (str): scan ID
resultids (str): comma separated list of result IDs
fp (str): 0 or 1
Returns:
str: set false positive status as JSON
"""
cherrypy.response.headers['Content-Type'] = "application/json; charset=utf-8"
dbh = SpiderFootDb(self.config)
if fp not in ["0", "1"]:
return json.dumps(["ERROR", "No FP flag set or not set correctly."]).encode('utf-8')
try:
ids = json.loads(resultids)
except Exception:
return json.dumps(["ERROR", "No IDs supplied."]).encode('utf-8')
# Cannot set FPs if a scan is not completed
status = dbh.scanInstanceGet(id)
if not status:
return self.error(f"Invalid scan ID: {id}")
if status[5] not in ["ABORTED", "FINISHED", "ERROR-FAILED"]:
return json.dumps([
"WARNING",
"Scan must be in a finished state when setting False Positives."
]).encode('utf-8')
# Make sure the user doesn't set something as non-FP when the
# parent is set as an FP.
if fp == "0":
data = dbh.scanElementSourcesDirect(id, ids)
for row in data:
if str(row[14]) == "1":
return json.dumps([
"WARNING",
f"Cannot unset element {id} as False Positive if a parent element is still False Positive."
]).encode('utf-8')
# Set all the children as FPs too.. it's only logical afterall, right?
childs = dbh.scanElementChildrenAll(id, ids)
allIds = ids + childs
ret = dbh.scanResultsUpdateFP(id, allIds, fp)
if ret:
return json.dumps(["SUCCESS", ""]).encode('utf-8')
return json.dumps(["ERROR", "Exception encountered."]).encode('utf-8') | Set a bunch of results (hashes) as false positive.
Args:
id (str): scan ID
resultids (str): comma separated list of result IDs
fp (str): 0 or 1
Returns:
str: set false positive status as JSON | resultsetfp | python | smicallef/spiderfoot | sfwebui.py | https://github.com/smicallef/spiderfoot/blob/master/sfwebui.py | MIT |
def eventtypes(self: 'SpiderFootWebUi') -> list:
"""List all event types.
Returns:
list: list of event types
"""
cherrypy.response.headers['Content-Type'] = "application/json; charset=utf-8"
dbh = SpiderFootDb(self.config)
types = dbh.eventTypes()
ret = list()
for r in types:
ret.append([r[1], r[0]])
return sorted(ret, key=itemgetter(0)) | List all event types.
Returns:
list: list of event types | eventtypes | python | smicallef/spiderfoot | sfwebui.py | https://github.com/smicallef/spiderfoot/blob/master/sfwebui.py | MIT |
def modules(self: 'SpiderFootWebUi') -> list:
"""List all modules.
Returns:
list: list of modules
"""
cherrypy.response.headers['Content-Type'] = "application/json; charset=utf-8"
ret = list()
modinfo = list(self.config['__modules__'].keys())
if not modinfo:
return ret
modinfo.sort()
for m in modinfo:
if "__" in m:
continue
ret.append({'name': m, 'descr': self.config['__modules__'][m]['descr']})
return ret | List all modules.
Returns:
list: list of modules | modules | python | smicallef/spiderfoot | sfwebui.py | https://github.com/smicallef/spiderfoot/blob/master/sfwebui.py | MIT |
def correlationrules(self: 'SpiderFootWebUi') -> list:
"""List all correlation rules.
Returns:
list: list of correlation rules
"""
cherrypy.response.headers['Content-Type'] = "application/json; charset=utf-8"
ret = list()
rules = self.config['__correlationrules__']
if not rules:
return ret
for r in rules:
ret.append({
'id': r['id'],
'name': r['meta']['name'],
'descr': r['meta']['description'],
'risk': r['meta']['risk'],
})
return ret | List all correlation rules.
Returns:
list: list of correlation rules | correlationrules | python | smicallef/spiderfoot | sfwebui.py | https://github.com/smicallef/spiderfoot/blob/master/sfwebui.py | MIT |
def ping(self: 'SpiderFootWebUi') -> list:
"""For the CLI to test connectivity to this server.
Returns:
list: SpiderFoot version as JSON
"""
return ["SUCCESS", __version__] | For the CLI to test connectivity to this server.
Returns:
list: SpiderFoot version as JSON | ping | python | smicallef/spiderfoot | sfwebui.py | https://github.com/smicallef/spiderfoot/blob/master/sfwebui.py | MIT |
def query(self: 'SpiderFootWebUi', query: str) -> str:
"""For the CLI to run queries against the database.
Args:
query (str): SQL query
Returns:
str: query results as JSON
"""
dbh = SpiderFootDb(self.config)
if not query:
return self.jsonify_error('400', "Invalid query.")
if not query.lower().startswith("select"):
return self.jsonify_error('400', "Non-SELECTs are unpredictable and not recommended.")
try:
ret = dbh.dbh.execute(query)
data = ret.fetchall()
columnNames = [c[0] for c in dbh.dbh.description]
return [dict(zip(columnNames, row)) for row in data]
except Exception as e:
return self.jsonify_error('500', str(e)) | For the CLI to run queries against the database.
Args:
query (str): SQL query
Returns:
str: query results as JSON | query | python | smicallef/spiderfoot | sfwebui.py | https://github.com/smicallef/spiderfoot/blob/master/sfwebui.py | MIT |
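
The row shaping in `query` (zip the cursor's column names with each fetched row) is a common DB-API idiom. A self-contained sqlite3 sketch with a throwaway table; SpiderFoot runs the same logic against its own database handle:

```python
import sqlite3

def select_as_dicts(conn: sqlite3.Connection, sql: str) -> list:
    """Run a SELECT and return rows as dicts keyed by column name (sketch)."""
    cur = conn.execute(sql)
    columns = [c[0] for c in cur.description]
    return [dict(zip(columns, row)) for row in cur.fetchall()]

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE scans (id TEXT, name TEXT)")
conn.execute("INSERT INTO scans VALUES ('abc123', 'demo scan')")
print(select_as_dicts(conn, "SELECT id, name FROM scans"))
# [{'id': 'abc123', 'name': 'demo scan'}]
```
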
def startscan(self: 'SpiderFootWebUi', scanname: str, scantarget: str, modulelist: str, typelist: str, usecase: str) -> str:
"""Initiate a scan.
Args:
scanname (str): scan name
scantarget (str): scan target
modulelist (str): comma separated list of modules to use
typelist (str): selected modules based on produced event data types
usecase (str): selected module group (passive, investigate, footprint, all)
Returns:
str: start scan status as JSON
Raises:
HTTPRedirect: redirect to new scan info page
"""
scanname = self.cleanUserInput([scanname])[0]
scantarget = self.cleanUserInput([scantarget])[0]
if not scanname:
if cherrypy.request.headers.get('Accept') and 'application/json' in cherrypy.request.headers.get('Accept'):
cherrypy.response.headers['Content-Type'] = "application/json; charset=utf-8"
return json.dumps(["ERROR", "Incorrect usage: scan name was not specified."]).encode('utf-8')
return self.error("Invalid request: scan name was not specified.")
if not scantarget:
if cherrypy.request.headers.get('Accept') and 'application/json' in cherrypy.request.headers.get('Accept'):
cherrypy.response.headers['Content-Type'] = "application/json; charset=utf-8"
return json.dumps(["ERROR", "Incorrect usage: scan target was not specified."]).encode('utf-8')
return self.error("Invalid request: scan target was not specified.")
if not typelist and not modulelist and not usecase:
if cherrypy.request.headers.get('Accept') and 'application/json' in cherrypy.request.headers.get('Accept'):
cherrypy.response.headers['Content-Type'] = "application/json; charset=utf-8"
return json.dumps(["ERROR", "Incorrect usage: no modules specified for scan."]).encode('utf-8')
return self.error("Invalid request: no modules specified for scan.")
targetType = SpiderFootHelpers.targetTypeFromString(scantarget)
if targetType is None:
if cherrypy.request.headers.get('Accept') and 'application/json' in cherrypy.request.headers.get('Accept'):
cherrypy.response.headers['Content-Type'] = "application/json; charset=utf-8"
return json.dumps(["ERROR", "Unrecognised target type."]).encode('utf-8')
return self.error("Invalid target type. Could not recognize it as a target SpiderFoot supports.")
# Swap the globalscantable for the database handler
dbh = SpiderFootDb(self.config)
# Snapshot the current configuration to be used by the scan
cfg = deepcopy(self.config)
sf = SpiderFoot(cfg)
modlist = list()
# User selected modules
if modulelist:
modlist = modulelist.replace('module_', '').split(',')
# User selected types
if len(modlist) == 0 and typelist:
typesx = typelist.replace('type_', '').split(',')
# 1. Find all modules that produce the requested types
modlist = sf.modulesProducing(typesx)
newmods = deepcopy(modlist)
newmodcpy = deepcopy(newmods)
# 2. For each type those modules consume, get modules producing
while len(newmodcpy) > 0:
for etype in sf.eventsToModules(newmodcpy):
xmods = sf.modulesProducing([etype])
for mod in xmods:
if mod not in modlist:
modlist.append(mod)
newmods.append(mod)
newmodcpy = deepcopy(newmods)
newmods = list()
# User selected a use case
if len(modlist) == 0 and usecase:
for mod in self.config['__modules__']:
if usecase == 'all' or usecase in self.config['__modules__'][mod]['group']:
modlist.append(mod)
# If we somehow got all the way through to here and still don't have any modules selected
if not modlist:
if cherrypy.request.headers.get('Accept') and 'application/json' in cherrypy.request.headers.get('Accept'):
cherrypy.response.headers['Content-Type'] = "application/json; charset=utf-8"
return json.dumps(["ERROR", "Incorrect usage: no modules specified for scan."]).encode('utf-8')
return self.error("Invalid request: no modules specified for scan.")
# Add our mandatory storage module
if "sfp__stor_db" not in modlist:
modlist.append("sfp__stor_db")
modlist.sort()
# Delete the stdout module in case it crept in
if "sfp__stor_stdout" in modlist:
modlist.remove("sfp__stor_stdout")
# Start running a new scan
if targetType in ["HUMAN_NAME", "USERNAME", "BITCOIN_ADDRESS"]:
scantarget = scantarget.replace("\"", "")
else:
scantarget = scantarget.lower()
# Start running a new scan
scanId = SpiderFootHelpers.genScanInstanceId()
try:
p = mp.Process(target=startSpiderFootScanner, args=(self.loggingQueue, scanname, scanId, scantarget, targetType, modlist, cfg))
p.daemon = True
p.start()
except Exception as e:
self.log.error(f"[-] Scan [{scanId}] failed: {e}")
return self.error(f"[-] Scan [{scanId}] failed: {e}")
# Wait until the scan has initialized
# Check the database for the scan status results
while dbh.scanInstanceGet(scanId) is None:
self.log.info("Waiting for the scan to initialize...")
time.sleep(1)
if cherrypy.request.headers.get('Accept') and 'application/json' in cherrypy.request.headers.get('Accept'):
cherrypy.response.headers['Content-Type'] = "application/json; charset=utf-8"
return json.dumps(["SUCCESS", scanId]).encode('utf-8')
raise cherrypy.HTTPRedirect(f"{self.docroot}/scaninfo?id={scanId}") | Initiate a scan.
Args:
scanname (str): scan name
scantarget (str): scan target
modulelist (str): comma separated list of modules to use
typelist (str): selected modules based on produced event data types
usecase (str): selected module group (passive, investigate, footprint, all)
Returns:
str: start scan status as JSON
Raises:
HTTPRedirect: redirect to new scan info page | startscan | python | smicallef/spiderfoot | sfwebui.py | https://github.com/smicallef/spiderfoot/blob/master/sfwebui.py | MIT |
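
The type-driven module selection in `startscan` is a fixed-point expansion: start with modules producing the requested event types, then keep adding producers of whatever the selected modules consume until nothing new appears. A dependency-free sketch of that idea; the produce/consume maps and module names are invented, standing in for `sf.modulesProducing()` and `sf.eventsToModules()`:

```python
# Invented maps standing in for sf.modulesProducing() / sf.eventsToModules().
PRODUCES = {
    "sfp_dns": ["IP_ADDRESS"],
    "sfp_whois": ["DOMAIN_REGISTRAR"],
    "sfp_portscan": ["TCP_PORT_OPEN"],
}
CONSUMES = {
    "sfp_dns": ["DOMAIN_NAME"],
    "sfp_whois": ["DOMAIN_NAME"],
    "sfp_portscan": ["IP_ADDRESS"],
}

def producers_of(event_types: list) -> list:
    return [m for m, outs in PRODUCES.items() if any(t in outs for t in event_types)]

def resolve_modules(wanted_types: list) -> list:
    """Expand the module list until no new producers are discovered."""
    modules = producers_of(wanted_types)
    frontier = list(modules)
    while frontier:
        consumed = [t for m in frontier for t in CONSUMES.get(m, [])]
        frontier = [m for m in producers_of(consumed) if m not in modules]
        modules.extend(frontier)
    return sorted(modules)

print(resolve_modules(["TCP_PORT_OPEN"]))
# ['sfp_dns', 'sfp_portscan']  -> sfp_dns pulled in because sfp_portscan consumes IP_ADDRESS
```
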
def stopscan(self: 'SpiderFootWebUi', id: str) -> str:
"""Stop a scan.
Args:
id (str): comma separated list of scan IDs
Returns:
str: JSON response
"""
if not id:
return self.jsonify_error('404', "No scan specified")
dbh = SpiderFootDb(self.config)
ids = id.split(',')
for scan_id in ids:
res = dbh.scanInstanceGet(scan_id)
if not res:
return self.jsonify_error('404', f"Scan {scan_id} does not exist")
scan_status = res[5]
if scan_status == "FINISHED":
return self.jsonify_error('400', f"Scan {scan_id} has already finished.")
if scan_status == "ABORTED":
return self.jsonify_error('400', f"Scan {scan_id} has already aborted.")
if scan_status != "RUNNING" and scan_status != "STARTING":
return self.jsonify_error('400', f"The running scan is currently in the state '{scan_status}', please try again later or restart SpiderFoot.")
for scan_id in ids:
dbh.scanInstanceSet(scan_id, status="ABORT-REQUESTED")
return "" | Stop a scan.
Args:
id (str): comma separated list of scan IDs
Returns:
str: JSON response | stopscan | python | smicallef/spiderfoot | sfwebui.py | https://github.com/smicallef/spiderfoot/blob/master/sfwebui.py | MIT |
def scanlog(self: 'SpiderFootWebUi', id: str, limit: str = None, rowId: str = None, reverse: str = None) -> list:
"""Scan log data.
Args:
id (str): scan ID
limit (str): TBD
rowId (str): TBD
reverse (str): TBD
Returns:
list: scan log
"""
dbh = SpiderFootDb(self.config)
retdata = []
try:
data = dbh.scanLogs(id, limit, rowId, reverse)
except Exception:
return retdata
for row in data:
generated = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(row[0] / 1000))
retdata.append([generated, row[1], row[2], html.escape(row[3]), row[4]])
return retdata | Scan log data.
Args:
id (str): scan ID
limit (str): TBD
rowId (str): TBD
reverse (str): TBD
Returns:
list: scan log | scanlog | python | smicallef/spiderfoot | sfwebui.py | https://github.com/smicallef/spiderfoot/blob/master/sfwebui.py | MIT |
def scanerrors(self: 'SpiderFootWebUi', id: str, limit: str = None) -> list:
"""Scan error data.
Args:
id (str): scan ID
limit (str): limit number of results
Returns:
list: scan errors
"""
dbh = SpiderFootDb(self.config)
retdata = []
try:
data = dbh.scanErrors(id, limit)
except Exception:
return retdata
for row in data:
generated = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(row[0] / 1000))
retdata.append([generated, row[1], html.escape(str(row[2]))])
return retdata | Scan error data.
Args:
id (str): scan ID
limit (str): limit number of results
Returns:
list: scan errors | scanerrors | python | smicallef/spiderfoot | sfwebui.py | https://github.com/smicallef/spiderfoot/blob/master/sfwebui.py | MIT |
def scanlist(self: 'SpiderFootWebUi') -> list:
"""Produce a list of scans.
Returns:
list: scan list
"""
dbh = SpiderFootDb(self.config)
data = dbh.scanInstanceList()
retdata = []
for row in data:
created = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(row[3]))
riskmatrix = {
"HIGH": 0,
"MEDIUM": 0,
"LOW": 0,
"INFO": 0
}
correlations = dbh.scanCorrelationSummary(row[0], by="risk")
if correlations:
for c in correlations:
riskmatrix[c[0]] = c[1]
if row[4] == 0:
started = "Not yet"
else:
started = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(row[4]))
if row[5] == 0:
finished = "Not yet"
else:
finished = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(row[5]))
retdata.append([row[0], row[1], row[2], created, started, finished, row[6], row[7], riskmatrix])
return retdata | Produce a list of scans.
Returns:
list: scan list | scanlist | python | smicallef/spiderfoot | sfwebui.py | https://github.com/smicallef/spiderfoot/blob/master/sfwebui.py | MIT |
def scanstatus(self: 'SpiderFootWebUi', id: str) -> list:
"""Show basic information about a scan, including status and number of each event type.
Args:
id (str): scan ID
Returns:
list: scan status
"""
dbh = SpiderFootDb(self.config)
data = dbh.scanInstanceGet(id)
if not data:
return []
created = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(data[2]))
started = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(data[3]))
ended = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(data[4]))
riskmatrix = {
"HIGH": 0,
"MEDIUM": 0,
"LOW": 0,
"INFO": 0
}
correlations = dbh.scanCorrelationSummary(id, by="risk")
if correlations:
for c in correlations:
riskmatrix[c[0]] = c[1]
return [data[0], data[1], created, started, ended, data[5], riskmatrix] | Show basic information about a scan, including status and number of each event type.
Args:
id (str): scan ID
Returns:
list: scan status | scanstatus | python | smicallef/spiderfoot | sfwebui.py | https://github.com/smicallef/spiderfoot/blob/master/sfwebui.py | MIT |
def scansummary(self: 'SpiderFootWebUi', id: str, by: str) -> list:
"""Summary of scan results.
Args:
id (str): scan ID
by (str): filter by type
Returns:
list: scan summary
"""
retdata = []
dbh = SpiderFootDb(self.config)
try:
scandata = dbh.scanResultSummary(id, by)
except Exception:
return retdata
try:
statusdata = dbh.scanInstanceGet(id)
except Exception:
return retdata
for row in scandata:
if row[0] == "ROOT":
continue
lastseen = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(row[2]))
retdata.append([row[0], row[1], lastseen, row[3], row[4], statusdata[5]])
return retdata | Summary of scan results.
Args:
id (str): scan ID
by (str): filter by type
Returns:
list: scan summary | scansummary | python | smicallef/spiderfoot | sfwebui.py | https://github.com/smicallef/spiderfoot/blob/master/sfwebui.py | MIT |
def scancorrelations(self: 'SpiderFootWebUi', id: str) -> list:
"""Correlation results from a scan.
Args:
id (str): scan ID
Returns:
list: correlation result list
"""
retdata = []
dbh = SpiderFootDb(self.config)
try:
corrdata = dbh.scanCorrelationList(id)
except Exception:
return retdata
for row in corrdata:
retdata.append([row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7]])
return retdata | Correlation results from a scan.
Args:
id (str): scan ID
Returns:
list: correlation result list | scancorrelations | python | smicallef/spiderfoot | sfwebui.py | https://github.com/smicallef/spiderfoot/blob/master/sfwebui.py | MIT |
def scaneventresults(self: 'SpiderFootWebUi', id: str, eventType: str = None, filterfp: bool = False, correlationId: str = None) -> list:
"""Return all event results for a scan as JSON.
Args:
id (str): scan ID
eventType (str): filter by event type
filterfp (bool): remove false positives from search results
correlationId (str): filter by events associated with a correlation
Returns:
list: scan results
"""
retdata = []
dbh = SpiderFootDb(self.config)
if not eventType:
eventType = 'ALL'
try:
data = dbh.scanResultEvent(id, eventType, filterfp, correlationId=correlationId)
except Exception:
return retdata
for row in data:
lastseen = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(row[0]))
retdata.append([
lastseen,
html.escape(row[1]),
html.escape(row[2]),
row[3],
row[5],
row[6],
row[7],
row[8],
row[13],
row[14],
row[4]
])
return retdata | Return all event results for a scan as JSON.
Args:
id (str): scan ID
eventType (str): filter by event type
filterfp (bool): remove false positives from search results
correlationId (str): filter by events associated with a correlation
Returns:
list: scan results | scaneventresults | python | smicallef/spiderfoot | sfwebui.py | https://github.com/smicallef/spiderfoot/blob/master/sfwebui.py | MIT |
def scaneventresultsunique(self: 'SpiderFootWebUi', id: str, eventType: str, filterfp: bool = False) -> list:
"""Return unique event results for a scan as JSON.
Args:
id (str): filter search results by scan ID
eventType (str): filter search results by event type
filterfp (bool): remove false positives from search results
Returns:
list: unique search results
"""
dbh = SpiderFootDb(self.config)
retdata = []
try:
data = dbh.scanResultEventUnique(id, eventType, filterfp)
except Exception:
return retdata
for row in data:
escaped = html.escape(row[0])
retdata.append([escaped, row[1], row[2]])
return retdata | Return unique event results for a scan as JSON.
Args:
id (str): filter search results by scan ID
eventType (str): filter search results by event type
filterfp (bool): remove false positives from search results
Returns:
list: unique search results | scaneventresultsunique | python | smicallef/spiderfoot | sfwebui.py | https://github.com/smicallef/spiderfoot/blob/master/sfwebui.py | MIT |
def search(self: 'SpiderFootWebUi', id: str = None, eventType: str = None, value: str = None) -> list:
"""Search scans.
Args:
id (str): filter search results by scan ID
eventType (str): filter search results by event type
value (str): filter search results by event value
Returns:
list: search results
"""
try:
return self.searchBase(id, eventType, value)
except Exception:
return [] | Search scans.
Args:
id (str): filter search results by scan ID
eventType (str): filter search results by event type
value (str): filter search results by event value
Returns:
list: search results | search | python | smicallef/spiderfoot | sfwebui.py | https://github.com/smicallef/spiderfoot/blob/master/sfwebui.py | MIT |
def scanhistory(self: 'SpiderFootWebUi', id: str) -> list:
"""Historical data for a scan.
Args:
id (str): scan ID
Returns:
list: scan history
"""
if not id:
return self.jsonify_error('404', "No scan specified")
dbh = SpiderFootDb(self.config)
try:
return dbh.scanResultHistory(id)
except Exception:
return [] | Historical data for a scan.
Args:
id (str): scan ID
Returns:
list: scan history | scanhistory | python | smicallef/spiderfoot | sfwebui.py | https://github.com/smicallef/spiderfoot/blob/master/sfwebui.py | MIT |
def scanelementtypediscovery(self: 'SpiderFootWebUi', id: str, eventType: str) -> dict:
"""Scan element type discovery.
Args:
id (str): scan ID
eventType (str): filter by event type
Returns:
dict
"""
dbh = SpiderFootDb(self.config)
pc = dict()
datamap = dict()
retdata = dict()
# Get the events we will be tracing back from
try:
leafSet = dbh.scanResultEvent(id, eventType)
[datamap, pc] = dbh.scanElementSourcesAll(id, leafSet)
except Exception:
return retdata
# Delete the ROOT key as it adds no value from a viz perspective
del pc['ROOT']
retdata['tree'] = SpiderFootHelpers.dataParentChildToTree(pc)
retdata['data'] = datamap
return retdata | Scan element type discovery.
Args:
id (str): scan ID
eventType (str): filter by event type
Returns:
dict | scanelementtypediscovery | python | smicallef/spiderfoot | sfwebui.py | https://github.com/smicallef/spiderfoot/blob/master/sfwebui.py | MIT |
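The handlers above are exposed by CherryPy as JSON endpoints in the SpiderFoot web UI; a rough usage sketch, assuming a locally running instance on the default 127.0.0.1:5001 and that the method names map to URL paths as in the upstream UI:
import requests

base = "http://127.0.0.1:5001"  # assumed default listen address of the web UI

# List scans, then fetch the status of each one (field order per scanlist/scanstatus above).
for scan in requests.get(f"{base}/scanlist").json():
    scan_id, name = scan[0], scan[1]
    status = requests.get(f"{base}/scanstatus", params={"id": scan_id}).json()
    print(scan_id, name, status[5])  # status[5] is the scan status string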
def __init__(self, scanName: str, scanId: str, targetValue: str, targetType: str, moduleList: list, globalOpts: dict, start: bool = True) -> None:
"""Initialize SpiderFootScanner object.
Args:
scanName (str): name of the scan
scanId (str): unique ID of the scan
targetValue (str): scan target
targetType (str): scan target type
moduleList (list): list of modules to run
globalOpts (dict): scan options
start (bool): start the scan immediately
Raises:
TypeError: arg type was invalid
ValueError: arg value was invalid
Todo:
Eventually change this to be able to control multiple scan instances
"""
if not isinstance(globalOpts, dict):
raise TypeError(f"globalOpts is {type(globalOpts)}; expected dict()")
if not globalOpts:
raise ValueError("globalOpts is empty")
self.__config = deepcopy(globalOpts)
self.__dbh = SpiderFootDb(self.__config)
if not isinstance(scanName, str):
raise TypeError(f"scanName is {type(scanName)}; expected str()")
if not scanName:
raise ValueError("scanName value is blank")
self.__scanName = scanName
if not isinstance(scanId, str):
raise TypeError(f"scanId is {type(scanId)}; expected str()")
if not scanId:
raise ValueError("scanId value is blank")
if not isinstance(targetValue, str):
raise TypeError(f"targetValue is {type(targetValue)}; expected str()")
if not targetValue:
raise ValueError("targetValue value is blank")
self.__targetValue = targetValue
if not isinstance(targetType, str):
raise TypeError(f"targetType is {type(targetType)}; expected str()")
if not targetType:
raise ValueError("targetType value is blank")
self.__targetType = targetType
if not isinstance(moduleList, list):
raise TypeError(f"moduleList is {type(moduleList)}; expected list()")
if not moduleList:
raise ValueError("moduleList is empty")
self.__moduleList = moduleList
self.__sf = SpiderFoot(self.__config)
self.__sf.dbh = self.__dbh
# Create a unique ID for this scan in the back-end DB.
if scanId:
self.__scanId = scanId
else:
self.__scanId = SpiderFootHelpers.genScanInstanceId()
self.__sf.scanId = self.__scanId
self.__dbh.scanInstanceCreate(self.__scanId, self.__scanName, self.__targetValue)
# Create our target
try:
self.__target = SpiderFootTarget(self.__targetValue, self.__targetType)
except (TypeError, ValueError) as e:
self.__sf.status(f"Scan [{self.__scanId}] failed: {e}")
self.__setStatus("ERROR-FAILED", None, time.time() * 1000)
raise ValueError(f"Invalid target: {e}") from None
# Save the config current set for this scan
self.__config['_modulesenabled'] = self.__moduleList
self.__dbh.scanConfigSet(self.__scanId, self.__sf.configSerialize(deepcopy(self.__config)))
# Process global options that point to other places for data
# If a proxy server was specified, set it up
proxy_type = self.__config.get('_socks1type')
if proxy_type:
# TODO: allow DNS lookup to be configurable when using a proxy
# - proxy DNS lookup: socks5h:// and socks4a://
# - local DNS lookup: socks5:// and socks4://
if proxy_type == '4':
proxy_proto = 'socks4://'
elif proxy_type == '5':
proxy_proto = 'socks5://'
elif proxy_type == 'HTTP':
proxy_proto = 'http://'
elif proxy_type == 'TOR':
proxy_proto = 'socks5h://'
else:
self.__sf.status(f"Scan [{self.__scanId}] failed: Invalid proxy type: {proxy_type}")
self.__setStatus("ERROR-FAILED", None, time.time() * 1000)
raise ValueError(f"Invalid proxy type: {proxy_type}")
proxy_host = self.__config.get('_socks2addr', '')
if not proxy_host:
self.__sf.status(f"Scan [{self.__scanId}] failed: Proxy type is set ({proxy_type}) but proxy address value is blank")
self.__setStatus("ERROR-FAILED", None, time.time() * 1000)
raise ValueError(f"Proxy type is set ({proxy_type}) but proxy address value is blank")
proxy_port = int(self.__config.get('_socks3port') or 0)
if not proxy_port:
if proxy_type in ['4', '5']:
proxy_port = 1080
elif proxy_type.upper() == 'HTTP':
proxy_port = 8080
elif proxy_type.upper() == 'TOR':
proxy_port = 9050
proxy_username = self.__config.get('_socks4user', '')
proxy_password = self.__config.get('_socks5pwd', '')
if proxy_username or proxy_password:
proxy_auth = f"{proxy_username}:{proxy_password}"
proxy = f"{proxy_proto}{proxy_auth}@{proxy_host}:{proxy_port}"
else:
proxy = f"{proxy_proto}{proxy_host}:{proxy_port}"
self.__sf.debug(f"Using proxy: {proxy}")
self.__sf.socksProxy = proxy
else:
self.__sf.socksProxy = None
# Override the default DNS server
if self.__config['_dnsserver']:
res = dns.resolver.Resolver()
res.nameservers = [self.__config['_dnsserver']]
dns.resolver.override_system_resolver(res)
else:
dns.resolver.restore_system_resolver()
# Set the user agent
self.__config['_useragent'] = self.__sf.optValueToData(self.__config['_useragent'])
# Set up the Internet TLD list.
# If the cache does not exist or has expired, reload it from scratch.
tld_data = self.__sf.cacheGet("internet_tlds", self.__config['_internettlds_cache'])
if tld_data is None:
tld_data = self.__sf.optValueToData(self.__config['_internettlds'])
if tld_data is None:
self.__sf.status(f"Scan [{self.__scanId}] failed: Could not update TLD list")
self.__setStatus("ERROR-FAILED", None, time.time() * 1000)
raise ValueError("Could not update TLD list")
self.__sf.cachePut("internet_tlds", tld_data)
self.__config['_internettlds'] = tld_data.splitlines()
self.__setStatus("INITIALIZING", time.time() * 1000, None)
self.__sharedThreadPool = SpiderFootThreadPool(threads=self.__config.get("_maxthreads", 3), name='sharedThreadPool')
# Used when module threading is enabled
self.eventQueue = None
if start:
self.__startScan() | Initialize SpiderFootScanner object.
Args:
scanName (str): name of the scan
scanId (str): unique ID of the scan
targetValue (str): scan target
targetType (str): scan target type
moduleList (list): list of modules to run
globalOpts (dict): scan options
start (bool): start the scan immediately
Raises:
TypeError: arg type was invalid
ValueError: arg value was invalid
Todo:
Eventually change this to be able to control multiple scan instances | __init__ | python | smicallef/spiderfoot | sfscan.py | https://github.com/smicallef/spiderfoot/blob/master/sfscan.py | MIT |
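A heavily simplified sketch of how a scan object is constructed from this initializer; sfConfig below is an assumption standing in for the fully populated global configuration (including '__modules__', '__correlationrules__' and TLD settings) that sf.py or sfwebui.py normally builds:
from copy import deepcopy
from sfscan import SpiderFootScanner
from spiderfoot import SpiderFootHelpers

target = "spiderfoot.net"
target_type = SpiderFootHelpers.targetTypeFromString(target)  # INTERNET_NAME
scan_id = SpiderFootHelpers.genScanInstanceId()

# sfConfig: assumed complete SpiderFoot configuration dict (not defined here).
# start=False performs validation and setup only; start=True runs the scan
# to completion in the calling thread.
scanner = SpiderFootScanner("example scan", scan_id, target, target_type,
                            ["sfp_dnsresolve"], deepcopy(sfConfig), start=False)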
def __setStatus(self, status: str, started: float = None, ended: float = None) -> None:
"""Set the status of the currently running scan (if any).
Args:
status (str): scan status
started (float): timestamp at start of scan
ended (float): timestamp at end of scan
Raises:
TypeError: arg type was invalid
ValueError: arg value was invalid
"""
if not isinstance(status, str):
raise TypeError(f"status is {type(status)}; expected str()")
if status not in [
"INITIALIZING",
"STARTING",
"STARTED",
"RUNNING",
"ABORT-REQUESTED",
"ABORTED",
"ABORTING",
"FINISHED",
"ERROR-FAILED"
]:
raise ValueError(f"Invalid scan status {status}")
self.__status = status
self.__dbh.scanInstanceSet(self.__scanId, started, ended, status) | Set the status of the currently running scan (if any).
Args:
status (str): scan status
started (float): timestamp at start of scan
ended (float): timestamp at end of scan
Raises:
TypeError: arg type was invalid
ValueError: arg value was invalid | __setStatus | python | smicallef/spiderfoot | sfscan.py | https://github.com/smicallef/spiderfoot/blob/master/sfscan.py | MIT |
def __startScan(self) -> None:
"""Start running a scan.
Raises:
AssertionError: Never actually raised.
"""
failed = True
try:
self.__setStatus("STARTING", time.time() * 1000, None)
self.__sf.status(f"Scan [{self.__scanId}] for '{self.__target.targetValue}' initiated.")
self.eventQueue = queue.Queue()
self.__sharedThreadPool.start()
# moduleList = list of modules the user wants to run
self.__sf.debug(f"Loading {len(self.__moduleList)} modules ...")
for modName in self.__moduleList:
if not modName:
continue
# Module may have been renamed or removed
if modName not in self.__config['__modules__']:
self.__sf.error(f"Failed to load module: {modName}")
continue
try:
module = __import__('modules.' + modName, globals(), locals(), [modName])
except ImportError:
self.__sf.error(f"Failed to load module: {modName}")
continue
try:
mod = getattr(module, modName)()
mod.__name__ = modName
except Exception:
self.__sf.error(f"Module {modName} initialization failed", exc_info=True)
continue
# Set up the module options, scan ID, database handle and listeners
try:
# Configuration is a combined global config with module-specific options
self.__modconfig[modName] = deepcopy(self.__config['__modules__'][modName]['opts'])
for opt in list(self.__config.keys()):
self.__modconfig[modName][opt] = deepcopy(self.__config[opt])
# clear any listener relationships from the past
mod.clearListeners()
mod.setScanId(self.__scanId)
mod.setSharedThreadPool(self.__sharedThreadPool)
mod.setDbh(self.__dbh)
mod.setup(self.__sf, self.__modconfig[modName])
except Exception:
self.__sf.error(f"Module {modName} initialization failed", exc_info=True)
mod.errorState = True
continue
# Override the module's local socket module to be the SOCKS one.
if self.__config['_socks1type'] != '':
try:
mod._updateSocket(socket)
except Exception as e:
self.__sf.error(f"Module {modName} socket setup failed: {e}")
continue
# Set up event output filters if requested
if self.__config['__outputfilter']:
try:
mod.setOutputFilter(self.__config['__outputfilter'])
except Exception as e:
self.__sf.error(f"Module {modName} output filter setup failed: {e}")
continue
# Give modules a chance to 'enrich' the original target with aliases of that target.
try:
newTarget = mod.enrichTarget(self.__target)
if newTarget is not None:
self.__target = newTarget
except Exception as e:
self.__sf.error(f"Module {modName} target enrichment failed: {e}")
continue
# Register the target with the module
try:
mod.setTarget(self.__target)
except Exception as e:
self.__sf.error(f"Module {modName} failed to set target '{self.__target}': {e}")
continue
# Set up the outgoing event queue
try:
mod.outgoingEventQueue = self.eventQueue
mod.incomingEventQueue = queue.Queue()
except Exception as e:
self.__sf.error(f"Module {modName} event queue setup failed: {e}")
continue
self.__moduleInstances[modName] = mod
self.__sf.status(f"{modName} module loaded.")
self.__sf.debug(f"Scan [{self.__scanId}] loaded {len(self.__moduleInstances)} modules.")
if not self.__moduleInstances:
self.__setStatus("ERROR-FAILED", None, time.time() * 1000)
self.__dbh.close()
return
# sort modules by priority
self.__moduleInstances = OrderedDict(sorted(self.__moduleInstances.items(), key=lambda m: m[-1]._priority))
# Now we are ready to roll..
self.__setStatus("RUNNING")
# Create a pseudo module for the root event to originate from
psMod = SpiderFootPlugin()
psMod.__name__ = "SpiderFoot UI"
psMod.setTarget(self.__target)
psMod.setDbh(self.__dbh)
psMod.clearListeners()
psMod.outgoingEventQueue = self.eventQueue
psMod.incomingEventQueue = queue.Queue()
# Create the "ROOT" event which un-triggered modules will link events to
rootEvent = SpiderFootEvent("ROOT", self.__targetValue, "", None)
psMod.notifyListeners(rootEvent)
firstEvent = SpiderFootEvent(self.__targetType, self.__targetValue,
"SpiderFoot UI", rootEvent)
psMod.notifyListeners(firstEvent)
# Special case: check if an INTERNET_NAME is also a domain
if self.__targetType == 'INTERNET_NAME' and self.__sf.isDomain(self.__targetValue, self.__config['_internettlds']):
firstEvent = SpiderFootEvent('DOMAIN_NAME', self.__targetValue, "SpiderFoot UI", rootEvent)
psMod.notifyListeners(firstEvent)
# If in interactive mode, loop through this shared global variable
# waiting for inputs, and process them until my status is set to
# FINISHED.
# Check in case the user requested to stop the scan between modules
# initializing
scanstatus = self.__dbh.scanInstanceGet(self.__scanId)
if scanstatus and scanstatus[5] == "ABORT-REQUESTED":
raise AssertionError("ABORT-REQUESTED")
# start threads
self.waitForThreads()
failed = False
except (KeyboardInterrupt, AssertionError):
self.__sf.status(f"Scan [{self.__scanId}] aborted.")
self.__setStatus("ABORTED", None, time.time() * 1000)
except BaseException as e:
self.__sf.error(
f"Unhandled exception ({e.__class__.__name__}) encountered during scan. Please report this as a bug",
exc_info=True
)
self.__sf.status(f"Scan [{self.__scanId}] failed: {e}")
self.__setStatus("ERROR-FAILED", None, time.time() * 1000)
finally:
if not failed:
self.__setStatus("FINISHED", None, time.time() * 1000)
self.runCorrelations()
self.__sf.status(f"Scan [{self.__scanId}] completed.")
self.__dbh.close() | Start running a scan.
Raises:
AssertionError: Never actually raised. | __startScan | python | smicallef/spiderfoot | sfscan.py | https://github.com/smicallef/spiderfoot/blob/master/sfscan.py | MIT |
def runCorrelations(self) -> None:
"""Run correlation rules."""
self.__sf.status(f"Running {len(self.__config['__correlationrules__'])} correlation rules on scan {self.__scanId}.")
ruleset = dict()
for rule in self.__config['__correlationrules__']:
ruleset[rule['id']] = rule['rawYaml']
corr = SpiderFootCorrelator(self.__dbh, ruleset, self.__scanId)
corr.run_correlations() | Run correlation rules. | runCorrelations | python | smicallef/spiderfoot | sfscan.py | https://github.com/smicallef/spiderfoot/blob/master/sfscan.py | MIT |
def waitForThreads(self) -> None:
"""Wait for threads.
Raises:
TypeError: queue tried to process a malformed event
AssertionError: scan halted for some reason
"""
if not self.eventQueue:
return
counter = 0
try:
# start one thread for each module
for mod in self.__moduleInstances.values():
mod.start()
final_passes = 3
# watch for newly-generated events
while True:
# log status of threads every 10 iterations
log_status = counter % 10 == 0
counter += 1
if log_status:
scanstatus = self.__dbh.scanInstanceGet(self.__scanId)
if scanstatus and scanstatus[5] == "ABORT-REQUESTED":
raise AssertionError("ABORT-REQUESTED")
try:
sfEvent = self.eventQueue.get_nowait()
self.__sf.debug(f"waitForThreads() got event, {sfEvent.eventType}, from eventQueue.")
except queue.Empty:
# check if we're finished
if self.threadsFinished(log_status):
sleep(.1)
# but are we really?
if self.threadsFinished(log_status):
if final_passes < 1:
break
# Trigger module.finished()
for mod in self.__moduleInstances.values():
if not mod.errorState and mod.incomingEventQueue is not None:
mod.incomingEventQueue.put('FINISHED')
sleep(.1)
while not self.threadsFinished(log_status):
log_status = counter % 100 == 0
counter += 1
sleep(.01)
final_passes -= 1
else:
# save on CPU
sleep(.1)
continue
if not isinstance(sfEvent, SpiderFootEvent):
raise TypeError(f"sfEvent is {type(sfEvent)}; expected SpiderFootEvent")
# for every module
for mod in self.__moduleInstances.values():
# if it's been aborted
if mod._stopScanning:
# break out of the while loop
raise AssertionError(f"{mod.__name__} requested stop")
# send it the new event if applicable
if not mod.errorState and mod.incomingEventQueue is not None:
watchedEvents = mod.watchedEvents()
if sfEvent.eventType in watchedEvents or "*" in watchedEvents:
mod.incomingEventQueue.put(deepcopy(sfEvent))
finally:
# tell the modules to stop
for mod in self.__moduleInstances.values():
mod._stopScanning = True
self.__sharedThreadPool.shutdown(wait=True) | Wait for threads.
Raises:
TypeError: queue tried to process a malformed event
AssertionError: scan halted for some reason | waitForThreads | python | smicallef/spiderfoot | sfscan.py | https://github.com/smicallef/spiderfoot/blob/master/sfscan.py | MIT |
def threadsFinished(self, log_status: bool = False) -> bool:
"""Check if all threads are complete.
Args:
log_status (bool): print thread queue status to debug log
Returns:
bool: True if all threads are finished
"""
if self.eventQueue is None:
return True
modules_waiting = dict()
for m in self.__moduleInstances.values():
try:
if m.incomingEventQueue is not None:
modules_waiting[m.__name__] = m.incomingEventQueue.qsize()
except Exception:
with suppress(Exception):
m.errorState = True
modules_waiting = sorted(modules_waiting.items(), key=lambda x: x[-1], reverse=True)
modules_running = []
for m in self.__moduleInstances.values():
try:
if m.running:
modules_running.append(m.__name__)
except Exception:
with suppress(Exception):
m.errorState = True
modules_errored = []
for m in self.__moduleInstances.values():
try:
if m.errorState:
modules_errored.append(m.__name__)
except Exception:
with suppress(Exception):
m.errorState = True
queues_empty = [qsize == 0 for m, qsize in modules_waiting]
for mod in self.__moduleInstances.values():
if mod.errorState and mod.incomingEventQueue is not None:
self.__sf.debug(f"Clearing and unsetting incomingEventQueue for errored module {mod.__name__}.")
with suppress(Exception):
while True:
mod.incomingEventQueue.get_nowait()
mod.incomingEventQueue = None
if not modules_running and not queues_empty:
self.__sf.debug("Clearing queues for stalled/aborted modules.")
for mod in self.__moduleInstances.values():
with suppress(Exception):
while True:
mod.incomingEventQueue.get_nowait()
if log_status:
events_queued = ", ".join([f"{mod}: {qsize:,}" for mod, qsize in modules_waiting[:5] if qsize > 0])
if not events_queued:
events_queued = 'None'
self.__sf.debug(f"Events queued: {sum([m[-1] for m in modules_waiting]):,} ({events_queued})")
if modules_running:
self.__sf.debug(f"Modules running: {len(modules_running):,} ({', '.join(modules_running)})")
if modules_errored:
self.__sf.debug(f"Modules errored: {len(modules_errored):,} ({', '.join(modules_errored)})")
if all(queues_empty) and not modules_running:
return True
return False | Check if all threads are complete.
Args:
log_status (bool): print thread queue status to debug log
Returns:
bool: True if all threads are finished | threadsFinished | python | smicallef/spiderfoot | sfscan.py | https://github.com/smicallef/spiderfoot/blob/master/sfscan.py | MIT |
def __init__(self, targetValue: str, typeName: str) -> None:
"""Initialize SpiderFoot target.
Args:
targetValue (str): target value
typeName (str): target type
"""
self.targetType = typeName
self.targetValue = targetValue
self.targetAliases = list() | Initialize SpiderFoot target.
Args:
targetValue (str): target value
typeName (str): target type | __init__ | python | smicallef/spiderfoot | spiderfoot/target.py | https://github.com/smicallef/spiderfoot/blob/master/spiderfoot/target.py | MIT |
def setAlias(self, value: str, typeName: str) -> None:
"""Specify other hostnames, IPs, etc. that are aliases for this target.
For instance, if the user searched for an ASN, a module
might supply all the nested subnets as aliases.
Or, if a user searched for an IP address, a module
might supply the hostname as an alias.
Args:
value (str): Target alias value
typeName (str): Target alias data type
"""
if not isinstance(value, (str, bytes)):
return
if not value:
return
if not isinstance(typeName, (str, bytes)):
return
if not typeName:
return
alias: TargetAlias = {'type': typeName, 'value': value.lower()}
if alias in self.targetAliases:
return
self.targetAliases.append(alias) | Specify other hostnames, IPs, etc. that are aliases for this target.
For instance, if the user searched for an ASN, a module
might supply all the nested subnets as aliases.
Or, if a user searched for an IP address, a module
might supply the hostname as an alias.
Args:
value (str): Target alias value
typeName (str): Target alias data type | setAlias | python | smicallef/spiderfoot | spiderfoot/target.py | https://github.com/smicallef/spiderfoot/blob/master/spiderfoot/target.py | MIT |
def _getEquivalents(self, typeName: str) -> typing.List[str]:
"""Get all aliases of the specfied target data type.
Args:
typeName (str): Target data type
Returns:
typing.List[str]: target aliases
"""
ret: typing.List[str] = list()
for item in self.targetAliases:
if item['type'] == typeName:
ret.append(item['value'].lower())
return ret | Get all aliases of the specified target data type.
Args:
typeName (str): Target data type
Returns:
typing.List[str]: target aliases | _getEquivalents | python | smicallef/spiderfoot | spiderfoot/target.py | https://github.com/smicallef/spiderfoot/blob/master/spiderfoot/target.py | MIT |
def getNames(self) -> typing.List[str]:
"""Get all domains associated with the target.
Returns:
typing.List[str]: domains associated with the target
"""
e = self._getEquivalents("INTERNET_NAME")
if self.targetType in ["INTERNET_NAME", "EMAILADDR"] and self.targetValue.lower() not in e:
e.append(self.targetValue.lower())
names: typing.List[str] = list()
for name in e:
if isinstance(name, bytes):
names.append(name.decode("utf-8"))
else:
names.append(name)
return names | Get all domains associated with the target.
Returns:
typing.List[str]: domains associated with the target | getNames | python | smicallef/spiderfoot | spiderfoot/target.py | https://github.com/smicallef/spiderfoot/blob/master/spiderfoot/target.py | MIT |
def getAddresses(self) -> typing.List[str]:
"""Get all IP subnet or IP address aliases associated with the target.
Returns:
typing.List[str]: List of IP subnets and addresses
"""
e = self._getEquivalents("IP_ADDRESS")
if self.targetType == "IP_ADDRESS":
e.append(self.targetValue)
e = self._getEquivalents("IPV6_ADDRESS")
if self.targetType == "IPV6_ADDRESS":
e.append(self.targetValue)
return e | Get all IP subnet or IP address aliases associated with the target.
Returns:
typing.List[str]: List of IP subnets and addresses | getAddresses | python | smicallef/spiderfoot | spiderfoot/target.py | https://github.com/smicallef/spiderfoot/blob/master/spiderfoot/target.py | MIT |
def matches(self, value: str, includeParents: bool = False, includeChildren: bool = True) -> bool:
"""Check whether the supplied value is "tightly" related to the original target.
Tightly in this case means:
If the value is an IP:
* is it in the list of aliases or the target itself?
* is it on the target's subnet?
If the value is an internet name (subdomain, domain, hostname):
* is it in the list of aliases or the target itself?
* is it a parent of the aliases of the target (domain/subdomain)
* is it a child of the aliases of the target (hostname)
Args:
value (str): can be an Internet Name (hostname, subnet, domain) or an IP address.
includeParents (bool): True means you consider a value that is
a parent domain of the target to still be a tight relation.
includeChildren (bool): False means you don't consider a value
that is a child of the target to be a tight relation.
Returns:
bool: whether the value matches the target
"""
if not isinstance(value, str) and not isinstance(value, bytes):
return False
if isinstance(value, bytes):
value = value.decode("utf-8")
if not value:
return False
# We can't really say anything about names, usernames, bitcoin addresses
# or phone numbers, so everything matches
if self.targetType in ["HUMAN_NAME", "PHONE_NUMBER", "USERNAME", "BITCOIN_ADDRESS"]:
return True
# TODO: review handling of other potential self.targetType target types:
# "INTERNET_NAME", "EMAILADDR", "BGP_AS_OWNER"
# For IP addresses, check if it is an alias of the target or within the target's subnet.
if netaddr.valid_ipv4(value) or netaddr.valid_ipv6(value):
if value in self.getAddresses():
return True
if self.targetType in ["IP_ADDRESS", "IPV6_ADDRESS", "NETBLOCK_OWNER", "NETBLOCKV6_OWNER"]:
try:
if netaddr.IPAddress(value) in netaddr.IPNetwork(self.targetValue):
return True
except netaddr.AddrFormatError:
return False
return False
# For everything else, check if the value is within or equal to target names
for name in self.getNames():
if value == name:
return True
if includeParents and name.endswith("." + value):
return True
if includeChildren and value.endswith("." + name):
return True
return False | Check whether the supplied value is "tightly" related to the original target.
Tightly in this case means:
If the value is an IP:
* is it in the list of aliases or the target itself?
* is it on the target's subnet?
If the value is an internet name (subdomain, domain, hostname):
* is it in the list of aliases or the target itself?
* is it a parent of the aliases of the target (domain/subdomain)
* is it a child of the aliases of the target (hostname)
Args:
value (str): can be an Internet Name (hostname, subnet, domain) or an IP address.
includeParents (bool): True means you consider a value that is
a parent domain of the target to still be a tight relation.
includeChildren (bool): False means you don't consider a value
that is a child of the target to be a tight relation.
Returns:
bool: whether the value matches the target | matches | python | smicallef/spiderfoot | spiderfoot/target.py | https://github.com/smicallef/spiderfoot/blob/master/spiderfoot/target.py | MIT |
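A minimal usage sketch of the target and alias API above, assuming the spiderfoot package from this repository is importable:
from spiderfoot import SpiderFootTarget

# Seed target is a domain; a module later supplies a hostname alias.
target = SpiderFootTarget("spiderfoot.net", "INTERNET_NAME")
target.setAlias("docs.spiderfoot.net", "INTERNET_NAME")

print(target.getNames())                      # ['docs.spiderfoot.net', 'spiderfoot.net']
print(target.matches("mail.spiderfoot.net"))  # True  - a child of the target
print(target.matches("example.com"))          # False - unrelated name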
def dataPath() -> str:
"""Returns the file system location of SpiderFoot data and configuration files.
Returns:
str: SpiderFoot data file system path
"""
path = os.environ.get('SPIDERFOOT_DATA')
if not path:
path = f"{Path.home()}/.spiderfoot/"
if not os.path.isdir(path):
os.makedirs(path, exist_ok=True)
return path | Returns the file system location of SpiderFoot data and configuration files.
Returns:
str: SpiderFoot data file system path | dataPath | python | smicallef/spiderfoot | spiderfoot/helpers.py | https://github.com/smicallef/spiderfoot/blob/master/spiderfoot/helpers.py | MIT |
def cachePath() -> str:
"""Returns the file system location of the cacha data files.
Returns:
str: SpiderFoot cache file system path
"""
path = os.environ.get('SPIDERFOOT_CACHE')
if not path:
path = f"{Path.home()}/.spiderfoot/cache"
if not os.path.isdir(path):
os.makedirs(path, exist_ok=True)
return path | Returns the file system location of the cache data files.
Returns:
str: SpiderFoot cache file system path | cachePath | python | smicallef/spiderfoot | spiderfoot/helpers.py | https://github.com/smicallef/spiderfoot/blob/master/spiderfoot/helpers.py | MIT |
def logPath() -> str:
"""Returns the file system location of SpiderFoot log files.
Returns:
str: SpiderFoot log file system path
"""
path = os.environ.get('SPIDERFOOT_LOGS')
if not path:
path = f"{Path.home()}/.spiderfoot/logs"
if not os.path.isdir(path):
os.makedirs(path, exist_ok=True)
return path | Returns the file system location of SpiderFoot log files.
Returns:
str: SpiderFoot log file system path | logPath | python | smicallef/spiderfoot | spiderfoot/helpers.py | https://github.com/smicallef/spiderfoot/blob/master/spiderfoot/helpers.py | MIT |
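The three path helpers above honour environment-variable overrides; a small illustration with hypothetical paths:
import os

os.environ["SPIDERFOOT_DATA"] = "/tmp/sf-data"  # hypothetical locations
os.environ["SPIDERFOOT_LOGS"] = "/tmp/sf-logs"

from spiderfoot import SpiderFootHelpers

print(SpiderFootHelpers.dataPath())   # /tmp/sf-data (directory is created if missing)
print(SpiderFootHelpers.logPath())    # /tmp/sf-logs
print(SpiderFootHelpers.cachePath())  # defaults to ~/.spiderfoot/cache unless SPIDERFOOT_CACHE is set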
def loadModulesAsDict(path: str, ignore_files: typing.Optional[typing.List[str]] = None) -> dict:
"""Load modules from modules directory.
Args:
path (str): file system path for modules directory
ignore_files (list): List of module file names to ignore
Returns:
dict: SpiderFoot modules
Raises:
TypeError: ignore file list was invalid
ValueError: module path does not exist
SyntaxError: module data is malformed
"""
if not ignore_files:
ignore_files = []
if not isinstance(ignore_files, list):
raise TypeError(f"ignore_files is {type(ignore_files)}; expected list()")
if not os.path.isdir(path):
raise ValueError(f"Modules directory does not exist: {path}")
sfModules = dict()
valid_categories = ["Content Analysis", "Crawling and Scanning", "DNS",
"Leaks, Dumps and Breaches", "Passive DNS",
"Public Registries", "Real World", "Reputation Systems",
"Search Engines", "Secondary Networks", "Social Media"]
for filename in os.listdir(path):
if not filename.startswith("sfp_"):
continue
if not filename.endswith(".py"):
continue
if filename in ignore_files:
continue
modName = filename.split('.')[0]
sfModules[modName] = dict()
mod = __import__('modules.' + modName, globals(), locals(), [modName])
sfModules[modName]['object'] = getattr(mod, modName)()
mod_dict = sfModules[modName]['object'].asdict()
sfModules[modName].update(mod_dict)
if len(sfModules[modName]['cats']) > 1:
raise SyntaxError(f"Module {modName} has multiple categories defined but only one is supported.")
if sfModules[modName]['cats'] and sfModules[modName]['cats'][0] not in valid_categories:
raise SyntaxError(f"Module {modName} has invalid category '{sfModules[modName]['cats']}'.")
return sfModules | Load modules from modules directory.
Args:
path (str): file system path for modules directory
ignore_files (list): List of module file names to ignore
Returns:
dict: SpiderFoot modules
Raises:
TypeError: ignore file list was invalid
ValueError: module path does not exist
SyntaxError: module data is malformed | loadModulesAsDict | python | smicallef/spiderfoot | spiderfoot/helpers.py | https://github.com/smicallef/spiderfoot/blob/master/spiderfoot/helpers.py | MIT |
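An illustrative call, assuming it is run from the root of a SpiderFoot checkout so that the modules package is importable:
import os
from spiderfoot import SpiderFootHelpers

mod_dir = os.path.join(os.getcwd(), "modules")  # assumes a SpiderFoot checkout
sf_modules = SpiderFootHelpers.loadModulesAsDict(mod_dir, ignore_files=["sfp_template.py"])
print(f"{len(sf_modules)} modules loaded")
print(sf_modules["sfp_dnsresolve"]["cats"])  # e.g. ['DNS']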
def loadCorrelationRulesRaw(path: str, ignore_files: typing.Optional[typing.List[str]] = None) -> typing.Dict[str, str]:
"""Load correlation rules from correlations directory.
Args:
path (str): file system path for correlations directory
ignore_files (list[str]): List of module file names to ignore
Returns:
dict[str, str]: raw correlation rules
Raises:
TypeError: ignore file list was invalid
ValueError: module path does not exist
"""
if not ignore_files:
ignore_files = []
if not isinstance(ignore_files, list):
raise TypeError(f"ignore_files is {type(ignore_files)}; expected list()")
if not os.path.isdir(path):
raise ValueError(f"Correlations directory does not exist: {path}")
correlationRulesRaw: typing.Dict[str, str] = dict()
for filename in os.listdir(path):
if not filename.endswith(".yaml"):
continue
if filename in ignore_files:
continue
ruleName = filename.split('.')[0]
with open(path + filename, 'r') as f:
correlationRulesRaw[ruleName] = f.read()
return correlationRulesRaw | Load correlation rules from correlations directory.
Args:
path (str): file system path for correlations directory
ignore_files (list[str]): List of module file names to ignore
Returns:
dict[str, str]: raw correlation rules
Raises:
TypeError: ignore file list was invalid
ValueError: module path does not exist | loadCorrelationRulesRaw | python | smicallef/spiderfoot | spiderfoot/helpers.py | https://github.com/smicallef/spiderfoot/blob/master/spiderfoot/helpers.py | MIT |
def targetTypeFromString(target: str) -> typing.Optional[str]:
"""Return the scan target seed data type for the specified scan target input.
Args:
target (str): scan target seed input
Returns:
str: scan target seed data type
"""
if not target:
return None
# NOTE: the regex order is important
regexToType = [
{r"^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$": "IP_ADDRESS"},
{r"^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}/\d+$": "NETBLOCK_OWNER"},
{r"^.*@.*$": "EMAILADDR"},
{r"^\+[0-9]+$": "PHONE_NUMBER"},
{r"^\".+\s+.+\"$": "HUMAN_NAME"},
{r"^\".+\"$": "USERNAME"},
{r"^[0-9]+$": "BGP_AS_OWNER"},
{r"^[0-9a-f:]+$": "IPV6_ADDRESS"},
{r"^[0-9a-f:]+::/[0-9]+$": "NETBLOCKV6_OWNER"},
{r"^(([a-z0-9]|[a-z0-9][a-z0-9\-]*[a-z0-9])\.)+([a-z0-9]|[a-z0-9][a-z0-9\-]*[a-z0-9])$": "INTERNET_NAME"},
{r"^(bc(0([ac-hj-np-z02-9]{39}|[ac-hj-np-z02-9]{59})|1[ac-hj-np-z02-9]{8,87})|[13][a-km-zA-HJ-NP-Z1-9]{25,35})$": "BITCOIN_ADDRESS"},
]
# Parse the target and set the target type
for rxpair in regexToType:
rx = list(rxpair.keys())[0]
if re.match(rx, target, re.IGNORECASE | re.UNICODE):
return list(rxpair.values())[0]
return None | Return the scan target seed data type for the specified scan target input.
Args:
target (str): scan target seed input
Returns:
str: scan target seed data type | targetTypeFromString | python | smicallef/spiderfoot | spiderfoot/helpers.py | https://github.com/smicallef/spiderfoot/blob/master/spiderfoot/helpers.py | MIT |
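A few illustrative seed values and the types the regex ordering above resolves them to, assuming SpiderFootHelpers is importable from the spiderfoot package:
from spiderfoot import SpiderFootHelpers

print(SpiderFootHelpers.targetTypeFromString("199.233.109.112"))   # IP_ADDRESS
print(SpiderFootHelpers.targetTypeFromString("199.233.109.0/24"))  # NETBLOCK_OWNER
print(SpiderFootHelpers.targetTypeFromString("abuse@example.com")) # EMAILADDR
print(SpiderFootHelpers.targetTypeFromString('"John Smith"'))      # HUMAN_NAME
print(SpiderFootHelpers.targetTypeFromString("spiderfoot.net"))    # INTERNET_NAME
print(SpiderFootHelpers.targetTypeFromString("no match here"))     # None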
def urlRelativeToAbsolute(url: str) -> typing.Optional[str]:
"""Turn a relative URL path into an absolute path.
Args:
url (str): URL
Returns:
str: URL relative path
"""
if not url:
return None
if not isinstance(url, str):
return None
if '..' not in url:
return url
finalBits: typing.List[str] = list()
for chunk in url.split('/'):
if chunk != '..':
finalBits.append(chunk)
continue
# Don't pop the last item off if we're at the top
if len(finalBits) <= 1:
continue
# Don't pop the last item off if the first bits are not the path
if '://' in url and len(finalBits) <= 3:
continue
finalBits.pop()
return '/'.join(finalBits) | Turn a relative URL path into an absolute path.
Args:
url (str): URL
Returns:
str: URL relative path | urlRelativeToAbsolute | python | smicallef/spiderfoot | spiderfoot/helpers.py | https://github.com/smicallef/spiderfoot/blob/master/spiderfoot/helpers.py | MIT |
def urlBaseDir(url: str) -> typing.Optional[str]:
"""Extract the top level directory from a URL
Args:
url (str): URL
Returns:
str: base directory
"""
if not url:
return None
if not isinstance(url, str):
return None
bits = url.split('/')
# For cases like 'www.somesite.com'
if len(bits) == 0:
return url + '/'
# For cases like 'http://www.blah.com'
if '://' in url and url.count('/') < 3:
return url + '/'
base = '/'.join(bits[:-1])
return base + '/' | Extract the top level directory from a URL.
Args:
url (str): URL
Returns:
str: base directory | urlBaseDir | python | smicallef/spiderfoot | spiderfoot/helpers.py | https://github.com/smicallef/spiderfoot/blob/master/spiderfoot/helpers.py | MIT |
def urlBaseUrl(url: str) -> typing.Optional[str]:
"""Extract the scheme and domain from a URL.
Note: Does not return the trailing slash! So you can do .endswith() checks.
Args:
url (str): URL
Returns:
str: base URL without trailing slash
"""
if not url:
return None
if not isinstance(url, str):
return None
if '://' in url:
bits = re.match(r'(\w+://.[^/:\?]*)[:/\?].*', url)
else:
bits = re.match(r'(.[^/:\?]*)[:/\?]', url)
if bits is None:
return url.lower()
return bits.group(1).lower() | Extract the scheme and domain from a URL.
Note: Does not return the trailing slash! So you can do .endswith() checks.
Args:
url (str): URL
Returns:
str: base URL without trailing slash | urlBaseUrl | python | smicallef/spiderfoot | spiderfoot/helpers.py | https://github.com/smicallef/spiderfoot/blob/master/spiderfoot/helpers.py | MIT |
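A short illustration of how the three URL helpers above compose:
from spiderfoot import SpiderFootHelpers

url = "http://www.example.com/docs/api/../guide/index.html"
print(SpiderFootHelpers.urlBaseUrl(url))             # http://www.example.com
print(SpiderFootHelpers.urlBaseDir(url))             # http://www.example.com/docs/api/../guide/
print(SpiderFootHelpers.urlRelativeToAbsolute(url))  # http://www.example.com/docs/guide/index.html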
def dictionaryWordsFromWordlists(wordlists: typing.Optional[typing.List[str]] = None) -> typing.Set[str]:
"""Return dictionary words from several language dictionaries.
Args:
wordlists (list[str]): list of wordlist file names to read (excluding file extension).
Returns:
set[str]: words from dictionaries
Raises:
IOError: Error reading wordlist file
"""
words: typing.Set[str] = set()
if wordlists is None:
wordlists = ["english", "german", "french", "spanish"]
for d in wordlists:
try:
with resources.open_text('spiderfoot.dicts.ispell', f"{d}.dict", errors='ignore') as dict_file:
for w in dict_file.readlines():
words.add(w.strip().lower().split('/')[0])
except BaseException as e:
raise IOError(f"Could not read wordlist file '{d}.dict'") from e
return words | Return dictionary words from several language dictionaries.
Args:
wordlists (list[str]): list of wordlist file names to read (excluding file extension).
Returns:
set[str]: words from dictionaries
Raises:
IOError: Error reading wordlist file | dictionaryWordsFromWordlists | python | smicallef/spiderfoot | spiderfoot/helpers.py | https://github.com/smicallef/spiderfoot/blob/master/spiderfoot/helpers.py | MIT |
def humanNamesFromWordlists(wordlists: typing.Optional[typing.List[str]] = None) -> typing.Set[str]:
"""Return list of human names from wordlist file.
Args:
wordlists (list[str]): list of wordlist file names to read (excluding file extension).
Returns:
set[str]: human names from wordlists
Raises:
IOError: Error reading wordlist file
"""
words: typing.Set[str] = set()
if wordlists is None:
wordlists = ["names"]
for d in wordlists:
try:
with resources.open_text('spiderfoot.dicts.ispell', f"{d}.dict", errors='ignore') as dict_file:
for w in dict_file.readlines():
words.add(w.strip().lower().split('/')[0])
except BaseException as e:
raise IOError(f"Could not read wordlist file '{d}.dict'") from e
return words | Return list of human names from wordlist file.
Args:
wordlists (list[str]): list of wordlist file names to read (excluding file extension).
Returns:
set[str]: human names from wordlists
Raises:
IOError: Error reading wordlist file | humanNamesFromWordlists | python | smicallef/spiderfoot | spiderfoot/helpers.py | https://github.com/smicallef/spiderfoot/blob/master/spiderfoot/helpers.py | MIT |
def usernamesFromWordlists(wordlists: typing.Optional[typing.List[str]] = None) -> typing.Set[str]:
"""Return list of usernames from wordlist file.
Args:
wordlists (list[str]): list of wordlist file names to read (excluding file extension).
Returns:
set[str]: usernames from wordlists
Raises:
IOError: Error reading wordlist file
"""
words: typing.Set[str] = set()
if wordlists is None:
wordlists = ["generic-usernames"]
for d in wordlists:
try:
with resources.open_text('spiderfoot.dicts', f"{d}.txt", errors='ignore') as dict_file:
for w in dict_file.readlines():
words.add(w.strip().lower().split('/')[0])
except BaseException as e:
raise IOError(f"Could not read wordlist file '{d}.txt'") from e
return words | Return list of usernames from wordlist file.
Args:
wordlists (list[str]): list of wordlist file names to read (excluding file extension).
Returns:
set[str]: usernames from wordlists
Raises:
IOError: Error reading wordlist file | usernamesFromWordlists | python | smicallef/spiderfoot | spiderfoot/helpers.py | https://github.com/smicallef/spiderfoot/blob/master/spiderfoot/helpers.py | MIT |
def buildGraphGexf(root: str, title: str, data: typing.List[str], flt: typing.Optional[typing.List[str]] = None) -> str:
"""Convert supplied raw data into GEXF (Graph Exchange XML Format) format (e.g. for Gephi).
Args:
root (str): TBD
title (str): unused
data (list[str]): Scan result as list
flt (list[str]): List of event types to include. If not set everything is included.
Returns:
str: GEXF formatted XML
"""
if not flt:
flt = []
mapping = SpiderFootHelpers.buildGraphData(data, flt)
graph = nx.Graph()
nodelist: typing.Dict[str, int] = dict()
ncounter = 0
for pair in mapping:
(dst, src) = pair
# Leave out this special case
if dst == "ROOT" or src == "ROOT":
continue
color = {
'r': 0,
'g': 0,
'b': 0,
'a': 0
}
if dst not in nodelist:
ncounter = ncounter + 1
if dst in root:
color['r'] = 255
graph.add_node(dst)
graph.nodes[dst]['viz'] = {'color': color}
nodelist[dst] = ncounter
if src not in nodelist:
ncounter = ncounter + 1
if src in root:
color['r'] = 255
graph.add_node(src)
graph.nodes[src]['viz'] = {'color': color}
nodelist[src] = ncounter
graph.add_edge(src, dst)
gexf = GEXFWriter(graph=graph)
return str(gexf).encode('utf-8') | Convert supplied raw data into GEXF (Graph Exchange XML Format) format (e.g. for Gephi).
Args:
root (str): TBD
title (str): unused
data (list[str]): Scan result as list
flt (list[str]): List of event types to include. If not set everything is included.
Returns:
str: GEXF formatted XML | buildGraphGexf | python | smicallef/spiderfoot | spiderfoot/helpers.py | https://github.com/smicallef/spiderfoot/blob/master/spiderfoot/helpers.py | MIT |
def buildGraphJson(root: str, data: typing.List[str], flt: typing.Optional[typing.List[str]] = None) -> str:
"""Convert supplied raw data into JSON format for SigmaJS.
Args:
root (str): TBD
data (list[str]): Scan result as list
flt (list[str]): List of event types to include. If not set everything is included.
Returns:
str: TBD
"""
if not flt:
flt = []
mapping = SpiderFootHelpers.buildGraphData(data, flt)
ret: _Graph = {}
ret['nodes'] = list()
ret['edges'] = list()
nodelist: typing.Dict[str, int] = dict()
ecounter = 0
ncounter = 0
for pair in mapping:
(dst, src) = pair
col = "#000"
# Leave out this special case
if dst == "ROOT" or src == "ROOT":
continue
if dst not in nodelist:
ncounter = ncounter + 1
if dst in root:
col = "#f00"
ret['nodes'].append({
'id': str(ncounter),
'label': str(dst),
'x': random.SystemRandom().randint(1, 1000),
'y': random.SystemRandom().randint(1, 1000),
'size': "1",
'color': col
})
nodelist[dst] = ncounter
if src not in nodelist:
ncounter = ncounter + 1
if src in root:
col = "#f00"
ret['nodes'].append({
'id': str(ncounter),
'label': str(src),
'x': random.SystemRandom().randint(1, 1000),
'y': random.SystemRandom().randint(1, 1000),
'size': "1",
'color': col
})
nodelist[src] = ncounter
ecounter = ecounter + 1
ret['edges'].append({
'id': str(ecounter),
'source': str(nodelist[src]),
'target': str(nodelist[dst])
})
return json.dumps(ret) | Convert supplied raw data into JSON format for SigmaJS.
Args:
root (str): TBD
data (list[str]): Scan result as list
flt (list[str]): List of event types to include. If not set everything is included.
Returns:
str: TBD | buildGraphJson | python | smicallef/spiderfoot | spiderfoot/helpers.py | https://github.com/smicallef/spiderfoot/blob/master/spiderfoot/helpers.py | MIT |
def buildGraphData(data: typing.List[str], flt: typing.Optional[typing.List[str]] = None) -> typing.Set[typing.Tuple[str, str]]:
"""Return a format-agnostic collection of tuples to use as the
basis for building graphs in various formats.
Args:
data (list[str]): Scan result as list
flt (list[str]): List of event types to include. If not set everything is included.
Returns:
set[tuple[str, str]]: TBD
Raises:
ValueError: data value was invalid
TypeError: data type was invalid
"""
if not flt:
flt = []
if not isinstance(data, list):
raise TypeError(f"data is {type(data)}; expected list()")
if not data:
raise ValueError("data is empty")
def get_next_parent_entities(item: str, pids: typing.Optional[typing.List[str]] = None) -> typing.List[str]:
if not pids:
pids = []
ret: typing.List[str] = list()
for [parent, entity_id] in parents[item]:
if entity_id in pids:
continue
if parent in entities:
ret.append(parent)
else:
pids.append(entity_id)
for p in get_next_parent_entities(parent, pids):
ret.append(p)
return ret
mapping: typing.Set[typing.Tuple[str, str]] = set()
entities: typing.Dict[str, bool] = dict()
parents: typing.Dict[str, typing.List[typing.List[str]]] = dict()
for row in data:
if len(row) != 15:
raise ValueError(f"data row length is {len(row)}; expected 15")
if row[11] == "ENTITY" or row[11] == "INTERNAL":
# List of all valid entity values
if len(flt) > 0:
if row[4] in flt or row[11] == "INTERNAL":
entities[row[1]] = True
else:
entities[row[1]] = True
if row[1] not in parents:
parents[row[1]] = list()
parents[row[1]].append([row[2], row[8]])
for entity in entities:
for [parent, _id] in parents[entity]:
if parent in entities:
if entity != parent:
# Add entity parent
mapping.add((entity, parent))
else:
# Check parent for entityship.
next_parents = get_next_parent_entities(parent)
for next_parent in next_parents:
if entity != next_parent:
# Add next entity parent
mapping.add((entity, next_parent))
return mapping | Return a format-agnostic collection of tuples to use as the
basis for building graphs in various formats.
Args:
data (list[str]): Scan result as list
flt (list[str]): List of event types to include. If not set everything is included.
Returns:
set[tuple[str, str]]: TBD
Raises:
ValueError: data value was invalid
TypeError: data type was invalid | buildGraphData | python | smicallef/spiderfoot | spiderfoot/helpers.py | https://github.com/smicallef/spiderfoot/blob/master/spiderfoot/helpers.py | MIT |
def dataParentChildToTree(data: typing.Dict[str, typing.Optional[typing.List[str]]]) -> typing.Union[Tree, EmptyTree]:
"""Converts a dictionary of k -> array to a nested
tree that can be digested by d3 for visualizations.
Args:
data (dict): dictionary of k -> array
Returns:
dict: nested tree
Raises:
ValueError: data value was invalid
TypeError: data type was invalid
"""
if not isinstance(data, dict):
raise TypeError(f"data is {type(data)}; expected dict()")
if not data:
raise ValueError("data is empty")
def get_children(needle: str, haystack: typing.Dict[str, typing.Optional[typing.List[str]]]) -> typing.Optional[typing.List[Tree]]:
ret: typing.List[Tree] = list()
if needle not in list(haystack.keys()):
return None
if haystack[needle] is None:
return None
for c in haystack[needle]:
ret.append({"name": c, "children": get_children(c, haystack)})
return ret
# Find the element with no parents, that's our root.
root = None
for k in list(data.keys()):
if data[k] is None:
continue
contender = True
for ck in list(data.keys()):
if data[ck] is None:
continue
if k in data[ck]:
contender = False
if contender:
root = k
break
if root is None:
return {}
return {"name": root, "children": get_children(root, data)} | Converts a dictionary of k -> array to a nested
tree that can be digested by d3 for visualizations.
Args:
data (dict): dictionary of k -> array
Returns:
dict: nested tree
Raises:
ValueError: data value was invalid
TypeError: data type was invalid | dataParentChildToTree | python | smicallef/spiderfoot | spiderfoot/helpers.py | https://github.com/smicallef/spiderfoot/blob/master/spiderfoot/helpers.py | MIT |
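A tiny illustration of the parent/child conversion above; 'scan' is the only key that never appears as another key's child, so it becomes the root:
from spiderfoot import SpiderFootHelpers

data = {
    "scan": ["domain", "email"],
    "domain": ["ip"],
    "email": None,
    "ip": None,
}
print(SpiderFootHelpers.dataParentChildToTree(data))
# {'name': 'scan', 'children': [
#     {'name': 'domain', 'children': [{'name': 'ip', 'children': None}]},
#     {'name': 'email', 'children': None}]}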
def validLEI(lei: str) -> bool:
"""Check if the provided string is a valid Legal Entity Identifier (LEI).
Args:
lei (str): The LEI number to check.
Returns:
bool: string is a valid LEI
Note:
ISO 17442 has been withdrawn and is not accurate
https://www.gleif.org/en/about-lei/iso-17442-the-lei-code-structure
"""
if not isinstance(lei, str):
return False
if not re.match(r'^[A-Z0-9]{18}[0-9]{2}$', lei, re.IGNORECASE):
return False
return True | Check if the provided string is a valid Legal Entity Identifier (LEI).
Args:
lei (str): The LEI number to check.
Returns:
bool: string is a valid LEI
Note:
ISO 17442 has been withdrawn and is not accurate
https://www.gleif.org/en/about-lei/iso-17442-the-lei-code-structure | validLEI | python | smicallef/spiderfoot | spiderfoot/helpers.py | https://github.com/smicallef/spiderfoot/blob/master/spiderfoot/helpers.py | MIT |
def validEmail(email: str) -> bool:
"""Check if the provided string is a valid email address.
Args:
email (str): The email address to check.
Returns:
bool: email is a valid email address
"""
if not isinstance(email, str):
return False
if "@" not in email:
return False
if not re.match(r'^([\%a-zA-Z\.0-9_\-\+]+@[a-zA-Z\.0-9\-]+\.[a-zA-Z\.0-9\-]+)$', email):
return False
if len(email) < 6:
return False
# Skip strings with messed up URL encoding
if "%" in email:
return False
# Skip strings which may have been truncated
if "..." in email:
return False
return True | Check if the provided string is a valid email address.
Args:
email (str): The email address to check.
Returns:
bool: email is a valid email address | validEmail | python | smicallef/spiderfoot | spiderfoot/helpers.py | https://github.com/smicallef/spiderfoot/blob/master/spiderfoot/helpers.py | MIT |
def validPhoneNumber(phone: str) -> bool:
"""Check if the provided string is a valid phone number.
Args:
phone (str): The phone number to check.
Returns:
bool: string is a valid phone number
"""
if not isinstance(phone, str):
return False
try:
return phonenumbers.is_valid_number(phonenumbers.parse(phone))
except Exception:
return False | Check if the provided string is a valid phone number.
Args:
phone (str): The phone number to check.
Returns:
bool: string is a valid phone number | validPhoneNumber | python | smicallef/spiderfoot | spiderfoot/helpers.py | https://github.com/smicallef/spiderfoot/blob/master/spiderfoot/helpers.py | MIT |
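A few illustrative checks against the three validators above; the phone check relies on the phonenumbers dependency:
from spiderfoot import SpiderFootHelpers

print(SpiderFootHelpers.validEmail("abuse@example.com"))    # True
print(SpiderFootHelpers.validEmail("not-an-email"))         # False
print(SpiderFootHelpers.validLEI("529900T8BM49AURSDO55"))   # True (format/length check only)
print(SpiderFootHelpers.validLEI("NOT-A-VALID-LEI"))        # False
print(SpiderFootHelpers.validPhoneNumber("+442071838750"))  # validated via the phonenumbers library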
def genScanInstanceId() -> str:
"""Generate an globally unique ID for this scan.
Returns:
str: scan instance unique ID
"""
return str(uuid.uuid4()).split("-")[0].upper() | Generate a globally unique ID for this scan.
Returns:
str: scan instance unique ID | genScanInstanceId | python | smicallef/spiderfoot | spiderfoot/helpers.py | https://github.com/smicallef/spiderfoot/blob/master/spiderfoot/helpers.py | MIT |
def extractLinksFromHtml(url: str, data: str, domains: typing.Optional[typing.List[str]]) -> typing.Dict[str, ExtractedLink]:
"""Find all URLs within the supplied content.
This function does not fetch any URLs.
A dictionary will be returned, where each link will have the keys:
'source': The URL where the link was obtained from
'original': What the link looked like in the content it was obtained from
The key will be the *absolute* URL of the link obtained, so for example if
the link '/abc' was obtained from 'http://xyz.com', the key in the dict will
be 'http://xyz.com/abc' with the 'original' attribute set to '/abc'
Args:
url (str): base URL used to construct absolute URLs from relative URLs
data (str): data to examine for links
domains: TBD
Returns:
dict: links
Raises:
TypeError: argument was invalid type
"""
returnLinks: typing.Dict[str, ExtractedLink] = dict()
if not isinstance(url, str):
raise TypeError(f"url {type(url)}; expected str()")
if not isinstance(data, str):
raise TypeError(f"data {type(data)}; expected str()")
if isinstance(domains, str):
domains = [domains]
tags = {
'a': 'href',
'img': 'src',
'script': 'src',
'link': 'href',
'area': 'href',
'base': 'href',
'form': 'action'
}
links: typing.List[typing.Union[typing.List[str], str]] = []
try:
for t in list(tags.keys()):
for lnk in BeautifulSoup(data, features="lxml", parse_only=SoupStrainer(t)).find_all(t):
if lnk.has_attr(tags[t]):
links.append(lnk[tags[t]])
except BaseException:
return returnLinks
try:
proto = url.split(":")[0]
except BaseException:
proto = "http"
# Loop through all the URLs/links found
for link in links:
if not isinstance(link, str):
link = str(link)
link = link.strip()
if len(link) < 1:
continue
# Don't include stuff likely part of some dynamically built incomplete
# URL found in Javascript code (character is part of some logic)
if link[len(link) - 1] in ['.', '#'] or link[0] == '+' or 'javascript:' in link.lower() or '()' in link \
or '+"' in link or '"+' in link or "+'" in link or "'+" in link or "data:image" in link \
or ' +' in link or '+ ' in link:
continue
# Filter in-page links
if re.match('.*#.[^/]+', link):
continue
# Ignore mail links
if 'mailto:' in link.lower():
continue
# URL decode links
if '%2f' in link.lower():
link = urllib.parse.unquote(link)
absLink = None
# Capture the absolute link:
# If the link contains ://, it is already an absolute link
if '://' in link:
absLink = link
# If the link starts with //, it is likely a protocol relative URL
elif link.startswith('//'):
absLink = proto + ':' + link
# If the link starts with a /, the absolute link is off the base URL
elif link.startswith('/'):
absLink = SpiderFootHelpers.urlBaseUrl(url) + link
# Maybe the domain was just mentioned and not a link, so we make it one
for domain in domains:
if absLink is None and domain.lower() in link.lower():
absLink = proto + '://' + link
# Otherwise, it's a flat link within the current directory
if absLink is None:
absLink = SpiderFootHelpers.urlBaseDir(url) + link
# Translate any relative pathing (../)
absLink = SpiderFootHelpers.urlRelativeToAbsolute(absLink)
returnLinks[absLink] = {'source': url, 'original': link}
return returnLinks | Find all URLs within the supplied content.
This function does not fetch any URLs.
A dictionary will be returned, where each link will have the keys:
'source': The URL where the link was obtained from
'original': What the link looked like in the content it was obtained from
The key will be the *absolute* URL of the link obtained, so for example if
the link '/abc' was obtained from 'http://xyz.com', the key in the dict will
be 'http://xyz.com/abc' with the 'original' attribute set to '/abc'
Args:
url (str): base URL used to construct absolute URLs from relative URLs
data (str): data to examine for links
domains: TBD
Returns:
dict: links
Raises:
TypeError: argument was invalid type | extractLinksFromHtml | python | smicallef/spiderfoot | spiderfoot/helpers.py | https://github.com/smicallef/spiderfoot/blob/master/spiderfoot/helpers.py | MIT |
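An illustrative call showing how relative links are resolved against the page URL (mailto links are dropped):
from spiderfoot import SpiderFootHelpers

page = '<html><body><a href="/about">About</a><img src="logo.png"><a href="mailto:abuse@example.com">mail</a></body></html>'
links = SpiderFootHelpers.extractLinksFromHtml("http://xyz.com/index.html", page, ["xyz.com"])
print(links)
# {'http://xyz.com/about': {'source': 'http://xyz.com/index.html', 'original': '/about'},
#  'http://xyz.com/logo.png': {'source': 'http://xyz.com/index.html', 'original': 'logo.png'}}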
def extractHashesFromText(data: str) -> typing.List[typing.Tuple[str, str]]:
"""Extract all hashes within the supplied content.
Args:
data (str): text to search for hashes
Returns:
list[tuple[str, str]]: list of hashes
"""
ret: typing.List[typing.Tuple[str, str]] = list()
if not isinstance(data, str):
return ret
hashes = {
"MD5": re.compile(r"(?:[^a-fA-F\d]|\b)([a-fA-F\d]{32})(?:[^a-fA-F\d]|\b)"),
"SHA1": re.compile(r"(?:[^a-fA-F\d]|\b)([a-fA-F\d]{40})(?:[^a-fA-F\d]|\b)"),
"SHA256": re.compile(r"(?:[^a-fA-F\d]|\b)([a-fA-F\d]{64})(?:[^a-fA-F\d]|\b)"),
"SHA512": re.compile(r"(?:[^a-fA-F\d]|\b)([a-fA-F\d]{128})(?:[^a-fA-F\d]|\b)")
}
for h in hashes:
matches = re.findall(hashes[h], data)
for m in matches:
ret.append((h, m))
return ret | Extract all hashes within the supplied content.
Args:
data (str): text to search for hashes
Returns:
list[tuple[str, str]]: list of hashes | extractHashesFromText | python | smicallef/spiderfoot | spiderfoot/helpers.py | https://github.com/smicallef/spiderfoot/blob/master/spiderfoot/helpers.py | MIT |
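An illustrative extraction; the MD5 and SHA1 of an empty input are used as sample hashes:
from spiderfoot import SpiderFootHelpers

text = "Hashes seen: d41d8cd98f00b204e9800998ecf8427e and da39a3ee5e6b4b0d3255bfef95601890afd80709."
print(SpiderFootHelpers.extractHashesFromText(text))
# [('MD5', 'd41d8cd98f00b204e9800998ecf8427e'), ('SHA1', 'da39a3ee5e6b4b0d3255bfef95601890afd80709')]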
def extractUrlsFromRobotsTxt(robotsTxtData: str) -> typing.List[str]:
"""Parse the contents of robots.txt.
Args:
robotsTxtData (str): robots.txt file contents
Returns:
list[str]: list of patterns which should not be followed
Todo:
Check and parse User-Agent.
Fix whitespace parsing; i.e., " " is not a valid disallowed path
"""
returnArr: typing.List[str] = list()
if not isinstance(robotsTxtData, str):
return returnArr
for line in robotsTxtData.splitlines():
if line.lower().startswith('disallow:'):
m = re.match(r'disallow:\s*(.[^ #]*)', line, re.IGNORECASE)
if m:
returnArr.append(m.group(1))
return returnArr | Parse the contents of robots.txt.
Args:
robotsTxtData (str): robots.txt file contents
Returns:
list[str]: list of patterns which should not be followed
Todo:
Check and parse User-Agent.
Fix whitespace parsing; i.e., " " is not a valid disallowed path | extractUrlsFromRobotsTxt | python | smicallef/spiderfoot | spiderfoot/helpers.py | https://github.com/smicallef/spiderfoot/blob/master/spiderfoot/helpers.py | MIT
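A quick sketch of parsing an invented robots.txt with the helper above (same import assumption); only Disallow entries are returned.

from spiderfoot import SpiderFootHelpers  # assumed import path

robots_txt = "User-agent: *\nDisallow: /admin/\nDisallow: /tmp/\nAllow: /public/"
print(SpiderFootHelpers.extractUrlsFromRobotsTxt(robots_txt))
# Expected: ['/admin/', '/tmp/']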
def extractPgpKeysFromText(data: str) -> typing.List[str]:
"""Extract all PGP keys within the supplied content.
Args:
data (str): text to search for PGP keys
Returns:
list[str]: list of PGP keys
"""
if not isinstance(data, str):
return list()
keys: typing.Set[str] = set()
pattern = re.compile("(-----BEGIN.*?END.*?BLOCK-----)", re.MULTILINE | re.DOTALL)
for key in re.findall(pattern, data):
if len(key) >= 300:
keys.add(key)
return list(keys) | Extract all PGP keys within the supplied content.
Args:
data (str): text to search for PGP keys
Returns:
list[str]: list of PGP keys | extractPgpKeysFromText | python | smicallef/spiderfoot | spiderfoot/helpers.py | https://github.com/smicallef/spiderfoot/blob/master/spiderfoot/helpers.py | MIT |
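Illustration of the 300-character threshold in the PGP extractor above; the key body here is filler text, not real key material (same import assumption).

from spiderfoot import SpiderFootHelpers  # assumed import path

fake_key = (
    "-----BEGIN PGP PUBLIC KEY BLOCK-----\n"
    + "A" * 300 + "\n"  # filler standing in for base64 key material
    + "-----END PGP PUBLIC KEY BLOCK-----"
)
print(len(SpiderFootHelpers.extractPgpKeysFromText("noise " + fake_key + " noise")))
# Expected: 1 -- blocks shorter than 300 characters are discarded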
def extractEmailsFromText(data: str) -> typing.List[str]:
"""Extract all email addresses within the supplied content.
Args:
data (str): text to search for email addresses
Returns:
list[str]: list of email addresses
"""
if not isinstance(data, str):
return list()
emails: typing.Set[str] = set()
matches = re.findall(r'([\%a-zA-Z\.0-9_\-\+]+@[a-zA-Z\.0-9\-]+\.[a-zA-Z\.0-9\-]+)', data)
for match in matches:
if SpiderFootHelpers.validEmail(match):
emails.add(match)
return list(emails) | Extract all email addresses within the supplied content.
Args:
data (str): text to search for email addresses
Returns:
list[str]: list of email addresses | extractEmailsFromText | python | smicallef/spiderfoot | spiderfoot/helpers.py | https://github.com/smicallef/spiderfoot/blob/master/spiderfoot/helpers.py | MIT |
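A short usage sketch of the email extractor above (same import assumption); results depend on the validEmail() check, so the expected output is indicative only.

from spiderfoot import SpiderFootHelpers  # assumed import path

sample = "Contact [email protected] or [email protected]; user@localhost has no TLD and is skipped."
print(sorted(SpiderFootHelpers.extractEmailsFromText(sample)))
# Expected (roughly): ['[email protected]', '[email protected]']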
def extractIbansFromText(data: str) -> typing.List[str]:
"""Find all International Bank Account Numbers (IBANs) within the supplied content.
Extracts possible IBANs using a generic regex.
Checks whether possible IBANs are valid or not
using country-wise length check and Mod 97 algorithm.
Args:
data (str): text to search for IBANs
Returns:
list[str]: list of IBAN
"""
if not isinstance(data, str):
return list()
ibans: typing.Set[str] = set()
# Dictionary of country codes and their respective IBAN lengths
ibanCountryLengths = {
"AL": 28, "AD": 24, "AT": 20, "AZ": 28,
"ME": 22, "BH": 22, "BY": 28, "BE": 16,
"BA": 20, "BR": 29, "BG": 22, "CR": 22,
"HR": 21, "CY": 28, "CZ": 24, "DK": 18,
"DO": 28, "EG": 29, "SV": 28, "FO": 18,
"FI": 18, "FR": 27, "GE": 22, "DE": 22,
"GI": 23, "GR": 27, "GL": 18, "GT": 28,
"VA": 22, "HU": 28, "IS": 26, "IQ": 23,
"IE": 22, "IL": 23, "JO": 30, "KZ": 20,
"XK": 20, "KW": 30, "LV": 21, "LB": 28,
"LI": 21, "LT": 20, "LU": 20, "MT": 31,
"MR": 27, "MU": 30, "MD": 24, "MC": 27,
"DZ": 24, "AO": 25, "BJ": 28, "VG": 24,
"BF": 27, "BI": 16, "CM": 27, "CV": 25,
"CG": 27, "EE": 20, "GA": 27, "GG": 22,
"IR": 26, "IM": 22, "IT": 27, "CI": 28,
"JE": 22, "MK": 19, "MG": 27, "ML": 28,
"MZ": 25, "NL": 18, "NO": 15, "PK": 24,
"PS": 29, "PL": 28, "PT": 25, "QA": 29,
"RO": 24, "LC": 32, "SM": 27, "ST": 25,
"SA": 24, "SN": 28, "RS": 22, "SC": 31,
"SK": 24, "SI": 19, "ES": 24, "CH": 21,
"TL": 23, "TN": 24, "TR": 26, "UA": 29,
"AE": 23, "GB": 22, "SE": 24
}
# Normalize input data to remove whitespace
data = data.replace(" ", "")
# Extract alphanumeric characters of lengths ranging from 15 to 32
# and starting with two characters
matches = re.findall("[A-Za-z]{2}[A-Za-z0-9]{13,30}", data)
for match in matches:
iban = match.upper()
countryCode = iban[0:2]
if countryCode not in ibanCountryLengths.keys():
continue
if len(iban) != ibanCountryLengths[countryCode]:
continue
# Convert IBAN to integer format.
# Move the first 4 characters to the end of the string,
# then convert all characters to integers; where A = 10, B = 11, ...., Z = 35
iban_int = iban[4:] + iban[0:4]
for character in iban_int:
if character.isalpha():
iban_int = iban_int.replace(character, str((ord(character) - 65) + 10))
# Check IBAN integer mod 97 for remainder
if int(iban_int) % 97 != 1:
continue
ibans.add(iban)
return list(ibans) | Find all International Bank Account Numbers (IBANs) within the supplied content.
Extracts possible IBANs using a generic regex.
Checks whether possible IBANs are valid or not
using country-wise length check and Mod 97 algorithm.
Args:
data (str): text to search for IBANs
Returns:
list[str]: list of IBAN | extractIbansFromText | python | smicallef/spiderfoot | spiderfoot/helpers.py | https://github.com/smicallef/spiderfoot/blob/master/spiderfoot/helpers.py | MIT |
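A standalone worked example of the Mod-97 step above, runnable without SpiderFoot; it uses the widely published GB test IBAN, which is expected to validate.

def iban_mod97_ok(iban: str) -> bool:
    iban = iban.replace(" ", "").upper()
    rearranged = iban[4:] + iban[:4]  # move country code and check digits to the end
    digits = "".join(str(ord(c) - 55) if c.isalpha() else c for c in rearranged)  # A=10 ... Z=35
    return int(digits) % 97 == 1

print(iban_mod97_ok("GB82 WEST 1234 5698 7654 32"))  # expected: True for this textbook example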
def extractCreditCardsFromText(data: str) -> typing.List[str]:
"""Find all credit card numbers with the supplied content.
Extracts numbers with lengths ranging from 13 - 19 digits
Checks the numbers using Luhn's algorithm to verify
if the number is a valid credit card number or not
Args:
data (str): text to search for credit card numbers
Returns:
list[str]: list of credit card numbers
"""
if not isinstance(data, str):
return list()
creditCards: typing.Set[str] = set()
# Remove whitespace from data.
# Credit cards might contain spaces between them
# which will cause regex mismatch
data = data.replace(" ", "")
# Extract all numbers with lengths ranging from 13 - 19 digits
matches = re.findall(r"[0-9]{13,19}", data)
# Verify each extracted number using Luhn's algorithm
for match in matches:
if int(match) == 0:
continue
ccNumber = match
ccNumberTotal = 0
isSecondDigit = False
for digit in ccNumber[::-1]:
d = int(digit)
if isSecondDigit:
d *= 2
ccNumberTotal += int(d / 10)
ccNumberTotal += d % 10
isSecondDigit = not isSecondDigit
if ccNumberTotal % 10 == 0:
creditCards.add(match)
return list(creditCards) | Find all credit card numbers within the supplied content.
Extracts numbers with lengths ranging from 13 - 19 digits
Checks the numbers using Luhn's algorithm to verify
if the number is a valid credit card number or not
Args:
data (str): text to search for credit card numbers
Returns:
list[str]: list of credit card numbers | extractCreditCardsFromText | python | smicallef/spiderfoot | spiderfoot/helpers.py | https://github.com/smicallef/spiderfoot/blob/master/spiderfoot/helpers.py | MIT |
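A standalone restatement of the Luhn check above, using the well-known '4111 1111 1111 1111' Visa test number (not a real card); it mirrors the digit-doubling loop in the helper.

def luhn_ok(number: str) -> bool:
    total, double = 0, False
    for digit in reversed(number.replace(" ", "")):
        d = int(digit)
        if double:
            d *= 2
        total += d // 10 + d % 10  # sum the digits of d, as in the helper above
        double = not double
    return total % 10 == 0

print(luhn_ok("4111 1111 1111 1111"))  # expected: True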
def extractUrlsFromText(content: str) -> typing.List[str]:
"""Extract all URLs from a string.
Args:
content (str): text to search for URLs
Returns:
list[str]: list of identified URLs
"""
if not isinstance(content, str):
return []
# https://tools.ietf.org/html/rfc3986#section-3.3
return re.findall(r"(https?://[a-zA-Z0-9-\.:]+/[\-\._~!\$&'\(\)\*\+\,\;=:@/a-zA-Z0-9]*)", html.unescape(content)) | Extract all URLs from a string.
Args:
content (str): text to search for URLs
Returns:
list[str]: list of identified URLs | extractUrlsFromText | python | smicallef/spiderfoot | spiderfoot/helpers.py | https://github.com/smicallef/spiderfoot/blob/master/spiderfoot/helpers.py | MIT |
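A minimal usage sketch of the URL extractor above (same import assumption); note the regex stops at characters outside its path class, such as '?' or '#'.

from spiderfoot import SpiderFootHelpers  # assumed import path

text = "See https://example.com/page/1 and http://test.example.org/a/b for details."
print(SpiderFootHelpers.extractUrlsFromText(text))
# Expected (roughly): ['https://example.com/page/1', 'http://test.example.org/a/b']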
def sslDerToPem(der_cert: bytes) -> str:
"""Given a certificate as a DER-encoded blob of bytes, returns a PEM-encoded string version of the same certificate.
Args:
der_cert (bytes): certificate in DER format
Returns:
str: PEM-encoded certificate
Raises:
TypeError: arg type was invalid
"""
if not isinstance(der_cert, bytes):
raise TypeError(f"der_cert is {type(der_cert)}; expected bytes()")
return ssl.DER_cert_to_PEM_cert(der_cert) | Given a certificate as a DER-encoded blob of bytes, returns a PEM-encoded string version of the same certificate.
Args:
der_cert (bytes): certificate in DER format
Returns:
str: PEM-encoded certificate
Raises:
TypeError: arg type was invalid | sslDerToPem | python | smicallef/spiderfoot | spiderfoot/helpers.py | https://github.com/smicallef/spiderfoot/blob/master/spiderfoot/helpers.py | MIT |
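Sketch of a DER-to-PEM round trip through the helper above; the hostname is a placeholder and the example needs network access (same import assumption).

import ssl
from spiderfoot import SpiderFootHelpers  # assumed import path

pem = ssl.get_server_certificate(("example.com", 443))  # fetch a certificate in PEM form
der = ssl.PEM_cert_to_DER_cert(pem)                     # convert to DER with the stdlib
print(SpiderFootHelpers.sslDerToPem(der).startswith("-----BEGIN CERTIFICATE-----"))
# Expected: True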
def countryNameFromCountryCode(countryCode: str) -> typing.Optional[str]:
"""Convert a country code to full country name.
Args:
countryCode (str): country code
Returns:
str: country name
"""
if not isinstance(countryCode, str):
return None
return SpiderFootHelpers.countryCodes().get(countryCode.upper()) | Convert a country code to full country name.
Args:
countryCode (str): country code
Returns:
str: country name | countryNameFromCountryCode | python | smicallef/spiderfoot | spiderfoot/helpers.py | https://github.com/smicallef/spiderfoot/blob/master/spiderfoot/helpers.py | MIT |
def countryNameFromTld(tld: str) -> typing.Optional[str]:
"""Retrieve the country name associated with a TLD.
Args:
tld (str): Top level domain
Returns:
str: country name
"""
if not isinstance(tld, str):
return None
country_name = SpiderFootHelpers.countryCodes().get(tld.upper())
if country_name:
return country_name
country_tlds = {
# List of TLD not associated with any country
"COM": "United States",
"NET": "United States",
"ORG": "United States",
"GOV": "United States",
"MIL": "United States"
}
country_name = country_tlds.get(tld.upper())
if country_name:
return country_name
return None | Retrieve the country name associated with a TLD.
Args:
tld (str): Top level domain
Returns:
str: country name | countryNameFromTld | python | smicallef/spiderfoot | spiderfoot/helpers.py | https://github.com/smicallef/spiderfoot/blob/master/spiderfoot/helpers.py | MIT |
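A brief usage sketch of the TLD lookup above (same import assumption), showing the country-code path, the generic-TLD fallback and the None case.

from spiderfoot import SpiderFootHelpers  # assumed import path

print(SpiderFootHelpers.countryNameFromTld("de"))   # expected: 'Germany'
print(SpiderFootHelpers.countryNameFromTld("com"))  # expected: 'United States' (generic TLD fallback)
print(SpiderFootHelpers.countryNameFromTld("xyz"))  # expected: None (unmapped TLD)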
def countryCodes() -> typing.Dict[str, str]:
"""Dictionary of country codes and associated country names.
Returns:
dict[str, str]: country codes and associated country names
"""
return {
"AF": "Afghanistan",
"AX": "Aland Islands",
"AL": "Albania",
"DZ": "Algeria",
"AS": "American Samoa",
"AD": "Andorra",
"AO": "Angola",
"AI": "Anguilla",
"AQ": "Antarctica",
"AG": "Antigua and Barbuda",
"AR": "Argentina",
"AM": "Armenia",
"AW": "Aruba",
"AU": "Australia",
"AT": "Austria",
"AZ": "Azerbaijan",
"BS": "Bahamas",
"BH": "Bahrain",
"BD": "Bangladesh",
"BB": "Barbados",
"BY": "Belarus",
"BE": "Belgium",
"BZ": "Belize",
"BJ": "Benin",
"BM": "Bermuda",
"BT": "Bhutan",
"BO": "Bolivia",
"BQ": "Bonaire, Saint Eustatius and Saba",
"BA": "Bosnia and Herzegovina",
"BW": "Botswana",
"BV": "Bouvet Island",
"BR": "Brazil",
"IO": "British Indian Ocean Territory",
"VG": "British Virgin Islands",
"BN": "Brunei",
"BG": "Bulgaria",
"BF": "Burkina Faso",
"BI": "Burundi",
"KH": "Cambodia",
"CM": "Cameroon",
"CA": "Canada",
"CV": "Cape Verde",
"KY": "Cayman Islands",
"CF": "Central African Republic",
"TD": "Chad",
"CL": "Chile",
"CN": "China",
"CX": "Christmas Island",
"CC": "Cocos Islands",
"CO": "Colombia",
"KM": "Comoros",
"CK": "Cook Islands",
"CR": "Costa Rica",
"HR": "Croatia",
"CU": "Cuba",
"CW": "Curacao",
"CY": "Cyprus",
"CZ": "Czech Republic",
"CD": "Democratic Republic of the Congo",
"DK": "Denmark",
"DJ": "Djibouti",
"DM": "Dominica",
"DO": "Dominican Republic",
"TL": "East Timor",
"EC": "Ecuador",
"EG": "Egypt",
"SV": "El Salvador",
"GQ": "Equatorial Guinea",
"ER": "Eritrea",
"EE": "Estonia",
"ET": "Ethiopia",
"FK": "Falkland Islands",
"FO": "Faroe Islands",
"FJ": "Fiji",
"FI": "Finland",
"FR": "France",
"GF": "French Guiana",
"PF": "French Polynesia",
"TF": "French Southern Territories",
"GA": "Gabon",
"GM": "Gambia",
"GE": "Georgia",
"DE": "Germany",
"GH": "Ghana",
"GI": "Gibraltar",
"GR": "Greece",
"GL": "Greenland",
"GD": "Grenada",
"GP": "Guadeloupe",
"GU": "Guam",
"GT": "Guatemala",
"GG": "Guernsey",
"GN": "Guinea",
"GW": "Guinea-Bissau",
"GY": "Guyana",
"HT": "Haiti",
"HM": "Heard Island and McDonald Islands",
"HN": "Honduras",
"HK": "Hong Kong",
"HU": "Hungary",
"IS": "Iceland",
"IN": "India",
"ID": "Indonesia",
"IR": "Iran",
"IQ": "Iraq",
"IE": "Ireland",
"IM": "Isle of Man",
"IL": "Israel",
"IT": "Italy",
"CI": "Ivory Coast",
"JM": "Jamaica",
"JP": "Japan",
"JE": "Jersey",
"JO": "Jordan",
"KZ": "Kazakhstan",
"KE": "Kenya",
"KI": "Kiribati",
"XK": "Kosovo",
"KW": "Kuwait",
"KG": "Kyrgyzstan",
"LA": "Laos",
"LV": "Latvia",
"LB": "Lebanon",
"LS": "Lesotho",
"LR": "Liberia",
"LY": "Libya",
"LI": "Liechtenstein",
"LT": "Lithuania",
"LU": "Luxembourg",
"MO": "Macao",
"MK": "Macedonia",
"MG": "Madagascar",
"MW": "Malawi",
"MY": "Malaysia",
"MV": "Maldives",
"ML": "Mali",
"MT": "Malta",
"MH": "Marshall Islands",
"MQ": "Martinique",
"MR": "Mauritania",
"MU": "Mauritius",
"YT": "Mayotte",
"MX": "Mexico",
"FM": "Micronesia",
"MD": "Moldova",
"MC": "Monaco",
"MN": "Mongolia",
"ME": "Montenegro",
"MS": "Montserrat",
"MA": "Morocco",
"MZ": "Mozambique",
"MM": "Myanmar",
"NA": "Namibia",
"NR": "Nauru",
"NP": "Nepal",
"NL": "Netherlands",
"AN": "Netherlands Antilles",
"NC": "New Caledonia",
"NZ": "New Zealand",
"NI": "Nicaragua",
"NE": "Niger",
"NG": "Nigeria",
"NU": "Niue",
"NF": "Norfolk Island",
"KP": "North Korea",
"MP": "Northern Mariana Islands",
"NO": "Norway",
"OM": "Oman",
"PK": "Pakistan",
"PW": "Palau",
"PS": "Palestinian Territory",
"PA": "Panama",
"PG": "Papua New Guinea",
"PY": "Paraguay",
"PE": "Peru",
"PH": "Philippines",
"PN": "Pitcairn",
"PL": "Poland",
"PT": "Portugal",
"PR": "Puerto Rico",
"QA": "Qatar",
"CG": "Republic of the Congo",
"RE": "Reunion",
"RO": "Romania",
"RU": "Russia",
"RW": "Rwanda",
"BL": "Saint Barthelemy",
"SH": "Saint Helena",
"KN": "Saint Kitts and Nevis",
"LC": "Saint Lucia",
"MF": "Saint Martin",
"PM": "Saint Pierre and Miquelon",
"VC": "Saint Vincent and the Grenadines",
"WS": "Samoa",
"SM": "San Marino",
"ST": "Sao Tome and Principe",
"SA": "Saudi Arabia",
"SN": "Senegal",
"RS": "Serbia",
"CS": "Serbia and Montenegro",
"SC": "Seychelles",
"SL": "Sierra Leone",
"SG": "Singapore",
"SX": "Sint Maarten",
"SK": "Slovakia",
"SI": "Slovenia",
"SB": "Solomon Islands",
"SO": "Somalia",
"ZA": "South Africa",
"GS": "South Georgia and the South Sandwich Islands",
"KR": "South Korea",
"SS": "South Sudan",
"ES": "Spain",
"LK": "Sri Lanka",
"SD": "Sudan",
"SR": "Suriname",
"SJ": "Svalbard and Jan Mayen",
"SZ": "Swaziland",
"SE": "Sweden",
"CH": "Switzerland",
"SY": "Syria",
"TW": "Taiwan",
"TJ": "Tajikistan",
"TZ": "Tanzania",
"TH": "Thailand",
"TG": "Togo",
"TK": "Tokelau",
"TO": "Tonga",
"TT": "Trinidad and Tobago",
"TN": "Tunisia",
"TR": "Turkey",
"TM": "Turkmenistan",
"TC": "Turks and Caicos Islands",
"TV": "Tuvalu",
"VI": "U.S. Virgin Islands",
"UG": "Uganda",
"UA": "Ukraine",
"AE": "United Arab Emirates",
"GB": "United Kingdom",
"US": "United States",
"UM": "United States Minor Outlying Islands",
"UY": "Uruguay",
"UZ": "Uzbekistan",
"VU": "Vanuatu",
"VA": "Vatican",
"VE": "Venezuela",
"VN": "Vietnam",
"WF": "Wallis and Futuna",
"EH": "Western Sahara",
"YE": "Yemen",
"ZM": "Zambia",
"ZW": "Zimbabwe",
# Below are not country codes but recognized as regions / TLDs
"AC": "Ascension Island",
"EU": "European Union",
"SU": "Soviet Union",
"UK": "United Kingdom"
} | Dictionary of country codes and associated country names.
Returns:
dict[str, str]: country codes and associated country names | countryCodes | python | smicallef/spiderfoot | spiderfoot/helpers.py | https://github.com/smicallef/spiderfoot/blob/master/spiderfoot/helpers.py | MIT |
def sanitiseInput(cmd: str, extra: typing.Optional[typing.List[str]] = None) -> bool:
"""Verify input command is safe to execute
Args:
cmd (str): The command to check
extra (list[str]): Additional characters to consider safe
Returns:
bool: command is "safe"
"""
if not extra:
extra = []
chars = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm',
'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', '-', '.']
if extra:
chars.extend(extra)
for c in cmd:
if c.lower() not in chars:
return False
if '..' in cmd:
return False
if cmd.startswith("-"):
return False
if len(cmd) < 3:
return False
return True | Verify the input command is safe to execute.
Args:
cmd (str): The command to check
extra (list[str]): Additional characters to consider safe
Returns:
bool: command is "safe" | sanitiseInput | python | smicallef/spiderfoot | spiderfoot/helpers.py | https://github.com/smicallef/spiderfoot/blob/master/spiderfoot/helpers.py | MIT |
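A quick usage sketch of the allow-list check above (same import assumption), showing the traversal, leading-dash and character rules.

from spiderfoot import SpiderFootHelpers  # assumed import path

print(SpiderFootHelpers.sanitiseInput("example.com"))         # expected: True (letters, digits, '-' and '.')
print(SpiderFootHelpers.sanitiseInput("example.com/../etc"))  # expected: False ('/' not allowed, '..' present)
print(SpiderFootHelpers.sanitiseInput("-rf"))                 # expected: False (leading dash)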
def findCaller(self, stack_info: bool = False, stacklevel: int = 1) -> tuple:
"""Find the stack frame of the caller so that we can note the source
file name, line number and function name.
Args:
stack_info (bool): TBD
stacklevel (int): TBD
Returns:
tuple: filename, line number, module name, and stack trace
"""
f = logging.currentframe()
# On some versions of IronPython, currentframe() returns None if
# IronPython isn't run with -X:Frames.
if f is not None:
f = f.f_back
orig_f = f
while f and stacklevel > 1:
f = f.f_back
stacklevel -= 1
if not f:
f = orig_f
rv = "(unknown file)", 0, "(unknown function)", None
while hasattr(f, "f_code"):
co = f.f_code
filename = os.path.normcase(co.co_filename)
if filename in (logging._srcfile, _srcfile): # This is the only change
f = f.f_back
continue
sinfo = None
if stack_info:
sio = io.StringIO()
sio.write('Stack (most recent call last):\n')
traceback.print_stack(f, file=sio)
sinfo = sio.getvalue()
if sinfo[-1] == '\n':
sinfo = sinfo[:-1]
sio.close()
rv = (co.co_filename, f.f_lineno, co.co_name, sinfo)
break
return rv # noqa R504 | Find the stack frame of the caller so that we can note the source
file name, line number and function name.
Args:
stack_info (bool): TBD
stacklevel (int): TBD
Returns:
tuple: filename, line number, module name, and stack trace | findCaller | python | smicallef/spiderfoot | spiderfoot/plugin.py | https://github.com/smicallef/spiderfoot/blob/master/spiderfoot/plugin.py | MIT |
def _updateSocket(self, socksProxy: str) -> None:
"""Hack to override module's use of socket, replacing it with
one that uses the supplied SOCKS server.
Args:
socksProxy (str): SOCKS proxy
"""
self.socksProxy = socksProxy | Hack to override module's use of socket, replacing it with
one that uses the supplied SOCKS server.
Args:
socksProxy (str): SOCKS proxy | _updateSocket | python | smicallef/spiderfoot | spiderfoot/plugin.py | https://github.com/smicallef/spiderfoot/blob/master/spiderfoot/plugin.py | MIT |
def clearListeners(self) -> None:
"""Used to clear any listener relationships, etc. This is needed because
Python seems to cache local variables even between threads."""
self._listenerModules = list()
self._stopScanning = False | Used to clear any listener relationships, etc. This is needed because
Python seems to cache local variables even between threads. | clearListeners | python | smicallef/spiderfoot | spiderfoot/plugin.py | https://github.com/smicallef/spiderfoot/blob/master/spiderfoot/plugin.py | MIT |
def setup(self, sf, userOpts: dict = {}) -> None:
"""Will always be overriden by the implementer.
Args:
sf (SpiderFoot): SpiderFoot object
userOpts (dict): TBD
"""
pass | Will always be overridden by the implementer.
Args:
sf (SpiderFoot): SpiderFoot object
userOpts (dict): TBD | setup | python | smicallef/spiderfoot | spiderfoot/plugin.py | https://github.com/smicallef/spiderfoot/blob/master/spiderfoot/plugin.py | MIT |
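For context, a minimal sketch of how a SpiderFoot module conventionally overrides this stub; the class name and option are invented, and tempStorage() is assumed to be the plugin base-class helper used for per-scan state.

from spiderfoot import SpiderFootPlugin  # assumed import path, as modules in this repo use

class sfp_example(SpiderFootPlugin):
    """Illustrative module skeleton, not a real SpiderFoot module."""

    opts = {"api_key": ""}

    def setup(self, sfc, userOpts=dict()):
        self.sf = sfc                       # keep a handle to the SpiderFoot helper object
        self.results = self.tempStorage()   # assumed base-class helper for per-scan de-duplication
        for opt in list(userOpts.keys()):
            self.opts[opt] = userOpts[opt]  # apply user-supplied option overrides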