Breacher/breacher.py
import requests #module for making requests to a webpage
import threading #module for multi-threading
import argparse #module for parsing command line arguments

parser = argparse.ArgumentParser() #defines the parser

#Arguments that can be supplied
parser.add_argument("-u", help="target url", dest='target')
parser.add_argument("--path", help="custom path prefix", dest='prefix')
parser.add_argument("--type", help="set the type i.e. html, asp, php", dest='type')
parser.add_argument("--fast", help="uses multithreading", dest='fast', action="store_true")

args = parser.parse_args() #parses the supplied arguments

target = args.target #gets the target from the -u argument
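
#Example invocations (example.com is a hypothetical target, shown for illustration):
#   python breacher.py -u example.com --type php
#   python breacher.py -u example.com --path /blog --fast
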
#Fancy banner :p
print ('''\033[1;34m______   ______ _______ _______ _______ _     _ _______  ______
|_____] |_____/ |______ |_____| |       |_____| |______ |_____/
|_____] |    \_ |______ |     | |_____  |     | |______ |    \_

\033[37mMade with \033[91m<3\033[37m By D3V\033[1;m''')
print ('''\n I am not responsible for your shit, and if you get an error while
 running Breacher, chances are the target isn't responding.\n''')
print ('\033[1;31m--------------------------------------------------------------------------\033[1;m\n')

try:
    target = target.replace('https://', '') #removes https://
except AttributeError: #target is None when -u wasn't supplied
    print ('\033[1;31m[-]\033[1;m -u argument is not supplied. Enter python breacher.py -h for help')
    quit()

target = target.replace('http://', '') #and http:// from the url
target = target.rstrip('/') #strips any trailing / so we get example.com and not example.com/
target = 'http://' + target #adds http:// back so we have a well-formed url
if args.prefix is not None:
    target = target + args.prefix #appends the custom path prefix, if one was given
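
#For example, 'https://example.com/' is normalised to 'http://example.com', and with
#'--path /blog' it becomes 'http://example.com/blog' (example.com is illustrative)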

try:
    r = requests.get(target + '/robots.txt') #requests example.com/robots.txt
    if '<html>' in r.text: #an html error page means it isn't a real robots.txt
        print (' \033[1;31m[-]\033[1;m Robots.txt not found\n')
    else: #else we got robots.txt
        print (' \033[1;32m[+]\033[0m Robots.txt found. Check for any interesting entry\n')
        print (r.text)
except requests.exceptions.RequestException: #the request failed, so robots.txt is unreachable
    print (' \033[1;31m[-]\033[1;m Robots.txt not found\n')

print ('\033[1;31m--------------------------------------------------------------------------\033[1;m\n')

def scan(links):
    for link in links: #fetches one link from the links list
        link = target + link #builds the full url, e.g. example.com/admin/
        r = requests.get(link, allow_redirects=False) #requests the combined url; redirects are not followed so 302s stay visible
        http = r.status_code #fetches the http response code
        if http == 200: #200 means the url points to a valid resource i.e. an admin panel
            print (' \033[1;32m[+]\033[0m Admin panel found: %s' % link)
        elif http == 404: #404 means not found
            print (' \033[1;31m[-]\033[1;m %s' % link)
        elif http == 302: #302 means redirection, a sign of an Execution After Redirect (EAR) flaw
            print (' \033[1;32m[+]\033[0m Potential EAR vulnerability found : ' + link)
        else:
            print (' \033[1;31m[-]\033[1;m %s' % link)
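
#A quick illustration, assuming hypothetical wordlist entries:
#   scan(['/admin/', '/admin.php']) #prints one colour-coded status line per candidate path
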
paths = [] #list of paths

def get_paths(type):
    try:
        with open('paths.txt', 'r') as wordlist: #opens paths.txt and grabs links that match the --type argument
            for path in wordlist:
                path = path.replace("\n", "") #strips the trailing newline
                try:
                    if 'asp' in type: #keep only asp paths
                        if 'html' in path or 'php' in path:
                            pass
                        else:
                            paths.append(path)
                    if 'php' in type: #keep only php paths
                        if 'asp' in path or 'html' in path:
                            pass
                        else:
                            paths.append(path)
                    if 'html' in type: #keep only html paths
                        if 'asp' in path or 'php' in path:
                            pass
                        else:
                            paths.append(path)
                except TypeError: #--type wasn't supplied, so type is None and we keep every path
                    paths.append(path)
    except IOError:
        print ('\033[1;31m[-]\033[1;m Wordlist not found!')
        quit()
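
#For example, with --type php a wordlist containing /admin.php, /admin.html and
#/admin.asp (hypothetical entries) leaves paths == ['/admin.php']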

if args.fast: #if the user has supplied the --fast argument
    type = args.type #gets the input from the --type argument
    get_paths(type) #grabs links that match the user's choice of php, html or asp
    half = len(paths) // 2 #integer division; a plain / gives a float and breaks slicing in Python 3
    paths1 = paths[:half] #the path list gets
    paths2 = paths[half:] #divided into two halves
    def part1():
        scan(paths1) #scans the first half of the list
    def part2():
        scan(paths2) #scans the second half of the list
    t1 = threading.Thread(target=part1) #runs part1 in a thread
    t2 = threading.Thread(target=part2) #runs part2 in a thread
    t1.start() #starts thread 1
    t2.start() #starts thread 2
    t1.join() #waits for both
    t2.join() #of the threads to finish
else: #if --fast isn't supplied we go without threads
    type = args.type
    get_paths(type)
    scan(paths)
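
#The two-way split above generalises to N threads; a minimal sketch, assuming the
#same scan() and paths defined in this file (n = 4 is an arbitrary choice):
#   n = 4
#   chunks = [paths[i::n] for i in range(n)] #round-robin split into n sub-lists
#   threads = [threading.Thread(target=scan, args=(chunk,)) for chunk in chunks]
#   for t in threads: t.start() #start all workers
#   for t in threads: t.join() #wait for them to finish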