Use URLs from a file as seeds (Resolves #135)

Author: Somdev Sangwan
Date: 2018-11-17 23:17:50 +05:30
Committed by: GitHub
Parent: b9bf006e2c
Commit: 0a053d351d

@@ -50,6 +50,8 @@ parser.add_argument('--params', help='find params',
                     dest='find', action='store_true')
 parser.add_argument('--crawl', help='crawl',
                     dest='recursive', action='store_true')
+parser.add_argument(
+    '--seeds', help='load crawling seeds from a file', dest='args_seeds')
 parser.add_argument(
     '-f', '--file', help='load payloads from a file', dest='args_file')
 parser.add_argument('-l', '--level', help='level of crawling',
@@ -87,6 +89,7 @@ proxy = args.proxy
 find = args.find
 recursive = args.recursive
 args_file = args.args_file
+args_seeds = args.args_seeds
 level = args.level
 add_headers = args.add_headers
 threadCount = args.threadCount
@@ -107,6 +110,14 @@ if args_file:
                 '\n').encode('utf-8').decode('utf-8'))
     payloadList = list(filter(None, payloadList))
 
+seedList = []
+if args_seeds:
+    with open(args_seeds, 'r') as f:
+        for line in f:
+            seedList.append(line.strip(
+                '\n').encode('utf-8').decode('utf-8'))
+    seedList = list(filter(None, seedList))
+
 encoding = base64 if encode and encode == 'base64' else False
 
 if not proxy:
@@ -116,38 +127,41 @@ if update: # if the user has supplied --update argument
     updater()
     quit()  # quitting because files have been changed
 
-if not target:  # if the user hasn't supplied a url
+if not target and not args_seeds:  # if the user hasn't supplied a url
     print('\n' + parser.format_help().lower())
     quit()
 
 if fuzz:
     singleFuzz(target, paramData, verbose, encoding, headers, delay, timeout)
-elif not recursive:
+elif not recursive and not args_seeds:
     if args_file:
         bruteforcer(target, paramData, payloadList, verbose, encoding, headers, delay, timeout)
     else:
         scan(target, paramData, verbose, encoding, headers, delay, timeout, skipDOM, find, skip)
 else:
-    print('%s Crawling the target' % run)
-    scheme = urlparse(target).scheme
-    verboseOutput(scheme, 'scheme', verbose)
-    host = urlparse(target).netloc
-    main_url = scheme + '://' + host
-    crawlingResult = photon(target, headers, level,
-                            threadCount, delay, timeout)
-    forms = crawlingResult[0]
-    domURLs = list(crawlingResult[1])
-    difference = abs(len(domURLs) - len(forms))
-    if len(domURLs) > len(forms):
-        for i in range(difference):
-            forms.append(0)
-    elif len(forms) > len(domURLs):
-        for i in range(difference):
-            domURLs.append(0)
-    threadpool = concurrent.futures.ThreadPoolExecutor(max_workers=threadCount)
-    futures = (threadpool.submit(crawl, scheme, host, main_url, form, domURL, verbose,
-                                 blindXSS, blindPayload, headers, delay, timeout, skipDOM, encoding) for form, domURL in zip(forms, domURLs))
-    for i, _ in enumerate(concurrent.futures.as_completed(futures)):
-        if i + 1 == len(forms) or (i + 1) % threadCount == 0:
-            print('%s Progress: %i/%i' % (info, i + 1, len(forms)), end='\r')
-    print()
+    if target:
+        seedList.append(target)
+    for target in seedList:
+        print('%s Crawling the target' % run)
+        scheme = urlparse(target).scheme
+        verboseOutput(scheme, 'scheme', verbose)
+        host = urlparse(target).netloc
+        main_url = scheme + '://' + host
+        crawlingResult = photon(target, headers, level,
+                                threadCount, delay, timeout)
+        forms = crawlingResult[0]
+        domURLs = list(crawlingResult[1])
+        difference = abs(len(domURLs) - len(forms))
+        if len(domURLs) > len(forms):
+            for i in range(difference):
+                forms.append(0)
+        elif len(forms) > len(domURLs):
+            for i in range(difference):
+                domURLs.append(0)
+        threadpool = concurrent.futures.ThreadPoolExecutor(max_workers=threadCount)
+        futures = (threadpool.submit(crawl, scheme, host, main_url, form, domURL, verbose,
+                                     blindXSS, blindPayload, headers, delay, timeout, skipDOM, encoding) for form, domURL in zip(forms, domURLs))
+        for i, _ in enumerate(concurrent.futures.as_completed(futures)):
+            if i + 1 == len(forms) or (i + 1) % threadCount == 0:
+                print('%s Progress: %i/%i' % (info, i + 1, len(forms)), end='\r')
+        print()
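
With this change, crawling seeds are read from a file and each one is crawled in turn; a URL supplied on the command line is simply appended to the same list, and passing --seeds sends the scanner down the crawl branch even without --crawl. A minimal sketch of the new loader, assuming a hypothetical seeds.txt with one URL per line (blank lines are dropped, mirroring the filter(None, ...) call above):

# Sketch only: 'seeds.txt' and the URLs below are placeholders, not part of the commit.
seedList = []
with open('seeds.txt', 'r') as f:
    for line in f:
        url = line.strip('\n')
        if url:  # equivalent to list(filter(None, seedList))
            seedList.append(url)
print(seedList)  # e.g. ['https://example.com', 'https://example.org']

Running the scanner with --seeds pointing at such a file (and no target URL) would then crawl every URL listed in it.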