diff --git a/readme b/README.md
similarity index 89%
rename from readme
rename to README.md
index 9f78f87..96b8a20 100644
--- a/readme
+++ b/README.md
@@ -1,16 +1,16 @@
 # sqlinj-ant
 
-1. Program Description
+## 1. Program Description
 
 A distributed, full-coverage, semi-automated SQL injection scanner.
 
 Problems solved: traditional scanners are powerless against POST requests and authenticated (post-login) requests, scan inefficiently, and achieve poor coverage.
 
-A proxy server collects every actively submitted request and stores it in redis.
-A Python script drives sqlmapapi to test each URL collected in the previous step.
+A proxy server collects every actively submitted request and stores it in redis, so all HTTP endpoints are covered.
+A Python script drives sqlmapapi to test each URL collected in the previous step; deploying sqlmap across multiple nodes greatly improves detection throughput.
 
-2. File Description
+## 2. File Description
 
 .
 |____autoinj.py  calls the sqlmapapi REST API to run SQL injection tests; returns the request details for each vulnerable request
 |____console.py  main program; reads user arguments and invokes autoinj for injection testing
@@ -20,7 +20,7 @@ A Python script drives sqlmapapi to test each URL collected in the previous step.
 |____proxy.conf  nginx proxy configuration; logs HTTP requests to redis
 |____spider.py   unused
 
-3. Usage
+## 3. Usage
 1. Environment requirements
 python 2.7
 redis
diff --git a/img/1.png b/img/1.png
new file mode 100644
index 0000000..02b7d37
Binary files /dev/null and b/img/1.png differ
diff --git a/img/2.png b/img/2.png
new file mode 100644
index 0000000..af68ff9
Binary files /dev/null and b/img/2.png differ
diff --git a/myspider.py b/myspider.py
deleted file mode 100644
index 06f25d7..0000000
--- a/myspider.py
+++ /dev/null
@@ -1,27 +0,0 @@
-import scrapy
-
-class BlogSpider(scrapy.Spider):
-    name = 'youzuspider'
-    start_urls = ['http://www.youzu.com']
-
-    def parse(self, response):
-        hxs = HtmlXPathSelector(response)
-        items = []
-
-        newurls = hxs.select('//a/@href').extract()
-        validurls = []
-        for url in newurls:
-            # check whether the URL is valid
-            if true:
-                validurls.append(url)
-
-        items.extend([self.make_requests_from_url(url).replace(callback=self.parse) for url in validurls])
-
-        sites = hxs.select('//ul/li')
-        items = []
-        for site in sites:
-            item = DmozItem()
-            item['title'] = site.select('a/text()').extract()
-            item['link'] = site.select('a/@href').extract()
-            item['desc'] = site.select('text()').extract()
-            items.append(item)
diff --git a/spider.py b/spider.py
deleted file mode 100644
index eee46b5..0000000
--- a/spider.py
+++ /dev/null
@@ -1,57 +0,0 @@
-#!/usr/bin/python
-# -*- coding:utf-8 -*-
-
-'''
-    Starting from one URL, collect all non-static resources under the same domain and report them to redis.
-'''
-import re
-import sys
-import urllib
-import urlparse
-import requests
-
-# check whether each url is in the same domain
-def samedomain(domain, urls):
-    rs = set()
-    for url in urls:
-        req = urlparse.urlparse(url)
-        if ("http" == req[0] or "https" == req[0]) and req[1] == domain:
-            rs.add(url)
-        elif len(req[0]) == 0:
-            path = req[2]
-            if path[0] != '/':
-                path = '/' + path
-            if len(req[4])>0:
-                path = path + "?" + req[4]
-            print path
-            rs.add("http://" + domain + path)
-    return rs
-
-# extract urls from the page source
-def queryUrl(domain, code):
-    #return re.findall('', code, re.I)
-    return re.findall("
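
For reference, a minimal sketch of the scan loop the new README describes: pop captured URLs from redis and hand each one to a sqlmapapi node, reporting any URL sqlmap confirms as injectable. The redis list name (`request_urls`) and the sqlmapapi address are illustrative assumptions, not the names the repository actually uses (those live in proxy.conf and autoinj.py); the REST endpoints (`/task/new`, `/scan/<taskid>/start`, `/scan/<taskid>/status`, `/scan/<taskid>/data`, `/task/<taskid>/delete`) are sqlmapapi's standard API.

```python
import time

import redis
import requests

SQLMAPAPI = "http://127.0.0.1:8775"  # assumed address of a `sqlmapapi.py -s` node
r = redis.StrictRedis(host="127.0.0.1", port=6379, db=0)

def scan(url):
    # Create a task, start a scan for one URL, poll until it terminates,
    # then fetch whatever sqlmap found and clean up the task.
    taskid = requests.get(SQLMAPAPI + "/task/new").json()["taskid"]
    requests.post(SQLMAPAPI + "/scan/%s/start" % taskid, json={"url": url})
    while requests.get(SQLMAPAPI + "/scan/%s/status" % taskid).json()["status"] != "terminated":
        time.sleep(5)
    findings = requests.get(SQLMAPAPI + "/scan/%s/data" % taskid).json()["data"]
    requests.get(SQLMAPAPI + "/task/%s/delete" % taskid)
    return findings

while True:
    url = r.lpop("request_urls")  # assumed list key written by the nginx proxy
    if url is None:
        time.sleep(1)              # queue empty; wait for the proxy to capture more
        continue
    url = url.decode("utf-8")      # redis returns bytes under Python 3
    if scan(url):                  # non-empty data means a confirmed injection point
        print("[VULNERABLE] %s" % url)
```

Because each node runs this loop against the same redis queue, adding sqlmapapi nodes scales the scan horizontally, which is the multi-node speedup the README claims.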