Add files via upload

This commit is contained in:
achenc1013
2025-03-09 19:44:06 +08:00
committed by GitHub
parent ba7339b28c
commit 6f49427932
60 changed files with 10119 additions and 2 deletions

Binary file not shown.

View File

@@ -0,0 +1,398 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Crawler module, responsible for crawling pages of the target site
"""
import re
import logging
import threading
import queue
import time
from urllib.parse import urlparse, urljoin, parse_qsl
from bs4 import BeautifulSoup
from concurrent.futures import ThreadPoolExecutor
logger = logging.getLogger('xss_scanner')
class Crawler:
"""爬虫类,负责爬取网站页面"""
def __init__(self, http_client, max_depth=2, threads=5, exclude_pattern=None, include_pattern=None):
"""
Initialize the crawler
Args:
http_client: HTTP client
max_depth: maximum crawl depth
threads: number of threads
exclude_pattern: URL exclusion pattern
include_pattern: URL inclusion pattern
"""
self.http_client = http_client
self.max_depth = max_depth
self.threads = threads
self.exclude_pattern = exclude_pattern
self.include_pattern = include_pattern
# URLs already visited
self.visited_urls = set()
# Queue of URLs waiting to be visited
self.url_queue = queue.Queue()
# Stores the crawl results
self.results = []
# Thread lock
self.lock = threading.Lock()
# Thread pool
self.thread_pool = None
# Tracks the load status of each page
self.page_status = {}
# Common static resource file extensions
self.static_extensions = {
'.css', '.js', '.jpg', '.jpeg', '.png', '.gif', '.svg', '.ico', '.pdf',
'.doc', '.docx', '.xls', '.xlsx', '.ppt', '.pptx', '.zip', '.rar', '.tar',
'.gz', '.mp3', '.mp4', '.avi', '.mov', '.flv', '.wmv'
}
def crawl(self, base_url):
"""
Crawl the specified site
Args:
base_url: base URL
Returns:
list: crawl results containing page information
"""
logger.info(f"开始爬取网站: {base_url}")
# 重置状态
self.visited_urls = set()
self.url_queue = queue.Queue()
self.results = []
self.page_status = {}
# 解析基础URL
parsed_base_url = urlparse(base_url)
self.base_domain = parsed_base_url.netloc
# 添加基础URL到队列
self.url_queue.put((base_url, 0)) # (url, depth)
# 创建线程池
self.thread_pool = ThreadPoolExecutor(max_workers=self.threads)
# 创建并启动爬虫线程
workers = []
for _ in range(self.threads):
worker = threading.Thread(target=self._worker)
workers.append(worker)
worker.start()
# 等待所有线程完成
for worker in workers:
worker.join()
logger.info(f"爬取完成,共发现 {len(self.results)} 个页面")
return self.results
def _worker(self):
"""爬虫工作线程"""
while True:
try:
# 获取URL和深度
url, depth = self.url_queue.get(block=False)
# 处理URL
self._process_url(url, depth)
# 标记任务完成
self.url_queue.task_done()
except queue.Empty:
# 队列为空,检查是否所有线程都空闲
with self.lock:
if self.url_queue.empty():
break
except Exception as e:
logger.error(f"爬虫线程错误: {str(e)}")
def _process_url(self, url, depth):
"""
Process a URL
Args:
url: URL to process
depth: current depth
"""
# Skip if the maximum depth has been exceeded
if depth > self.max_depth:
return
# Skip if the URL has already been visited
if url in self.visited_urls:
return
# Add to the visited set
with self.lock:
self.visited_urls.add(url)
# Check whether the URL passes the filter conditions
if not self._should_crawl(url):
return
logger.debug(f"Crawling page: {url}")
# Send the HTTP request
response = self.http_client.get(url)
if not response or response.status_code != 200:
logger.debug(f"Failed to fetch page: {url}, status code: {response.status_code if response else 'None'}")
return
# Check the page content type
content_type = response.headers.get('Content-Type', '')
if 'text/html' not in content_type:
logger.debug(f"Skipping non-HTML page: {url}, Content-Type: {content_type}")
return
# Parse the HTML
soup = BeautifulSoup(response.text, 'html.parser')
# Extract page information
page_info = self._extract_page_info(url, soup, response)
# Add to the results
with self.lock:
self.results.append(page_info)
# If the maximum depth has not been reached, extract links
if depth < self.max_depth:
links = self._extract_links(url, soup)
for link in links:
# Add to the queue
self.url_queue.put((link, depth + 1))
def _extract_page_info(self, url, soup, response):
"""
Extract page information
Args:
url: page URL
soup: BeautifulSoup object
response: response object
Returns:
dict: page information
"""
# Extract the page title
title = soup.title.string if soup.title else "No Title"
# Extract forms
forms = self._extract_forms(url, soup)
# Extract URL parameters
params = self._extract_params(url)
# Extract JavaScript events
events = self._extract_js_events(soup)
# Extract HTTP headers
headers = dict(response.headers)
return {
'url': url,
'title': title,
'forms': forms,
'params': params,
'events': events,
'headers': headers,
'status_code': response.status_code,
'content_length': len(response.content),
'cookies': dict(response.cookies)
}
def _extract_links(self, base_url, soup):
"""
Extract links from the page
Args:
base_url: base URL
soup: BeautifulSoup object
Returns:
list: list of extracted links
"""
links = []
# Extract links from <a> tags
for a in soup.find_all('a', href=True):
link = a['href'].strip()
if link:
full_url = urljoin(base_url, link)
links.append(full_url)
# Extract links from <form> actions
for form in soup.find_all('form', action=True):
link = form['action'].strip()
if link:
full_url = urljoin(base_url, link)
links.append(full_url)
# Filter the links
filtered_links = []
for link in links:
# Strip anchor fragments
if '#' in link:
link = link.split('#')[0]
if not link:
continue
# Skip javascript: links
if link.startswith('javascript:'):
continue
# Skip mailto: links
if link.startswith('mailto:'):
continue
# Skip tel: links
if link.startswith('tel:'):
continue
# Skip static resource files
parsed_link = urlparse(link)
path = parsed_link.path.lower()
if any(path.endswith(ext) for ext in self.static_extensions):
continue
# Only crawl the same domain
if parsed_link.netloc and parsed_link.netloc != self.base_domain:
continue
filtered_links.append(link)
return list(set(filtered_links))
def _extract_forms(self, base_url, soup):
"""
Extract forms from the page
Args:
base_url: base URL
soup: BeautifulSoup object
Returns:
list: list of forms
"""
forms = []
for form in soup.find_all('form'):
form_info = {
'id': form.get('id', ''),
'name': form.get('name', ''),
'method': form.get('method', 'get').upper(),
'action': urljoin(base_url, form.get('action', '')),
'fields': []
}
# Extract form fields
for field in form.find_all(['input', 'textarea', 'select']):
# Skip hidden fields
if field.name == 'input' and field.get('type') == 'hidden':
continue
field_info = {
'name': field.get('name', ''),
'id': field.get('id', ''),
'type': field.get('type', 'text') if field.name == 'input' else field.name,
'value': field.get('value', '')
}
form_info['fields'].append(field_info)
forms.append(form_info)
return forms
def _extract_params(self, url):
"""
Extract URL parameters
Args:
url: URL
Returns:
list: list of parameter names
"""
parsed_url = urlparse(url)
params = [p[0] for p in parse_qsl(parsed_url.query)]
return params
def _extract_js_events(self, soup):
"""
Extract JavaScript event handlers
Args:
soup: BeautifulSoup object
Returns:
list: list of events
"""
events = []
# Common JavaScript event attributes
js_events = [
'onclick', 'onmouseover', 'onmouseout', 'onload', 'onerror', 'onsubmit',
'onchange', 'onkeyup', 'onkeydown', 'onkeypress', 'onblur', 'onfocus',
'onreset', 'onselect', 'onabort', 'ondblclick', 'onmousedown', 'onmouseup',
'onmousemove', 'onunload'
]
# Find all tags that carry JavaScript event attributes
for tag in soup.find_all():
for event in js_events:
if tag.has_attr(event):
events.append({
'tag': tag.name,
'event': event,
'code': tag[event]
})
return events
def _should_crawl(self, url):
"""
Check whether a URL should be crawled
Args:
url: URL
Returns:
bool: whether the URL should be crawled
"""
# Check that the scheme is HTTP or HTTPS
if not url.startswith(('http://', 'https://')):
return False
# Check the exclusion pattern
if self.exclude_pattern and re.search(self.exclude_pattern, url):
return False
# Check the inclusion pattern
if self.include_pattern and not re.search(self.include_pattern, url):
return False
# Check for static resource files
parsed_url = urlparse(url)
path = parsed_url.path.lower()
if any(path.endswith(ext) for ext in self.static_extensions):
return False
return True
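Below is a minimal usage sketch for the Crawler class, not part of the committed file. It assumes an HttpClient instance like the one defined later in this commit; the target URL and exclude pattern are placeholders, and import statements are omitted because module paths are not shown in this diff.

# Usage sketch (placeholder URL and pattern; Crawler and HttpClient assumed importable)
import logging

logging.basicConfig(level=logging.INFO)

http_client = HttpClient(timeout=10)
crawler = Crawler(http_client, max_depth=2, threads=5, exclude_pattern=r'logout')
pages = crawler.crawl('http://testphp.example.com/')
for page in pages:
    print(page['url'], page['title'], f"{len(page['forms'])} forms, {len(page['params'])} params")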

View File

@@ -0,0 +1,315 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
HTTP client module, responsible for sending HTTP requests
"""
import logging
import requests
import random
import time
from urllib.parse import urlparse, urljoin
from requests.exceptions import RequestException, Timeout, ConnectionError
from requests.packages.urllib3.exceptions import InsecureRequestWarning
# Disable warnings for unverified HTTPS requests
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
logger = logging.getLogger('xss_scanner')
class HttpClient:
"""HTTP客户端类负责处理HTTP请求"""
def __init__(self, timeout=10, user_agent=None, proxy=None, cookies=None, headers=None, verify_ssl=False):
"""
Initialize the HTTP client
Args:
timeout: request timeout in seconds
user_agent: user agent string
proxy: proxy URL
cookies: cookies
headers: custom HTTP headers
verify_ssl: whether to verify SSL certificates
"""
self.timeout = timeout
self.user_agent = user_agent
self.proxy = proxy
self.cookies = cookies or {}
self.headers = headers or {}
self.verify_ssl = verify_ssl
self.session = requests.Session()
# Set the default User-Agent
if not self.user_agent:
self.user_agent = (
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
"AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/91.0.4472.124 Safari/537.36"
)
# Set default request headers
if 'User-Agent' not in self.headers:
self.headers['User-Agent'] = self.user_agent
def get(self, url, params=None, headers=None, cookies=None, allow_redirects=True, timeout=None):
"""
Send a GET request
Args:
url: request URL
params: query parameters
headers: request headers
cookies: cookies
allow_redirects: whether to follow redirects
timeout: timeout in seconds
Returns:
requests.Response: response object, or None on failure
"""
merged_headers = self._merge_headers(headers)
merged_cookies = self._merge_cookies(cookies)
timeout = timeout or self.timeout
try:
response = self.session.get(
url=url,
params=params,
headers=merged_headers,
cookies=merged_cookies,
proxies=self._get_proxies(),
allow_redirects=allow_redirects,
timeout=timeout,
verify=self.verify_ssl
)
return response
except Timeout:
logger.warning(f"Request timed out: {url}")
except ConnectionError:
logger.warning(f"Connection error: {url}")
except RequestException as e:
logger.warning(f"Request exception: {url} - {str(e)}")
except Exception as e:
logger.error(f"Error while sending GET request: {url} - {str(e)}")
return None
def post(self, url, data=None, json=None, headers=None, cookies=None, allow_redirects=True, timeout=None):
"""
Send a POST request
Args:
url: request URL
data: form data
json: JSON body
headers: request headers
cookies: cookies
allow_redirects: whether to follow redirects
timeout: timeout in seconds
Returns:
requests.Response: response object, or None on failure
"""
merged_headers = self._merge_headers(headers)
merged_cookies = self._merge_cookies(cookies)
timeout = timeout or self.timeout
try:
response = self.session.post(
url=url,
data=data,
json=json,
headers=merged_headers,
cookies=merged_cookies,
proxies=self._get_proxies(),
allow_redirects=allow_redirects,
timeout=timeout,
verify=self.verify_ssl
)
return response
except Timeout:
logger.warning(f"Request timed out: {url}")
except ConnectionError:
logger.warning(f"Connection error: {url}")
except RequestException as e:
logger.warning(f"Request exception: {url} - {str(e)}")
except Exception as e:
logger.error(f"Error while sending POST request: {url} - {str(e)}")
return None
def head(self, url, headers=None, cookies=None, allow_redirects=True, timeout=None):
"""
Send a HEAD request
Args:
url: request URL
headers: request headers
cookies: cookies
allow_redirects: whether to follow redirects
timeout: timeout in seconds
Returns:
requests.Response: response object, or None on failure
"""
merged_headers = self._merge_headers(headers)
merged_cookies = self._merge_cookies(cookies)
timeout = timeout or self.timeout
try:
response = self.session.head(
url=url,
headers=merged_headers,
cookies=merged_cookies,
proxies=self._get_proxies(),
allow_redirects=allow_redirects,
timeout=timeout,
verify=self.verify_ssl
)
return response
except Exception as e:
logger.error(f"发送HEAD请求时发生错误: {url} - {str(e)}")
return None
def request(self, method, url, **kwargs):
"""
Send a request with an arbitrary HTTP method
Args:
method: HTTP method
url: request URL
**kwargs: additional request arguments
Returns:
requests.Response: response object, or None on failure
"""
# Merge request headers and cookies
if 'headers' in kwargs:
kwargs['headers'] = self._merge_headers(kwargs['headers'])
else:
kwargs['headers'] = self._merge_headers({})
if 'cookies' in kwargs:
kwargs['cookies'] = self._merge_cookies(kwargs['cookies'])
else:
kwargs['cookies'] = self._merge_cookies({})
# Set the proxy
kwargs['proxies'] = self._get_proxies()
# Set default parameters
kwargs.setdefault('timeout', self.timeout)
kwargs.setdefault('verify', self.verify_ssl)
try:
response = self.session.request(method, url, **kwargs)
return response
except Exception as e:
logger.error(f"发送{method}请求时发生错误: {url} - {str(e)}")
return None
def download_file(self, url, save_path, chunk_size=8192):
"""
Download a file
Args:
url: file URL
save_path: path to save the file to
chunk_size: chunk size in bytes
Returns:
bool: whether the download succeeded
"""
try:
# self.get() does not accept a stream argument; route through request() so stream=True reaches the underlying session
response = self.request('GET', url, stream=True)
if not response or response.status_code != 200:
logger.error(f"Failed to download file, status code: {response.status_code if response else 'None'}")
return False
with open(save_path, 'wb') as f:
for chunk in response.iter_content(chunk_size=chunk_size):
if chunk:
f.write(chunk)
return True
except Exception as e:
logger.error(f"下载文件时发生错误: {url} - {str(e)}")
return False
def submit_form(self, url, form_data, method='POST', headers=None, cookies=None):
"""
Submit a form
Args:
url: form submission URL
form_data: form data
method: submission method
headers: request headers
cookies: cookies
Returns:
requests.Response: response object, or None on failure
"""
if method.upper() == 'POST':
return self.post(url, data=form_data, headers=headers, cookies=cookies)
else:
return self.get(url, params=form_data, headers=headers, cookies=cookies)
def _merge_headers(self, headers=None):
"""
Merge request headers
Args:
headers: request headers
Returns:
dict: merged request headers
"""
merged = self.headers.copy()
if headers:
merged.update(headers)
return merged
def _merge_cookies(self, cookies=None):
"""
Merge cookies
Args:
cookies: cookies
Returns:
dict: merged cookies
"""
merged = self.cookies.copy()
if cookies:
merged.update(cookies)
return merged
def _get_proxies(self):
"""
Get the proxy configuration
Returns:
dict: proxy configuration
"""
if not self.proxy:
return {}
return {
'http': self.proxy,
'https': self.proxy
}
def delay_request(self, min_delay=1, max_delay=3):
"""
Sleep between requests to reduce the chance of being flagged as a crawler by the target site
Args:
min_delay: minimum delay in seconds
max_delay: maximum delay in seconds
"""
delay = random.uniform(min_delay, max_delay)
time.sleep(delay)
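A minimal usage sketch for HttpClient, not part of the committed file. The URLs, proxy, and form values below are placeholders; every request method returns None on failure, so responses are checked before use.

# Usage sketch (placeholder URLs, proxy, and credentials)
client = HttpClient(
    timeout=10,
    proxy='http://127.0.0.1:8080',
    headers={'X-Scanner': 'xss-scanner'},
    cookies={'session': 'test'},
)
resp = client.get('http://example.com/search', params={'q': 'test'})
if resp is not None:
    print(resp.status_code, len(resp.text))
# Polite delay before the next request
client.delay_request(1, 3)
resp = client.submit_form('http://example.com/login', {'user': 'admin', 'pass': 'secret'}, method='POST')
if resp is not None:
    print(resp.status_code)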

View File

@@ -0,0 +1,384 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Technology detection module, used to identify the technologies, frameworks, and programming languages a web page uses
"""
import re
import logging
import json
from urllib.parse import urlparse
from bs4 import BeautifulSoup
logger = logging.getLogger('xss_scanner')
class TechDetector:
"""网站技术检测类,用于识别网站使用的技术栈"""
def __init__(self):
"""初始化技术检测器"""
# 前端框架特征
self.frontend_frameworks = {
'React': [
('script', {'src': re.compile(r'react(-|\.min\.)?\.js')}),
('script', {'src': re.compile(r'react-dom(-|\.min\.)?\.js')}),
('meta', {'name': 'generator', 'content': re.compile(r'react', re.I)}),
('div', {'id': 'root'}),
('div', {'id': 'app'}),
('meta', {'name': 'next-head-count'}),
('code', {'id': '__NEXT_DATA__'})
],
'Vue.js': [
('script', {'src': re.compile(r'vue(-|\.min\.)?\.js')}),
('div', {'id': 'app'}),
('div', {'id': 'vue-app'}),
('div', {'class': 'v-application'}),
('meta', {'name': 'generator', 'content': re.compile(r'vue', re.I)})
],
'Angular': [
('script', {'src': re.compile(r'angular(-|\.min\.)?\.js')}),
('*', {'ng-app': re.compile(r'.*')}),
('*', {'ng-controller': re.compile(r'.*')}),
('*', {'ng-repeat': re.compile(r'.*')}),
('*', {'ng-bind': re.compile(r'.*')}),
('*', {'ng-model': re.compile(r'.*')})
],
'jQuery': [
('script', {'src': re.compile(r'jquery(-|\.min\.)?\.js')}),
],
'Bootstrap': [
('link', {'href': re.compile(r'bootstrap(-|\.min\.)?\.css')}),
('script', {'src': re.compile(r'bootstrap(-|\.min\.)?\.js')}),
('div', {'class': re.compile(r'container(-fluid)?')}),
('div', {'class': re.compile(r'row')}),
('div', {'class': re.compile(r'col(-[a-z]+-[0-9]+)?')})
]
}
# Back-end framework and language signatures
self.backend_technologies = {
'PHP': [
('X-Powered-By', re.compile(r'PHP/?', re.I)),
('Set-Cookie', re.compile(r'PHPSESSID', re.I)),
('link', {'href': re.compile(r'\.php')}),
('a', {'href': re.compile(r'\.php')}),
('form', {'action': re.compile(r'\.php')})
],
'WordPress': [
('meta', {'name': 'generator', 'content': re.compile(r'WordPress', re.I)}),
('link', {'href': re.compile(r'wp-content')}),
('script', {'src': re.compile(r'wp-includes')}),
('link', {'rel': 'https://api.w.org/'}),
('meta', {'property': 'og:site_name'}),
('body', {'class': re.compile(r'wordpress')})
],
'Laravel': [
('input', {'name': '_token'}),
('meta', {'name': 'csrf-token'}),
('script', {'src': re.compile(r'vendor/laravel')}),
('Set-Cookie', re.compile(r'laravel_session', re.I))
],
'Django': [
('input', {'name': 'csrfmiddlewaretoken'}),
('meta', {'name': 'csrf-token'}),
('X-Frame-Options', 'SAMEORIGIN')
],
'Flask': [
('form', {'action': re.compile(r'\/[a-z0-9_]+\/?')}),
('Set-Cookie', re.compile(r'session=', re.I))
],
'Python': [
('Server', re.compile(r'(Python|Werkzeug|Django|Tornado|Flask|CherryPy)', re.I)),
('X-Powered-By', re.compile(r'(Python|Werkzeug|Django|Tornado|Flask|CherryPy)', re.I))
],
'ASP.NET': [
('X-Powered-By', re.compile(r'ASP\.NET', re.I)),
('X-AspNet-Version', re.compile(r'.*')),
('Set-Cookie', re.compile(r'ASP\.NET_SessionId', re.I)),
('form', {'action': re.compile(r'\.aspx')}),
('input', {'name': '__VIEWSTATE'})
],
'Node.js': [
('X-Powered-By', re.compile(r'Express', re.I)),
('Set-Cookie', re.compile(r'connect\.sid', re.I))
],
'Ruby on Rails': [
('X-Powered-By', re.compile(r'Phusion Passenger|Ruby|Rails', re.I)),
('Set-Cookie', re.compile(r'_session_id', re.I)),
('meta', {'name': 'csrf-param', 'content': 'authenticity_token'})
],
'Java': [
('X-Powered-By', re.compile(r'(JSP|Servlet|Tomcat|JBoss|GlassFish|WebLogic|WebSphere|Jetty)', re.I)),
('Server', re.compile(r'(Tomcat|JBoss|GlassFish|WebLogic|WebSphere|Jetty)', re.I)),
('Set-Cookie', re.compile(r'JSESSIONID', re.I))
],
'Go': [
('Server', re.compile(r'(go httpserver)', re.I)),
('X-Powered-By', re.compile(r'(go|gin|echo)', re.I))
]
}
# Server signatures
self.server_technologies = {
'Nginx': [
('Server', re.compile(r'nginx', re.I))
],
'Apache': [
('Server', re.compile(r'apache', re.I))
],
'IIS': [
('Server', re.compile(r'IIS', re.I))
],
'LiteSpeed': [
('Server', re.compile(r'LiteSpeed', re.I))
],
'Cloudflare': [
('Server', re.compile(r'cloudflare', re.I)),
('CF-RAY', re.compile(r'.*')),
('CF-Cache-Status', re.compile(r'.*'))
],
'Varnish': [
('X-Varnish', re.compile(r'.*')),
('X-Varnish-Cache', re.compile(r'.*'))
]
}
# WAF signatures
self.waf_technologies = {
'Cloudflare': [
('Server', re.compile(r'cloudflare', re.I)),
('CF-RAY', re.compile(r'.*'))
],
'ModSecurity': [
('Server', re.compile(r'mod_security', re.I)),
('X-Mod-Security', re.compile(r'.*'))
],
'Sucuri': [
('X-Sucuri-ID', re.compile(r'.*')),
('X-Sucuri-Cache', re.compile(r'.*'))
],
'Imperva': [
('X-Iinfo', re.compile(r'.*')),
('Set-Cookie', re.compile(r'incap_ses', re.I))
],
'Akamai': [
('X-Akamai-Transformed', re.compile(r'.*')),
('Set-Cookie', re.compile(r'ak_bmsc', re.I))
],
'F5 BIG-IP': [
('Set-Cookie', re.compile(r'BIGipServer', re.I)),
('Server', re.compile(r'BigIP', re.I))
],
'Barracuda': [
('Set-Cookie', re.compile(r'barra_counter_session', re.I))
]
}
def detect(self, response, content=None):
"""
Detect the technologies used by a web page
Args:
response: HTTP response object
content: HTML content (optional)
Returns:
dict: detected technology information
"""
if not response:
return {}
results = {
'frontend': [],
'backend': [],
'server': [],
'waf': []
}
# Extract the HTML content
html_content = content or response.text
# Parse the HTML
try:
soup = BeautifulSoup(html_content, 'html.parser')
except Exception as e:
logger.error(f"解析HTML时发生错误: {str(e)}")
soup = None
# Detect front-end frameworks
if soup:
for framework, patterns in self.frontend_frameworks.items():
for tag_name, attrs in patterns:
elements = soup.find_all(tag_name, attrs)
if elements:
if framework not in results['frontend']:
results['frontend'].append(framework)
break
# Detect back-end technologies
headers = response.headers
# Header-based detection
for tech, patterns in self.backend_technologies.items():
for header_name, pattern in patterns:
if header_name in headers:
if isinstance(pattern, re.Pattern) and pattern.search(headers[header_name]):
if tech not in results['backend']:
results['backend'].append(tech)
break
# HTML-based back-end technology detection
if soup:
for tech, patterns in self.backend_technologies.items():
if tech in results['backend']:
continue
for tag_name, attrs in patterns:
if tag_name in ['link', 'a', 'form', 'input', 'meta', 'script', 'body']:
elements = soup.find_all(tag_name, attrs)
if elements:
if tech not in results['backend']:
results['backend'].append(tech)
break
# Detect server technologies
for server, patterns in self.server_technologies.items():
for header_name, pattern in patterns:
if header_name in headers:
if isinstance(pattern, re.Pattern) and pattern.search(headers[header_name]):
if server not in results['server']:
results['server'].append(server)
break
# Detect WAFs
for waf, patterns in self.waf_technologies.items():
for header_name, pattern in patterns:
if header_name in headers:
if isinstance(pattern, re.Pattern) and pattern.search(headers[header_name]):
if waf not in results['waf']:
results['waf'].append(waf)
break
# Add more detailed detection information
self._enhance_detection(results, soup, headers)
return results
def _enhance_detection(self, results, soup, headers):
"""
Enhance detection with more detailed information
Args:
results: detection results so far
soup: BeautifulSoup object
headers: HTTP response headers
"""
# Detect JavaScript library versions
if soup:
# Detect the React version
if 'React' in results['frontend']:
script_tags = soup.find_all('script')
for script in script_tags:
if script.string and 'React.version' in script.string:
version_match = re.search(r'React.version\s*=\s*[\'"]([^\'"]+)[\'"]', script.string)
if version_match:
results['frontend'].remove('React')
results['frontend'].append(f"React {version_match.group(1)}")
break
# Detect the Angular version
if 'Angular' in results['frontend']:
for script in soup.find_all('script'):
if script.string and 'angular.version' in script.string:
version_match = re.search(r'angular.version\s*=\s*\{[^\}]*full:\s*[\'"]([^\'"]+)[\'"]', script.string)
if version_match:
results['frontend'].remove('Angular')
results['frontend'].append(f"Angular {version_match.group(1)}")
break
# Detect the WordPress version
if 'WordPress' in results['backend']:
meta_tags = soup.find_all('meta', {'name': 'generator'})
for meta in meta_tags:
content = meta.get('content', '')
if 'WordPress' in content:
version_match = re.search(r'WordPress\s*([0-9\.]+)', content)
if version_match:
results['backend'].remove('WordPress')
results['backend'].append(f"WordPress {version_match.group(1)}")
break
# Detect server versions
if 'Server' in headers:
server_header = headers['Server']
# Nginx version
if 'Nginx' in results['server']:
version_match = re.search(r'nginx/([0-9\.]+)', server_header, re.I)
if version_match:
results['server'].remove('Nginx')
results['server'].append(f"Nginx {version_match.group(1)}")
# Apache version
elif 'Apache' in results['server']:
version_match = re.search(r'Apache/([0-9\.]+)', server_header, re.I)
if version_match:
results['server'].remove('Apache')
results['server'].append(f"Apache {version_match.group(1)}")
def get_waf_bypass_techniques(self, detected_waf):
"""
Return possible bypass techniques for the detected WAFs
Args:
detected_waf: list of detected WAFs
Returns:
dict: WAF bypass techniques
"""
bypass_techniques = {}
for waf in detected_waf:
if waf == 'Cloudflare':
bypass_techniques['Cloudflare'] = [
'Use different encodings: HTML, URL, Unicode, Base64, etc.',
'Split the XSS payload with newline characters',
'Use mixed-case JavaScript event handlers',
'Use less common HTML tags instead of script, img, iframe',
'Try events that are less commonly detected, such as onmouseover, onerror, onwheel'
]
elif waf == 'ModSecurity':
bypass_techniques['ModSecurity'] = [
'Use alternative forms of JavaScript event handlers',
'Use HTML entity encoding',
'Split the payload: < s c r i p t >',
'Use JavaScript eval and string manipulation functions',
'Combine CSS injection with XSS'
]
elif waf == 'Imperva':
bypass_techniques['Imperva'] = [
'Use JavaScript prototype pollution techniques',
'Avoid well-known keywords (alert, document.cookie, etc.)',
'Use indirect JavaScript invocation methods',
'Use layered encoding: URL encoding + HTML encoding + Unicode encoding',
'Use long strings and repeated characters to confuse WAF rules'
]
elif waf == 'F5 BIG-IP':
bypass_techniques['F5 BIG-IP'] = [
'Use non-standard event handlers',
'DOM-based XSS techniques are usually more likely to bypass F5 protection',
'Use JavaScript template literals',
'Use the JavaScript Function constructor',
'Exploit browser-specific parsing differences'
]
elif waf == 'Akamai':
bypass_techniques['Akamai'] = [
'Obfuscate JavaScript variable and function names',
'Use CDATA sections and comments to evade signature detection',
'Avoid using the javascript: pseudo-protocol directly',
'Use data:text/html;base64,... encoding',
'Use string concatenation and dynamic execution in JavaScript'
]
return bypass_techniques
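A minimal usage sketch for TechDetector, not part of the committed file. It assumes an HttpClient instance from this commit and a placeholder URL; import paths are omitted because module names are not shown in this diff.

# Usage sketch (placeholder URL; HttpClient and TechDetector assumed importable)
detector = TechDetector()
http_client = HttpClient(timeout=10)
response = http_client.get('http://example.com/')
if response is not None:
    tech = detector.detect(response)
    print('Frontend:', tech.get('frontend'))
    print('Backend:', tech.get('backend'))
    print('Server:', tech.get('server'))
    print('WAF:', tech.get('waf'))
    # Suggest bypass techniques for any detected WAFs
    for waf, tips in detector.get_waf_bypass_techniques(tech.get('waf', [])).items():
        print(waf, '->', '; '.join(tips))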

View File

@@ -0,0 +1,228 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Parameter validation module, responsible for validating user-supplied arguments
"""
import re
import os
import logging
from urllib.parse import urlparse
logger = logging.getLogger('xss_scanner')
def validate_target(target):
"""
Validate whether the target URL is valid
Args:
target: target URL
Returns:
bool: whether the URL is valid
"""
if not target:
return False
# Check the URL format
if not target.startswith(('http://', 'https://')):
logger.warning(f"Invalid URL format: {target}, the URL must start with http:// or https://")
return False
# Parse the URL
try:
parsed_url = urlparse(target)
if not parsed_url.netloc:
logger.warning(f"无效的URL: {target},缺少域名")
return False
except Exception as e:
logger.warning(f"URL解析失败: {target} - {str(e)}")
return False
return True
def validate_file_path(file_path, check_exists=True, check_write=False):
"""
Validate whether the file path is valid
Args:
file_path: file path
check_exists: whether to check that the file exists
check_write: whether to check that the file is writable
Returns:
bool: whether the path is valid
"""
if not file_path:
return False
# Check whether the file exists
if check_exists and not os.path.exists(file_path):
logger.warning(f"File does not exist: {file_path}")
return False
# Check whether the file is writable
if check_write:
try:
# If the file exists, check write permission on the file itself
if os.path.exists(file_path):
if not os.access(file_path, os.W_OK):
logger.warning(f"File is not writable: {file_path}")
return False
else:
# Otherwise check whether the parent directory is writable
dir_path = os.path.dirname(file_path)
if dir_path and not os.access(dir_path, os.W_OK):
logger.warning(f"Directory is not writable: {dir_path}")
return False
except Exception as e:
logger.warning(f"检查文件权限失败: {file_path} - {str(e)}")
return False
return True
def validate_ip(ip):
"""
Validate whether an IP address is valid
Args:
ip: IP address
Returns:
bool: whether the address is valid
"""
if not ip:
return False
# IPv4 regular expression
ipv4_pattern = r'^(\d{1,3}\.){3}\d{1,3}$'
# Check the format
if not re.match(ipv4_pattern, ip):
return False
# Check the range of each octet
segments = ip.split('.')
for segment in segments:
if not 0 <= int(segment) <= 255:
return False
return True
def validate_port(port):
"""
Validate whether a port number is valid
Args:
port: port number
Returns:
bool: whether the port is valid
"""
try:
port = int(port)
return 1 <= port <= 65535
except (TypeError, ValueError):
return False
def validate_proxy(proxy):
"""
Validate whether a proxy setting is valid
Args:
proxy: proxy setting
Returns:
bool: whether the proxy is valid
"""
if not proxy:
return False
# Check the proxy format
if not proxy.startswith(('http://', 'https://', 'socks4://', 'socks5://')):
logger.warning(f"Invalid proxy format: {proxy}, the proxy must start with http://, https://, socks4:// or socks5://")
return False
# Parse the proxy
try:
parsed_proxy = urlparse(proxy)
if not parsed_proxy.netloc:
logger.warning(f"无效的代理: {proxy},缺少主机名")
return False
except Exception as e:
logger.warning(f"代理解析失败: {proxy} - {str(e)}")
return False
return True
def validate_regex(pattern):
"""
Validate whether a regular expression is valid
Args:
pattern: regular expression
Returns:
bool: whether the pattern is valid
"""
if not pattern:
return False
try:
re.compile(pattern)
return True
except re.error:
return False
def validate_headers(headers):
"""
Validate whether HTTP headers are valid
Args:
headers: HTTP headers in the format Header1:Value1;Header2:Value2
Returns:
bool: whether the headers are valid
"""
if not headers:
return False
try:
# Split the HTTP headers
header_pairs = headers.split(';')
for header in header_pairs:
if header.strip() and ':' not in header:
logger.warning(f"无效的HTTP头格式: {header}应为Header:Value格式")
return False
return True
except Exception as e:
logger.warning(f"验证HTTP头失败: {str(e)}")
return False
def validate_cookies(cookies):
"""
Validate whether cookies are valid
Args:
cookies: cookies in the format name1=value1; name2=value2
Returns:
bool: whether the cookies are valid
"""
if not cookies:
return False
try:
# Split the cookies
cookie_pairs = cookies.split(';')
for cookie in cookie_pairs:
if cookie.strip() and '=' not in cookie:
logger.warning(f"无效的Cookie格式: {cookie}应为name=value格式")
return False
return True
except Exception as e:
logger.warning(f"验证Cookie失败: {str(e)}")
return False
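A few illustrative checks for the validators above, not part of the committed file; all values are placeholders chosen to exercise the accept and reject paths.

# Illustrative checks (placeholder values)
assert validate_target('http://example.com/') is True
assert validate_target('ftp://example.com/') is False
assert validate_ip('192.168.1.1') is True
assert validate_ip('999.1.1.1') is False
assert validate_port(8080) is True
assert validate_port('70000') is False
assert validate_proxy('socks5://127.0.0.1:1080') is True
assert validate_headers('X-Forwarded-For:127.0.0.1;User-Agent:test') is True
assert validate_cookies('sessionid=abc123; theme=dark') is True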