import os
import re
import json
import tempfile

from urllib.parse import urlparse


def host(string):
    # Return the network location (host) of a URL, skipping empty values
    # and wildcard entries such as '*'
    if string and '*' not in string:
        return urlparse(string).netloc

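# Illustrative behaviour (standard urllib.parse semantics; the sample URLs are
# assumptions, not taken from this module):
#   host('https://example.com/login?next=/')  ->  'example.com'
#   host('*.example.com')                     ->  None (wildcards are skipped)

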
def load_json(file):
    with open(file) as f:
        return json.load(f)


def format_result(result):
    # Merge a list of per-URL result dicts into a single dict, skipping
    # empty entries
    new_result = {}
    for each in result:
        if each:
            for i in each:
                new_result[i] = each[i]
    return new_result


def create_url_list(target_url, inp_file):
    # Collect target URLs from an input file and/or a single target URL,
    # keeping only entries that start with http:// or https://
    urls = []
    if inp_file:
        with open(inp_file, 'r') as file:
            for line in file:
                if line.startswith(('http://', 'https://')):
                    urls.append(line.rstrip('\n'))
    if target_url and target_url.startswith(('http://', 'https://')):
        urls.append(target_url)
    return urls

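# Illustrative usage (the file name 'targets.txt' is an assumption):
#   create_url_list('https://example.com', 'targets.txt')
# reads targets.txt, keeps only lines that start with http:// or https://,
# and appends the single target URL when it is also an http(s) URL.

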
def create_stdin_list(target_url, inp_file):
    # Same as create_url_list, but reads from an already-open file-like
    # object (e.g. stdin) instead of a path on disk
    urls = []
    if inp_file:
        for line in inp_file.readlines():
            if line.startswith(('http://', 'https://')):
                urls.append(line.rstrip('\n'))
    if target_url and target_url.startswith(('http://', 'https://')):
        urls.append(target_url)
    return urls

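# Illustrative usage (assumes URLs are piped in on standard input):
#   import sys
#   urls = create_stdin_list(None, sys.stdin)

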
def prompt(default=None):
    # Open a text editor on a temporary file and return its contents once the
    # user closes the editor (Unix-only: relies on os.fork/os.execvp)
    editor = 'nano'
    with tempfile.NamedTemporaryFile(mode='r+') as tmpfile:
        if default:
            tmpfile.write(default)
            tmpfile.flush()

        child_pid = os.fork()
        is_child = child_pid == 0

        if is_child:
            # Child process: replace itself with the editor
            os.execvp(editor, [editor, tmpfile.name])
        else:
            # Parent process: wait for the editor to exit, then read the result
            os.waitpid(child_pid, 0)
            tmpfile.seek(0)
            return tmpfile.read().strip()

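# Illustrative usage (assumes a Unix-like system with nano available; the
# default text below is an example value):
#   headers_text = prompt(default='User-Agent: example')
# opens nano on a temp file pre-filled with the default text and returns
# whatever the user saved, stripped of surrounding whitespace.

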
def extractHeaders(headers: str):
    # Parse a raw header string, with individual headers separated by literal
    # '\n' (backslash + n) sequences, into a dict of {name: value}
    sorted_headers = {}
    for header in headers.split('\\n'):
        if ':' not in header:
            # Skip blank or malformed entries that lack a 'Name: value' separator
            continue
        name, value = header.split(':', 1)
        name = name.strip()
        value = value.strip()
        if value.endswith(','):
            # Drop a trailing comma left over from comma-joined input
            value = value[:-1]
        sorted_headers[name] = value
    return sorted_headers

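# Minimal smoke test (illustrative addition, not part of the original module);
# the sample inputs below are assumptions. Run this file directly to try it.
if __name__ == '__main__':
    print(host('https://example.com/login'))                 # example.com
    print(format_result([{'id': 1}, None, {'name': 'a'}]))   # {'id': 1, 'name': 'a'}
    print(extractHeaders('Accept: */*\\nUser-Agent: test,'))
    # {'Accept': '*/*', 'User-Agent': 'test'}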