Source archive scanner: generate likely backup/archive filenames for each domain and probe them with dirsearch
import requests
import subprocess
import os, re
from urllib.parse import urlparse
# Suppress certificate warnings (requests is called with verify=False below)
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
path = os.path.split(os.path.realpath(__file__))[0]
dict_path = os.path.join(path, "domain_dict")
result_path = os.path.join(path, "result")
# Create the wordlist and result directories if they don't exist
if not os.path.exists(dict_path):
    os.mkdir(dict_path)
if not os.path.exists(result_path):
    os.mkdir(result_path)
headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:67.0) Gecko/20100101 Firefox/67.0"}
# Extract the bare domain (e.g. xxxx.com) from a URL
def chuli_url(url):
    # urlparse() only fills netloc when a scheme (or "//") is present,
    # so prefix bare domains such as "xx.com" before parsing
    if "//" not in url:
        url = "//" + url
    return urlparse(url).netloc
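# Illustrative inputs/outputs ("www.example.com" is an assumed example):
#   chuli_url("http://www.example.com/index.html")  -> "www.example.com"
#   chuli_url("www.example.com")                    -> "www.example.com"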
def rule_1(domain):
    '''
    Generate numbered variants of a domain, e.g.:
    xxx1.com xxx2.com xxx01.com xxx.01.com xxx_01.com
    xxx1_com xxx2_com xxx01_com xxx_01_com
    '''
    my_list = []
    ret = domain.split(".")
    # Numeric suffixes to try: "01".."05" and "1".."5"
    num_list = ["0{}".format(x) for x in range(1, 6)]
    num_list += [str(x) for x in range(1, 6)]
    if len(ret) == 2:
        for z in num_list:
            my_list.append(ret[0]+z+"."+ret[1])       # Case 1: xxx1.com
            my_list.append(ret[0]+z+"_"+ret[1])       # Case 2: xxx1_com
            my_list.append(ret[0]+"."+z+"."+ret[1])   # Case 3: xxx.1.com
            my_list.append(ret[0]+"_"+z+"."+ret[1])   # Case 4: xxx_1.com
            my_list.append(ret[0]+"_"+z+"_"+ret[1])   # Case 5: xxx_1_com
            my_list.append(ret[0]+"."+z+"_"+ret[1])   # Case 6: xxx.1_com
    elif len(ret) == 3:
        for z in num_list:
            my_list.append(ret[0]+"."+ret[1]+z+"."+ret[2])      # Case 1: www.xxx1.com
            my_list.append(ret[0]+"."+ret[1]+z+"_"+ret[2])      # Case 2: www.xxx1_com
            my_list.append(ret[0]+"_"+ret[1]+"_"+z+"_"+ret[2])  # Case 5: www_xxx_1_com
            my_list.append(ret[0]+"_"+ret[1]+z+"_"+ret[2])      # Case 6: www_xxx1_com
    return my_list
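# Sketch of the output for an assumed example domain:
#   rule_1("example.com")[:3] -> ["example01.com", "example01_com", "example.01.com"]
# 10 suffixes x 6 patterns = 60 variants for a two-label domain,
# 10 suffixes x 4 patterns = 40 for a three-label one.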
def rule_2(domain):
    temp_list = []
    temp_list.append(domain)  # xx.com
    if len(re.findall(r"\.", domain)) == 1:  # exactly one dot, e.g. xx.com
        temp_list.append(domain.split(".")[0]+"_"+domain.split(".")[1])  # xxx_com
    else:
        # www.xx.com  site.xx.vip  web.sss.sss.cn
        temp_list.append(domain.replace(".", "_"))
    return temp_list
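# Illustrative outputs (assumed example domains):
#   rule_2("example.com")     -> ["example.com", "example_com"]
#   rule_2("www.example.com") -> ["www.example.com", "www_example_com"]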
def my_requests(domain, domain_list_dict):
    # Probe each candidate path over HTTP and HTTPS; log the first hit and stop
    for x in domain_list_dict:
        url_1 = "http://"+domain+"/"+x
        url_2 = "https://"+domain+"/"+x
        try:
            print("[*] {}".format(domain+"/"+x))
            ret_1 = requests.get(url=url_1, headers=headers, verify=False, timeout=5)
            ret_2 = requests.get(url=url_2, headers=headers, verify=False, timeout=5)
            if ret_1.status_code == 200:
                with open("result.txt", "a", encoding="utf-8") as f:
                    f.write(url_1+"\n")
                print("[*] find {}".format(url_1))
                return
            if ret_2.status_code == 200:
                with open("result.txt", "a", encoding="utf-8") as f:
                    f.write(url_2+"\n")
                print("[*] find {}".format(url_2))
                return
        except Exception as e:
            print(e)
# Example usage against a local test host:
# my_requests("192.168.17.139", ["xxx", "www.tar", "sadga.txt"])
def my_subprocess(rce):
    # Run a shell command and stream its output line by line
    print("[*] {}".format(rce))
    child = subprocess.Popen(rce, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
    while child.poll() is None:
        output = child.stdout.readline().decode("gbk", "ignore")  # "gbk" suits Chinese Windows consoles; use "utf-8" on Linux
        print(output.strip())
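# Example (hypothetical command, just to show the streaming behavior):
#   my_subprocess("ping -c 1 127.0.0.1")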
# Archive/backup extensions appended to every domain variant
tar_list = [
    ".bz2", ".gz", ".rar", ".tar", ".tar.bz2", ".tar.gz", ".tgz", ".zip",
    ".7z", ".tar.gz2", ".Z", ".xz", ".tar.xz", ".mdb.rar", ".0SP1.rar",
    ".0SP1.zip", ".txt", ".mdb.zip", ".config.rar", ".config.zip",
    ".net.cn.rar", ".net.cn.zip", ".wjw.cn.rar", ".wjw.cn.zip", ".html", ".sql",
]
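# Resulting wordlist size per domain (worked out from the rules above):
#   two labels:   (2 from rule_2 + 60 from rule_1) x 26 extensions = 1612 lines
#   three labels: (2 from rule_2 + 40 from rule_1) x 26 extensions = 1092 lines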
with open("domain.txt", "r") as f:
    ret = f.readlines()
for domain in ret:
    real_url = chuli_url(domain.strip())  # target passed to dirsearch
    domain = chuli_url(domain.strip())    # also used as the wordlist filename
    domain_list = []
    domain_list.extend(rule_2(domain))  # base variants
    domain_list.extend(rule_1(domain))  # numbered variants
    temp_list = []
    for a in domain_list:
        for x in tar_list:
            temp_list.append(a + x.strip())
    # Write the per-domain wordlist
    with open(os.path.join(dict_path, domain+".txt"), "a", encoding="utf-8") as f:
        for x in temp_list:
            f.write(x+"\n")
    # Brute-force the candidate archive names with dirsearch
    my_subprocess("python3 /root/dirsearch/dirsearch.py --random-agent -e * -t 20 -u {} -w {} --json-report={}".format(
        real_url, os.path.join(dict_path, domain+".txt"), os.path.join(result_path, domain+"_result.json")))
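# The generated command looks like this (illustrative domain, <path> is the script directory):
#   python3 /root/dirsearch/dirsearch.py --random-agent -e * -t 20 \
#       -u www.example.com -w <path>/domain_dict/www.example.com.txt \
#       --json-report=<path>/result/www.example.com_result.json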