Source Code Archive Scanning

The script below derives likely source/backup archive filenames from a target's domain name, writes them into a per-domain dictionary file, and then brute-forces each target with dirsearch.
import os
import re
import subprocess
import requests
from urllib.parse import urlparse
from multiprocessing.pool import ThreadPool

pool = ThreadPool(10)

# Work relative to the directory this script lives in
path = os.path.split(os.path.realpath(__file__))[0]
dict_path = os.path.join(path, "domain_dict")
result_path = os.path.join(path, "result")
if not os.path.exists(dict_path):
    os.mkdir(dict_path)
if not os.path.exists(result_path):
    os.mkdir(result_path)

headers = {"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64; rv:67.0) Gecko/20100101 Firefox/67.0"}
def chuli_url(url):
    # Reduce a URL to its host (netloc) part
    return urlparse(url).netloc
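For example (the URL is illustrative), urlparse only yields a netloc when the input carries a scheme:

print(chuli_url("http://www.example.com/index.php"))  # -> www.example.com
print(chuli_url("www.example.com"))                   # -> "" (no scheme, so netloc is empty)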
def rule_1(domain):
    '''
    Generate numbered variants of a domain, e.g. for xxx.com:
    xxx1.com xxx2.com xxx01.com xxx_01.com
    xxx1_com xxx2_com xxx01_com ...
    '''
    my_list = []
    ret = domain.split(".")
    # Numeric suffixes to try: 01-05, then 1-5
    num_list = []
    for x in range(1, 6):
        num_list.append('0{}'.format(x))
    for x in range(1, 6):
        num_list.append(str(x))
    if len(ret) == 2:
        for z in num_list:
            my_list.append(ret[0] + z + "." + ret[1])
            my_list.append(ret[0] + z + "_" + ret[1])
            my_list.append(ret[0] + "." + z + "." + ret[1])
            my_list.append(ret[0] + "_" + z + "." + ret[1])
            my_list.append(ret[0] + "_" + z + "_" + ret[1])
            my_list.append(ret[0] + "." + z + "_" + ret[1])
    elif len(ret) == 3:
        for z in num_list:
            my_list.append(ret[0] + "." + ret[1] + z + "." + ret[2])
            my_list.append(ret[0] + "." + ret[1] + z + "_" + ret[2])
            my_list.append(ret[0] + "_" + ret[1] + "_" + z + "_" + ret[2])
            my_list.append(ret[0] + "_" + ret[1] + z + "_" + ret[2])
    return my_list
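A quick sanity check (example.com is a placeholder): a two-part domain yields 60 candidates, 10 numeric suffixes times 6 patterns.

candidates = rule_1("example.com")
print(len(candidates))   # 60
print(candidates[:3])    # ['example01.com', 'example01_com', 'example.01.com']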
def rule_2(domain):
    # Keep the domain itself plus a dots-to-underscores variant
    temp_list = []
    temp_list.append(domain)
    if domain.count(".") == 1:
        temp_list.append(domain.split(".")[0] + "_" + domain.split(".")[1])
    else:
        temp_list.append(domain.replace(".", "_"))
    return temp_list
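Both branches produce the same kind of variant; with placeholder hosts:

print(rule_2("example.com"))      # ['example.com', 'example_com']
print(rule_2("www.example.com"))  # ['www.example.com', 'www_example_com']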
def my_requests(domain, domain_list_dict):
    # Probe each candidate over HTTP and HTTPS; stop at the first hit for this domain
    for x in domain_list_dict:
        url_1 = "http://" + domain + "/" + x
        url_2 = "https://" + domain + "/" + x
        try:
            print("[*] {}".format(domain + "/" + x))
            ret_1 = requests.get(url=url_1, headers=headers, verify=False, timeout=5)
            ret_2 = requests.get(url=url_2, headers=headers, verify=False, timeout=5)
            if ret_1.status_code == 200:
                with open("result.txt", "a", encoding="utf-8") as f:
                    f.write(url_1 + "\n")
                print("[*] find {} :".format(url_1))
                return
            if ret_2.status_code == 200:
                with open("result.txt", "a", encoding="utf-8") as f:
                    f.write(url_2 + "\n")
                print("[*] find {} :".format(url_2))
                return
        except Exception as e:
            print(e)
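With verify=False, requests emits an InsecureRequestWarning on every call; the standard way to silence it (not part of the original script) is:

import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)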
def my_subprocess(rce):
    # Run a shell command and stream its output line by line
    print("[*] {}".format(rce))
    child = subprocess.Popen(rce, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
    while child.poll() is None:
        output = child.stdout.readline().decode("gbk", "ignore")
        print(output.strip())
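One caveat: a poll()-based loop can miss output that is still buffered when the process exits. A common alternative (a sketch; the _v2 name is mine) reads until EOF instead:

def my_subprocess_v2(cmd):
    # Read until EOF so no trailing output is lost, then reap the process
    child = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True)
    for line in iter(child.stdout.readline, b""):
        print(line.decode("gbk", "ignore").strip())
    child.wait()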
# Archive extensions appended to every candidate name
tar_list = [".bz2", ".gz", ".rar", ".tar", ".tar.bz2", ".tar.gz", ".tgz", ".zip", ".7z", ".tar.gz2", ".Z", ".xz", ".tar.xz", ".mdb.rar", ".0SP1.rar", ".0SP1.zip", ".txt", ".mdb.zip", ".config.rar", ".config.zip", ".net.cn.rar", ".net.cn.zip", ".wjw.cn.rar", ".wjw.cn.zip", ".html", ".sql"]

with open("domain.txt", "r") as f:
    ret = f.readlines()

for domain in ret:
    # Entries in domain.txt need a scheme (http:// or https://), otherwise netloc is empty
    real_url = chuli_url(domain.strip())
    domain = chuli_url(domain.strip())
    domain_list = []
    domain_list.extend(rule_2(domain))
    domain_list.extend(rule_1(domain))
    temp_list = []
    for a in domain_list:
        for x in tar_list:
            temp_list.append(a + x.strip())
    # Write the per-domain dictionary, then hand it to dirsearch
    with open(os.path.join(dict_path, domain + ".txt"), "a", encoding="utf-8") as dict_file:
        for x in temp_list:
            dict_file.write(x + "\n")
    my_subprocess("python3 /root/dirsearch/dirsearch.py --random-agent -e '*' -t 20 -u {} -w {} --json-report={}".format(real_url, os.path.join(dict_path, domain + ".txt"), os.path.join(result_path, domain + "_result.json")))
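Note that my_requests and the ThreadPool created at the top are never wired in: the scan above is delegated entirely to dirsearch. As a rough sketch (the host list is illustrative, and it reuses the functions and pool defined earlier), the pure-requests path could look like:

# Sketch: drive my_requests with the otherwise unused ThreadPool instead of dirsearch
for host in ["www.example.com"]:  # hypothetical target
    names = rule_2(host) + rule_1(host)
    candidates = [n + ext.strip() for n in names for ext in tar_list]
    pool.apply_async(my_requests, (host, candidates))
pool.close()
pool.join()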