描述
- 通过读取字典文件中的关键词，拼接成 URL，来探测目标站点的文件和目录结构
代码
- 设置了一个 resume 参数：如果因为网络等问题导致扫描中断，重新启动时可以将 resume 设置为上次扫描到的关键词，从而从该位置继续扫描
import queue
import requests
import threading
import sys
# Scan configuration.
# User-Agent header sent with every request, to look like a normal browser.
AGENT = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/110.0.0.0 Safari/537.36"
# Extra file suffixes tried for every dot-less word.
# BUG FIX: "inc" was missing its leading dot, which produced paths like
# "/admininc" instead of "/admin.inc".
EXTENSIONS = [".php", ".bak", ".orig", ".inc"]
# Base URL of the site being scanned.
TARGET = "http://testphp.vulnweb.com"
# Number of concurrent worker threads.
THREADS = 50
# Path to the wordlist file read by get_words().
WORDLIST = "/usr/share/wordlist"
def get_words(resume=None):
    """Build a queue of candidate URL paths from the WORDLIST file.

    For each whitespace-separated word in the wordlist:
      - if the word contains a dot it is queued as "/word" (treated as a
        file name);
      - otherwise it is queued as "/word/" (treated as a directory);
      - additionally "/word<ext>" is queued for every suffix in EXTENSIONS.

    Args:
        resume: optional word to resume from after an interrupted scan;
            every word up to and including *resume* is skipped.

    Returns:
        queue.Queue of path strings, each starting with "/".
    """
    def extend_words(word):
        # Words that already contain a dot look like file names; bare
        # words are tried as directories too.
        if "." in word:
            words.put(f"/{word}")
        else:
            words.put(f"/{word}/")
        for extension in EXTENSIONS:
            words.put(f"/{word}{extension}")

    with open(WORDLIST) as f:
        raw_words = f.read()

    found_resume = False
    words = queue.Queue()
    for word in raw_words.split():
        if resume is not None:
            if found_resume:
                extend_words(word)
            elif word == resume:
                found_resume = True
                print(f"Resuming wordlist from: {resume}")
        else:
            # BUG FIX: the original called the helper with no argument
            # here, raising TypeError on the very first word whenever
            # resume was not supplied (the default path).
            extend_words(word)
    return words
def dir_bruter(words, timeout=10):
    """Worker: drain *words* and probe TARGET for each candidate path.

    Prints a success line for 200 responses, a "." progress marker to
    stderr for 404s, and "status => url" for anything else. Connection
    failures and timeouts are reported as "x" on stderr and skipped.

    Args:
        words: queue.Queue of path strings produced by get_words().
        timeout: per-request timeout in seconds (new, defaults to 10);
            without it a worker thread could hang forever on a dead host.
    """
    headers = {'User-Agent': AGENT}
    while not words.empty():
        url = f"{TARGET}{words.get()}"
        try:
            r = requests.get(url, headers=headers, timeout=timeout)
        # ROBUSTNESS FIX: also catch Timeout — previously an uncaught
        # timeout would kill the worker thread.
        except (requests.exceptions.ConnectionError,
                requests.exceptions.Timeout):
            sys.stderr.write('x')
            sys.stderr.flush()
            continue
        if r.status_code == 200:
            print(f"\nSuccess ({r.status_code}: {url})")
        elif r.status_code == 404:
            # 404s are expected noise; emit a dot as a progress marker.
            sys.stderr.write(".")
            sys.stderr.flush()
        else:
            print(f"{r.status_code} => {url}")
if __name__ == "__main__":
words = get_words()
print("Press return to continue.")
sys.stdin.readline()
for _ in range(THREADS):
t = threading.Thread(Target=dir_bruter, args=(words,))
t.start()