Python BeautifulSoup库 常用方法
匹配文件 fofa.html
# -*- coding: utf-8 -*-
# @Time : 2021/9/5 18:10
# @Author : HUGBOY
# @File : test.py
# @Software: PyCharm
import re

from bs4 import BeautifulSoup
from bs4 import Comment
# Read the sample page as raw bytes; BeautifulSoup sniffs the encoding itself.
with open('./fofa.html', 'rb') as f:
    html = f.read()

# Parse the HTML into a navigable tree.
rel = BeautifulSoup(html, "html.parser")

# Dotted tag access returns only the FIRST matching tag.
print(rel.title)
# .string: the tag's text content.
print(rel.title.string)
# .attrs: the tag's attributes as a dict.
print(rel.a.attrs)

# If a tag's only child is an HTML comment, .string is a Comment node.
# isinstance is the robust check (comparing str(type(...)) against a hard-coded
# class-repr string breaks if bs4 ever moves/renames the class).
print(type(rel.a.string))
if isinstance(rel.a.string, Comment):
    print(rel.a.string)

# All direct children of <head>, as a list.
tag_list = rel.head.contents
print(tag_list)

# find_all with a string: exact tag-name match.
tag_list = rel.find_all("div")
print(tag_list)

# find_all with a compiled regex: matches any tag whose NAME contains "d"
# (e.g. div, head, body).
tag_list = rel.find_all(re.compile("d"))
def href_get(tag):
    """find_all filter: return True for tags that carry an ``href`` attribute."""
    has_href = tag.has_attr("href")
    return has_href
# find_all with a function filter: keep tags for which the predicate is True.
tag_list = rel.find_all(href_get)

# find_all by attribute value.
tag_list1 = rel.find_all(id="__nuxt")
tag_list2 = rel.find_all(class_=True)  # any tag that has a class attribute
tag_list3 = rel.find_all(href="/_nuxt/css/9eec2be.css")
print(tag_list1)
print(tag_list2)
print(tag_list3)

# Search text nodes: exact match (string= is the modern name for legacy text=).
text_list1 = rel.find_all(string="美国")
text_list2 = rel.find_all(string=["国", "TCP/UDP"])
print(text_list1, text_list2)

# Search text nodes containing a digit; raw string r"\d" avoids the
# invalid-escape-sequence warning that bare "\d" triggers on modern Python.
text_list = rel.find_all(string=re.compile(r"\d"))
for t in text_list:
    print(t)

# limit= caps the number of results returned.
text_list = rel.find_all(string=re.compile(r"\d"), limit=5)
for t in text_list:
    print(t)

# CSS selectors.
# By tag name.
print(rel.select("title"))
# By class: '.' + class value.
print(rel.select(".listSpans"))
# By id: '#' + id value.
print(rel.select("#u1"))
# By attribute value.
print(rel.select("a[href='http://52.67.233.77:4848']"))
# Direct-child combinator.
print(rel.select("head>title"))
# General-sibling combinator: returns only the LATER sibling's region.
print(rel.select(".relatedSearch ~ .mainContainer"))

# Concatenated text of every tag inside the selected region.
class_area = rel.select(".relatedSearch ~ .mainContainer")
print(class_area[0].get_text())
________________________________________________________
Every good deed you do will someday come back to you.
Love you, love world!