Python 之beautifulSoup4解析库
一、节点选择器
from bs4 import BeautifulSoup

if __name__ == '__main__':
    html = ''' <div> <ul> <li class="aaa last-li"><a href="1.html">yangs</a><span>zi</span></li> <li>你好!!!</li> <li class="last-li"><a href="2.html">hello world</a></li> </ul> </div> '''
    soup = BeautifulSoup(html, features="lxml")

    # Attribute-style access returns only the FIRST matching tag.
    print(soup.li)  # <li class="aaa last-li"><a href="1.html">yangs</a><span>zi</span></li>
    # All attributes of that first tag.
    print(soup.li.attrs)  # {'class': ['aaa', 'last-li']}
    # One attribute's value ('class' is multi-valued, hence a list).
    print(soup.li.attrs["class"])  # ['aaa', 'last-li']
    # Access chains: attributes of the <a> inside the first <li>.
    print(soup.li.a.attrs)  # {'href': '1.html'}
    # Subscript shorthand for a single attribute.
    print(soup.li['class'])  # ['aaa', 'last-li']
    # .string is None when the tag holds more than one child node.
    print(soup.li.string)  # None
    print(soup.li.contents)  # [<a href="1.html">yangs</a>, <span>zi</span>]
    print(soup.li.children)  # <list_iterator object at 0x012DFD70>

    # Iterate the direct children: prints "yangs" then "zi".
    for child in soup.li.children:
        print(child.string)

    # .descendants yields every descendant (tags and text), as a generator:
    #   <a href="1.html">yangs</a> / yangs / <span>zi</span> / zi
    for node in soup.li.descendants:
        print(node)

    # .parent is the parent tag; iterating it walks that parent's children,
    # i.e. all three sibling <li> elements here. .parents is a generator
    # over every ancestor.
    for item in soup.li.parent:
        print(item)
    soup.li.parents

    # Adjacent siblings. NOTE: newline/whitespace text counts as a node,
    # so these frequently return a bare string or blank text.
    soup.a.next_sibling
    soup.a.previous_sibling
    # All following / preceding siblings, as generators.
    soup.a.next_siblings
    soup.a.previous_siblings
    # Next / previous object in document parse order.
    soup.a.next_element
    soup.a.previous_element
    # Iterators over everything parsed after / before this tag.
    soup.a.next_elements
    soup.a.previous_elements
二、方法选择器
from bs4 import BeautifulSoup

if __name__ == '__main__':
    html = ''' <div> <ul> <li class="aaa last-li"><a href="1.html">yangs</a><span>zi</span></li> <li id="two">你好!!!</li> <li class="last-li"><a href="2.html">hello world</a></li> </ul> </div> '''
    soup = BeautifulSoup(html, features="lxml")

    # Every <li> in the document.
    print(soup.find_all("li"))
    # [<li class="aaa last-li"><a href="1.html">yangs</a><span>zi</span></li>, <li id="two">你好!!!</li>, <li class="last-li"><a href="2.html">hello world</a></li>]

    # limit caps the number of results.
    print(soup.find_all("li", limit=1))
    # [<li class="aaa last-li"><a href="1.html">yangs</a><span>zi</span></li>]

    # class_ matches any tag whose class list CONTAINS the value.
    print(soup.find_all("li", class_='last-li'))
    # [<li class="aaa last-li"><a href="1.html">yangs</a><span>zi</span></li>, <li class="last-li"><a href="2.html">hello world</a></li>]

    # attrs with a plain string matches the attribute text exactly.
    print(soup.find_all("li", attrs={"class": 'aaa last-li'}))
    # [<li class="aaa last-li"><a href="1.html">yangs</a><span>zi</span></li>]

    # A list value with the classes in the wrong order matches nothing.
    print(soup.find_all("li", attrs={"class": ['last-li aaa']}))
    # []

    # Any attribute can be used as a keyword filter.
    print(soup.find_all("li", id='two'))
    # [<li id="two">你好!!!</li>]
    print(soup.find_all("li", id='two')[0].string)
    # 你好!!!
find( name , attrs , recursive , text , **kwargs ):它返回的是单个元素,也就是第一个匹配的元素,类型依然是tag类型,参数同find_all()一样
另外还有许多查询方法,其用法和前面介绍的find_all()方法完全相同,只不过查询范围不同,参数也一样
find_parents(name , attrs , recursive , text , **kwargs )和find_parent(name , attrs , recursive , text , **kwargs ):前者返回所有祖先节点,后者返回直接父节点
find_next_siblings(name , attrs , recursive , text , **kwargs )和find_next_sibling(name , attrs , recursive , text , **kwargs ):对当前tag后面的节点进行迭代,前者返回后面的所有兄弟节点,后者返回后面第一个兄弟节点
find_previous_siblings(name , attrs , recursive , text , **kwargs )和find_previous_sibling(name , attrs , recursive , text , **kwargs ):对当前tag前面的节点进行迭代,前者返回前面的所有兄弟节点,后者返回前面的第一个兄弟节点
find_all_next(name , attrs , recursive , text , **kwargs )和find_next(name , attrs , recursive , text , **kwargs ):对当前tag之后的tag和字符串进行迭代,前者返回所有符合条件的节点,后者返回第一个符合条件的节点
find_all_previous()和find_previous():对当前tag之前的tag和字符串进行迭代,前者返回当前节点之前所有符合条件的节点,后者返回第一个符合条件的节点
三、CSS选择器
from bs4 import BeautifulSoup

if __name__ == '__main__':
    html = ''' <div> <ul> <li class="aaa last-li"><a href="1.html">yangs</a><span>zi</span></li> <li id="two">你好!!!</li> <li class="last-li"><a href="2.html">hello world</a></li> </ul> </div> '''
    soup = BeautifulSoup(html, features="lxml")

    # Plain tag selector.
    print(soup.select("li"))
    # [<li class="aaa last-li"><a href="1.html">yangs</a><span>zi</span></li>, <li id="two">你好!!!</li>, <li class="last-li"><a href="2.html">hello world</a></li>]

    # Descendant combinator.
    print(soup.select("li a"))
    # [<a href="1.html">yangs</a>, <a href="2.html">hello world</a>]

    # Class selector, and the equivalent ~= attribute form.
    print(soup.select("li.aaa a"))
    # [<a href="1.html">yangs</a>]
    print(soup.select("li[class~=aaa] a"))
    # [<a href="1.html">yangs</a>]

    # Exact attribute-value match.
    print(soup.select("li a[href='2.html']"))
    # [<a href="2.html">hello world</a>]
    print(soup.select("li a[href='2.html']")[0].string)
    # hello world

    # Prefix match (^=).
    print(soup.select("li a[href^='1']"))
    # [<a href="1.html">yangs</a>]

    # Suffix match ($=).
    print(soup.select("li a[href$='.html']"))
    # [<a href="1.html">yangs</a>, <a href="2.html">hello world</a>]

    # Substring match (*=).
    print(soup.select("li a[href*='.h']"))
    # [<a href="1.html">yangs</a>, <a href="2.html">hello world</a>]
四、tag修改方法
from bs4 import BeautifulSoup

if __name__ == '__main__':
    html = ''' <div> <ul> <li class="aaa last-li"><a href="1.html">yangs</a><span>zi</span></li> <li id="two">你好!!!</li> <li class="last-li"><a href="2.html">hello world</a></li> </ul> </div> '''
    soup = BeautifulSoup(html, features="lxml")

    # Assigning to .string replaces the tag's whole content.
    soup.li.a.string = "样子"
    print(soup.li)
    # <li class="aaa last-li"><a href="1.html">样子</a><span>zi</span></li>

    # append() adds a new text node after the existing content.
    soup.li.a.append(", 你好")
    print(soup.li)
    # <li class="aaa last-li"><a href="1.html">样子, 你好</a><span>zi</span></li>
其他方法:
insert()将元素插入到指定的位置
insert_before()在当前tag或文本节点前插入内容
insert_after()在当前tag或文本节点后插入内容
clear()移除当前tag的内容
extract()将当前tag移出文档树,并作为方法结果返回
prettify()将Beautiful Soup的文档树格式化后以Unicode编码输出,tag节点也可以调用
get_text()输出tag中包含的文本内容,包括子孙tag中的内容
soup.original_encoding 属性记录了自动识别的编码结果
from_encoding:参数在创建BeautifulSoup对象时可以用来指定编码,省去自动猜测编码的开销,加快运行速度
#解析部分文档,可以使用SoupStrainer类来创建一个内容过滤器,它接受同搜索方法相同的参数
五、异常处理
#Beautiful Soup异常处理:
HTMLParser.HTMLParseError:malformed start tag
HTMLParser.HTMLParseError:bad end tag 这两个异常都是解析器引起的,解决方法是安装lxml或者html5lib
六、58同城bs4数据抓取案例
from bs4 import BeautifulSoup
import requests


def get_58city():
    """Scrape the first 10 job listings from 58.com (Chengdu).

    Returns:
        list[dict]: one dict per listing with keys "title" (job title),
        "money" (salary text) and "good" (list of perk strings), or an
        empty list when the page cannot be fetched.
    """
    url = "https://cd.58.com/job/"
    headers = {
        "User-Agent": "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0"
    }
    try:
        # NOTE: verify=False disables TLS verification -- acceptable only
        # for this demo scraper.
        res = requests.get(url, headers=headers, verify=False)
        html = res.content.decode("utf-8")
    except requests.exceptions.RequestException as e:
        # The original caught the builtin ConnectionError, which requests
        # does not raise, and then fell through to use an unbound `html`
        # (NameError). Catch the requests exception hierarchy and bail out.
        print(e)
        return []

    soup = BeautifulSoup(html, "lxml")
    # select() supports limit= (bs4 >= 4.7): keep only the first 10 items.
    result = soup.select("li.job_item", limit=10)

    return_data = []
    for site in result:
        title = site.select("span.name")[0].get_text()
        money = site.select("p.job_salary")[0].get_text()
        # Perk tags are optional; an absent block simply yields [].
        good = [tag.get_text() for tag in site.select("div.job_wel > span")]
        return_data.append({"title": title, "money": money, "good": good})
    return return_data


if __name__ == '__main__':
    data = get_58city()
    print(data)
官方文档:https://www.crummy.com/software/BeautifulSoup/bs4/doc/
中文文档:https://www.crummy.com/software/BeautifulSoup/bs4/doc.zh