Web Crawler Basics Exercise
import requests
from bs4 import BeautifulSoup

res = requests.get('http://www.people.com.cn/')
res.encoding = 'UTF-8'
soup = BeautifulSoup(res.text, 'html.parser')

# Extract the text of every h1 tag
for h1 in soup.find_all('h1'):
    print(h1.text)

# Extract the href of every a tag
for a in soup.find_all('a'):
    print(a.attrs.get('href'))

# Extract the full contents of every li tag
for li in soup.find_all('li'):
    print(li.contents)

# Extract the attributes of the 3rd div inside the a tag of the 2nd li tag
print(soup.find_all('li')[1].a.find_all('div')[2].attrs)

# Extract one news item's title, link, publish time, and source
print(soup.select('div .news-list-title')[0].text)
print(soup.select('div .news-list-thumb')[0].parent.attrs.get('href'))
print(soup.select('div .news-list-info > span')[0].text)
print(soup.select('div .news-list-info > span')[1].text)
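As a follow-up to the single-item extraction above, here is a minimal sketch that loops over all news entries and collects each one into a dict instead of printing fields individually. It is an assumption-laden sketch, not part of the original exercise: it reuses the .news-list-title and .news-list-info class names from the selectors above, assumes each title sits inside an <a> tag, and assumes the info block holds the publish time and source in two <span> tags; the actual page layout may differ.

import requests
from bs4 import BeautifulSoup

def fetch_page(url='http://www.people.com.cn/'):
    # Fetch the page with a timeout and fail fast on HTTP errors.
    res = requests.get(url, timeout=10)
    res.raise_for_status()
    res.encoding = 'UTF-8'
    return BeautifulSoup(res.text, 'html.parser')

def extract_news(soup):
    # Collect every news entry; class names are taken from the exercise
    # above and may need adjusting for the real page structure.
    items = []
    for title in soup.select('.news-list-title'):
        link = title.find_parent('a')  # assumed: the title is wrapped in an <a> tag
        info = title.find_next(class_='news-list-info')
        spans = info.select('span') if info else []
        items.append({
            'title': title.get_text(strip=True),
            'href': link.get('href') if link else None,
            'time': spans[0].get_text(strip=True) if spans else None,
            'source': spans[1].get_text(strip=True) if len(spans) > 1 else None,
        })
    return items

if __name__ == '__main__':
    for item in extract_news(fetch_page()):
        print(item)

Wrapping the fetch and parse steps in functions this way also makes the parsing logic easy to test against a saved HTML file without hitting the live site.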