"""Scrape the Baidu hot-search list and push it to a DingTalk robot.

Reference:
    https://blog.csdn.net/ezreal_tao/article/details/91154768 (how to select with etree)

Approach:
    1. Request the hot-search page (http://top.baidu.com/buzz?b=1&fr=20811) and read the response.
    2. Use lxml's etree to select the relevant XPath nodes and assemble the text
       (the fetched HTML can also be written to a local file to check what is retrievable).
    3. Wrap the result as a DingTalk robot message and send it.

Note: DingTalk cannot render the raw Baidu hot-search URLs (most likely because of the
'%' character in them), so a regular Baidu search link is used instead.
"""
### Imports
from urllib.parse import quote_plus

import requests
import requests,json
from lxml import etree
###网址
url="http://top.baidu.com/buzz?b=1&fr=20811"
###模拟浏览器
header={'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/73.0.3683.103 Safari/537.36'}
'''用于存入本地
r = requests.get(url,headers=header)
with open('D:/1.html','wb') as f:
f.write(r.content)
'''
###主函数
def main():
###获取html页面
html=etree.HTML(requests.get(url,headers=header).content)
# 获取内容
title=html.xpath('//a[@class="list-title"]/text()')
# title_text=html.xpath('//a[@class="info-title"]/text()')
# num=html.xpath('//span[@class="num-top"]/text()')
# 获取url连接
href=html.xpath('//a[@class="list-title"]//@href')
top=title[0]
affair=title[1:]
data="";
data+="### top:["+top+"](https://www.baidu.com/s?ie=utf-8&f=8&rsv_bp=1&rsv_idx=1&tn=baidu&wd="+top+"&rsv_pq=cc0b2ae500104a8f&rsv_t=8594gkWHOJpP8vhGnFVfsZhcRYTJV9ElJQ5Nk3qShVzXGX2bDtb6O2Q4F%2BY&rqlang=cn&rsv_enter=1&rsv_dl=tb&rsv_sug3=5&rsv_sug1=4&rsv_sug7=101&rsv_sug2=0&inputT=860&rsv_sug4=1019&rsv_sug=1"+") \n"
#print('{0:<10}\t{1:<40}'.format("top",top))
for i in range(0, len(affair)):
#print("{0:<10}\t{1:{3}<30}\t{2:{3}>20}".format(rank[i],affair[i],view[i],chr(12288)))
#print(href[i])
# data+=(">- "+str(i+1)+" ["+affair[i]+"]("+href[i+1]+") \n")
data+=(">- ["+affair[i]+"](https://www.baidu.com/s?ie=utf-8&f=8&rsv_bp=1&rsv_idx=1&tn=baidu&wd="+affair[i ]+"&rsv_pq=cc0b2ae500104a8f&rsv_t=8594gkWHOJpP8vhGnFVfsZhcRYTJV9ElJQ5Nk3qShVzXGX2bDtb6O2Q4F%2BY&rqlang=cn&rsv_enter=1&rsv_dl=tb&rsv_sug3=5&rsv_sug1=4&rsv_sug7=101&rsv_sug2=0&inputT=860&rsv_sug4=1019&rsv_sug=1"+") \n")
sendinfo_ding(data)
def sendinfo_ding(data):
url = 'url' #你的机器人webhook地址
program = {
"msgtype": "markdown",
"markdown": {
"title":"百度热搜",
"text": ""+data+""
},
}
headers = {'Content-Type': 'application/json'}
f = requests.post(url, data=json.dumps(program), headers=headers)
print(f)
main()