Scraping Novels with Python (2): Scraping a Single Chapter
# coding: utf-8
# urlopen: opens a URL
# Request: builds an HTTP request object
from urllib.request import urlopen,Request
# gzip: decompresses gzip-encoded response bodies
import gzip
import ssl
# Skip HTTPS certificate verification; assign the factory function itself (no parentheses),
# because urllib calls it later to create the SSL context
ssl._create_default_https_context = ssl._create_unverified_context
from lxml import etree
path = "http://www.xbiquge.la/10/10489/4535761.html"
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.88 Safari/537.36',
"Accept-Encoding": "gzip, deflate",
"Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"Accept-Language":"zh-CN,zh;q=0.9"
}
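# Because Accept-Encoding advertises gzip, this site normally returns a gzip-compressed
# body, which is why the response is decompressed further down before it is decoded.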
# Build the request
req = Request(url=path,headers=headers)
# Open the connection
conn = urlopen(req)
# Check that the page opened successfully
if conn.code == 200:
    # Read the raw response body (bytes)
    data = conn.read()
    # print(type(data))
    # The downloaded page is gzip-compressed, so decompress it before decoding;
    # see the more defensive sketch after this script, which checks the
    # Content-Encoding header first
    data = gzip.decompress(data).decode("utf-8")
    # If the response were NOT gzip-compressed, a plain decode would be enough:
    # data = data.decode(encoding="utf-8")
    html = etree.HTML(data)
    # Extract the text nodes of the target element, here the <div id="content"> chapter body
    text = html.xpath("//div[@id='content']/text()")
    # Merge the text nodes into one string
    text = "".join(text)
    # Split on whitespace to drop the indentation spaces and blank lines
    text = text.split()
    # Re-join the pieces, one paragraph per line
    text = "\n".join(text)
    print(text)
else:
    print("Failed to open the URL")