Scraping a novel to read over the 2019 New Year

# -*- coding: utf-8 -*-
# _author_ = 'LaoHan'
# date: 2019/2/11
from bs4 import BeautifulSoup
import requests
import codecs
def get_url_list(url):
    """Fetch the novel's index page and return the list of chapter URLs."""
    begin_html = requests.get(url)
    soup = BeautifulSoup(begin_html.content, 'lxml')
    # Parse begin_html.content rather than begin_html.text: .text comes back garbled here.
    # For the reason why, see:
    # https://blog.csdn.net/qq_38900441/article/details/79946377
    url_list = []
    links = soup.select("#list > dl > dd > a")
    # Every <a> tag inside the chapter list on the index page
    for link in links:
        href = link.get("href")
        # print(href)
        url_list.append('http://www.biqugecom.com' + href)
    # Drop the first nine links and the last one
    url_list = url_list[9:-1]
    print(url_list)
    return url_list
def get_data(url):
    """Fetch one chapter page and append its title and body to output.txt."""
    html = requests.get(url)
    soup = BeautifulSoup(html.content, 'lxml')
    book = codecs.open('output.txt', 'a+', 'utf-8')
    # Append the chapter title; the file is opened as UTF-8, otherwise the output is garbled
    section_name = soup.select("#wrapper > div.content_read > div > div.bookname > h1")[0].text
    print(section_name)
    book.write('\r\n' + section_name + '\r\n')
    section_text = soup.select("#content")
    for page in section_text:
        # Strip the readx(); snippet and the site URL string embedded in the chapter text
        text = page.text.replace('readx();', '').replace('www.biqugecom.com/20/20341/', '')
        # Append the chapter body
        book.write(text + '\r\n')
    book.close()  # Close the novel file

if __name__ == '__main__':
    url = 'http://www.biqugecom.com/34/34055/'
    url_list = get_url_list(url)
    for chapter_url in url_list:
        get_data(chapter_url)
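
As the comment in get_url_list notes, parsing begin_html.text directly produces garbled text, most likely because requests falls back to a default charset when the response headers do not declare one, while passing the raw bytes (.content) lets lxml detect the encoding itself. A minimal sketch of the alternative fix, letting requests sniff the encoding before reading .text (same index URL as above, shown here only for illustration):

import requests
from bs4 import BeautifulSoup

resp = requests.get('http://www.biqugecom.com/34/34055/')
# apparent_encoding sniffs the charset from the response body;
# assigning it to resp.encoding makes resp.text decode correctly.
resp.encoding = resp.apparent_encoding
soup = BeautifulSoup(resp.text, 'lxml')
print(len(soup.select("#list > dl > dd > a")))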
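
For reference, soup.select takes a CSS selector and returns the matching tags; a small self-contained sketch of the same pattern used above to collect the chapter links (the HTML snippet is made up for illustration):

from bs4 import BeautifulSoup

html = '<div id="list"><dl><dd><a href="/34/34055/1.html">Chapter 1</a></dd></dl></div>'
soup = BeautifulSoup(html, 'lxml')
for a in soup.select("#list > dl > dd > a"):
    print(a.get("href"), a.text)  # -> /34/34055/1.html Chapter 1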