## Step 1: Import the packages
```python
from bs4 import BeautifulSoup
import requests
import sys
```
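Before diving into the full class in the next section, here is a minimal, self-contained sketch (the HTML snippet and link paths are made up) of the two BeautifulSoup operations the whole script depends on: `find_all` filtered by tag name and CSS class, and pulling the text and `href` attribute out of each matched tag.

```python
from bs4 import BeautifulSoup

# Made-up HTML snippet imitating the structure of the chapter index page.
sample_html = """
<div class="listmain">
  <a href="/1_1094/0001.html">Chapter 1</a>
  <a href="/1_1094/0002.html">Chapter 2</a>
</div>
"""

soup = BeautifulSoup(sample_html, 'html.parser')
div = soup.find_all('div', class_='listmain')   # all <div class="listmain"> tags
links = div[0].find_all('a')                    # every <a> inside the first match
for a in links:
    print(a.string, a.get('href'))              # tag text and href attribute
```

The post's code wraps `str(div[0])` in a second `BeautifulSoup` object before calling `find_all('a')`; calling `find_all` directly on the matched tag, as above, gives the same result.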
## Preparation
```python
class downloader(object):

    def __init__(self):
        self.server = 'http://www.biqukan.com'
        self.target = 'http://www.biqukan.com/1_1094/'
        self.names = []   # chapter titles
        self.urls = []    # chapter links
        self.nums = 0     # number of chapters

    def get_download_url(self):
        """Fetch the index page and collect every chapter title and link."""
        req = requests.get(url=self.target)
        html = req.text
        div_bf = BeautifulSoup(html, 'html.parser')
        div = div_bf.find_all('div', class_='listmain')
        a_bf = BeautifulSoup(str(div[0]), 'html.parser')
        a = a_bf.find_all('a')
        # The first 15 links are "latest chapter" shortcuts, so skip them.
        self.nums = len(a[15:])
        for each in a[15:]:
            self.names.append(each.string)
            self.urls.append(self.server + each.get('href'))

    def writer(self, name, path, text):
        """Append one chapter (title plus body) to the output file."""
        with open(path, 'a', encoding='utf-8') as f:
            f.write(name + '\n')
            f.write(text)
            f.write('\n\n')

    def get_contents(self, target):
        """Fetch one chapter page and return its text."""
        req = requests.get(url=target)
        html = req.text
        bf = BeautifulSoup(html, 'html.parser')
        texts = bf.find_all('div', class_='showtxt')
        # The site pads each paragraph with eight non-breaking spaces;
        # turn them into blank lines.
        texts = texts[0].text.replace('\xa0' * 8, '\n\n')
        return texts


if __name__ == '__main__':
    dl = downloader()
    dl.get_download_url()
    print('Download started')
    for i in range(dl.nums):
        dl.writer(dl.names[i], '用点.txt', dl.get_contents(dl.urls[i]))
    print('Download finished')
```
Based on Brother Hua's tutorial... there is still a lot I don't understand.
http://cuijiahua.com/blog/2017/10/spider_tutorial_1.html
Years race away with the hours and ambition fades with the days, until all withers into decay,
of little use to the world; then, grieving in a humble hut, what good will regret do?