Scraping a Novel

I recently found a novel worth reading and decided to try scraping it. This script will not work on every site as-is, but with small tweaks to match the page structure it can be adapted to most.

#!/usr/bin/python
# -*- coding:utf-8 -*-
# Author: Playon
# Time: 2020/8/18 9:57

import random
import re

import requests
from bs4 import BeautifulSoup

def finder(url, book):
    """
    Scrape one chapter, then recurse to the next.
    :param url: starting chapter URL
    :param book: path of the output text file
    :return:
    """
    # Collect chapter titles already saved, so a re-run can skip them.
    title = []
    rec = re.compile(r'第\d+章')
    try:
        with open(book, 'r', encoding='utf-8') as f:
            for t in f.readlines():
                t = t.strip()
                if rec.findall(t) and t not in title:
                    title.append(t)
    except FileNotFoundError:
        pass  # first run: nothing saved yet
    # Rotate through a few User-Agents so successive requests look less uniform.
    user_agent_list = [
        "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/68.0.3440.106 Safari/537.36",
        "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36",
        "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.186 Safari/537.36",
        "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.62 Safari/537.36",
        "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/45.0.2454.101 Safari/537.36",
        "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0)",
        "Mozilla/5.0 (Macintosh; U; PPC Mac OS X 10.5; en-US; rv:1.9.2.15) Gecko/20110303 Firefox/3.6.15",
        ]
    user_agent = random.choice(user_agent_list)

    headers = {
        'User-Agent': user_agent,
    }
    # The site serves GBK; ignore undecodable bytes rather than crash.
    res = requests.get(url, headers=headers, timeout=10).content.decode('gbk', 'ignore')
    soup = BeautifulSoup(res, 'html.parser')

    # Chapter title: the <h1> inside the "bookname" div.
    bookname = soup.find('div', attrs={'class': 'bookname'}).h1.text
    bookname = re.sub(r'正文 |全部章节 ', '', bookname)
    # Skip chapters whose title is already in the output file.
    if bookname.lstrip() not in title:

        # Chapter body: every stripped text node inside the "box_con" div.
        content = soup.find('div', attrs={'class': 'box_con'}).stripped_strings
        with open(book, 'a', encoding='utf-8') as f:
            # Strip page markers like "(1/3)" and section labels from the title.
            rec1 = re.compile(r'\(\d/\d\)|正文 |全部章节 ')
            bookname = re.sub(rec1, '', bookname)
            f.write('\n' + bookname.title() + '\n')

            # Drop site chrome: nav links, vote/bookmark prompts, repeated headers.
            rec2 = re.compile(r'^textselect|大神小说网|^>|^投推荐票|^[上下]一章|^加入书签|^荣耀巅峰|^←|^→|^章节目录|^推荐各位书友|\(.*?\)   |^正文|^第\d+章|^全部章节')
            for i in content:
                if not re.findall(rec2, i):
                    # .title() normalizes case; restore "KPL", which it breaks.
                    f.write(re.sub('Kpl', 'KPL', i.title() + '\n'))

        print(url)  # progress: log each saved chapter's URL

    # Next page: the "bottem1" div holds the prev/menu/next links; the next-chapter
    # <a> sits six siblings after the first one (whitespace text nodes count).
    next_page = 'http://www.dashenxiaoshuo.com' + soup.find('div', attrs={'class': 'bottem1'}).a.next_sibling.next_sibling.next_sibling.next_sibling.next_sibling.next_sibling['href']

    # Chapter URLs end in digits + ".html"; the final "next" link points back
    # to the table of contents, which is where we stop.
    rec = re.compile(r'\d+\.html$')
    if re.findall(rec, next_page):
        finder(next_page, book)  # note: recursion depth grows with chapter count
    else:
        return 'End'
url="http://www.dashenxiaoshuo.com/html/32/32313/15350782.html"
book='../booklist/rydf.txt'
finder(url,book)
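
Two caveats on the script as written: finder calls itself once per chapter, so a novel longer than Python's default recursion limit (roughly 1000 frames) will crash it, and firing requests back-to-back is an easy way to get blocked. Below is a minimal sketch of an iterative driver with a randomized delay between chapters; save_chapter and get_next_page are hypothetical helpers standing in for the fetch/write and next-page logic above, not functions from the script.

import random
import re
import time

def crawl(start_url, book):
    """Iterative chapter loop: avoids the recursion-depth limit and pauses
    briefly between requests so the site is less likely to block us."""
    url = start_url
    while url and re.search(r'\d+\.html$', url):
        save_chapter(url, book)           # hypothetical: fetch one chapter, append it to book
        url = get_next_page(url)          # hypothetical: return the next chapter's URL
        time.sleep(random.uniform(1, 3))  # throttle: wait 1-3 seconds between requests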

To use it yourself, change url and the path that book points to.
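
More generally, adapting the script to another site mostly means swapping a handful of constants. A hypothetical checklist (these values are placeholders, not taken from any real site):

# Placeholder values to adapt for another site -- none of these are real:
BASE_URL    = 'http://www.example-site.com'  # hypothetical; prefix for relative next-page links
ENCODING    = 'utf-8'                        # or 'gbk'; check the target page's <meta charset>
TITLE_DIV   = {'class': 'bookname'}          # div wrapping the chapter <h1>
CONTENT_DIV = {'class': 'box_con'}           # div wrapping the chapter text
NAV_DIV     = {'class': 'bottem1'}           # div holding the prev/menu/next links

The junk-filter regexes (rec1, rec2) are also site-specific and will need rebuilding from whatever boilerplate the new site injects into its chapter pages.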

posted @ 2020-12-02 10:10  PlayOn