Web crawler: scraping a novel
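A small practice script: fetch a novel's table of contents from yuetutu.com, follow each chapter link, and append the chapter text to a local .txt file. It uses requests for the HTTP calls and lxml's XPath for the parsing.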

import requests as r
import re
import time
from lxml import etree

# Fetch one chapter page and append its title and body to the output file.
def pa(url, name):
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.61 Safari/537.36'
    }
    z = r.get(url, headers=headers)
    z.encoding = 'UTF-8'
    html = etree.HTML(z.text)
    # Locate the chapter title
    zhangjie = html.xpath('//*[@id="wrapper"]/div[3]/div/div[2]/h1/text()')[0]

    print(zhangjie)
    # XPath lookup for the chapter body text
    content = html.xpath('//*[@id="content"]/text()')
    content = '\n'.join(content)
    with open(name, 'a+', encoding="UTF-8") as txt:
        txt.write(zhangjie + "\n")
        txt.write(content)
        print(zhangjie + ":\twritten successfully")
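
The XPath extraction in pa() can be sanity-checked offline before hitting the site. A minimal sketch, assuming chapter pages match the selectors above (an h1 title nested under #wrapper and body text in a #content div); the HTML snippet below is made up for illustration:

from lxml import etree

# Hypothetical snippet mimicking the structure the selectors expect.
sample = '''
<div id="wrapper"><div></div><div></div>
  <div><div><div></div><div><h1>Chapter 1: The Beginning</h1></div></div></div>
</div>
<div id="content">First paragraph.<br/>Second paragraph.</div>
'''

html = etree.HTML(sample)
title = html.xpath('//*[@id="wrapper"]/div[3]/div/div[2]/h1/text()')[0]
body = '\n'.join(html.xpath('//*[@id="content"]/text()'))
print(title)  # Chapter 1: The Beginning
print(body)   # the text fragments around <br/> joined with a newline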
 
 
 
if __name__ == '__main__':
    mulu_url = 'http://www.yuetutu.com/cbook_22694/'
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.61 Safari/537.36'
    }
    s = r.get(mulu_url, headers=headers)
    s.encoding = 'utf-8'
    html = etree.HTML(s.text)
    # The book title sits in the page's <h1>; use it as the output file name
    name = re.search('<h1>(.*?)</h1>', s.text).group(1)
    name = "./%s.txt" % name
    # Collect every chapter link from the table of contents
    mulu = html.xpath('//*[@id="list"]/dl/dd/a/@href')

    print(name)
    print(mulu)
    b = 1
    for i in mulu:
        # Skip the first eight links (apparently the site's "latest chapters" duplicates)
        if b > 8:
            pa('http://www.yuetutu.com' + i, name)
        b = b + 1
        time.sleep(1)
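
For anything longer than a one-off run, a timeout and automatic retries are worth adding: by default requests will wait indefinitely if the server stalls. A minimal sketch using a Session with urllib3's Retry; the parameter values (3 retries, 10-second timeout) are arbitrary choices, not from the original script:

import requests
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

# A session that retries transient failures with exponential backoff.
session = requests.Session()
retries = Retry(total=3, backoff_factor=1, status_forcelist=[500, 502, 503, 504])
session.mount('http://', HTTPAdapter(max_retries=retries))
session.mount('https://', HTTPAdapter(max_retries=retries))

# Drop-in replacement for r.get(...) inside pa(): fail after 10 s instead of hanging.
resp = session.get('http://www.yuetutu.com/cbook_22694/', timeout=10)
resp.encoding = 'utf-8'
print(resp.status_code)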

  
