Python 爬取《国王排名》漫画

最近在B站迷上了一部动漫,也是目前比较火的动漫之一《国王排名》
因为...

额(⊙o⊙)…一周才更一集,目前才更到第八集,等不住后面的剧情了,所以想找找现成的动漫看看O(∩_∩)O~

因为涉及到JS解密,个人对这方面的信息爬取还是不够熟练,所以参考了CSDN上的一篇文章,写的还是挺不错的,稍有基础的基本上都能够看懂!
点击下面链接即可跳转:
Python 爬取漫画以及JS解析

下面的代码是我参照了上面链接文章的JS解密写的代码,主要针对《国王排名》漫画的爬取下载,仅供参考!

import requests
import re
import os
import execjs
from pyquery import PyQuery

# Request headers: a desktop-Chrome User-Agent so dm5.com serves the normal
# page instead of blocking the default python-requests client.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.45 Safari/537.36',
}
# Index page of the "Ranking of Kings" manga; chapter links are scraped from here.
url = 'http://www.dm5.com/manhua-guowangpaiming/'


def getOne(url):
    """Fetch the manga index page and return its chapter list.

    Returns a list of [chapter_title, absolute_chapter_url] pairs,
    ordered from the first chapter to the latest.
    """
    page = requests.get(url, headers=headers)
    doc = PyQuery(page.content.decode('utf-8'))
    # Each <a> under the chapter <select> list carries the title and a
    # site-relative href; prepend the host to make it absolute.
    chapters = [
        [link.text(), 'http://www.dm5.com' + link.attr("href")]
        for link in doc("#detail-list-select-1 li a").items()
    ]
    # The site lists newest-first; flip to chronological order.
    return chapters[::-1]


def _first_match(pattern, text, what):
    """Return the first regex capture of *pattern* in *text*, stripped.

    Raises ValueError with a readable message when the page layout changed
    and the expected variable is missing, instead of a bare IndexError.
    """
    found = re.findall(pattern, text)
    if not found:
        raise ValueError(f'page variable not found: {what}')
    return found[0].strip()


def getTwo(chapters_url):
    """Download every chapter's pages into ./国王排名/<chapter>/<n>.jpg.

    chapters_url: list of [chapter_name, chapter_url] pairs, as produced
    by getOne(). Images are fetched via the site's chapterfun.ashx JS API,
    whose response is evaluated with execjs to recover the image URL.
    """
    # Portable path handling: the original built paths with '\\', which
    # only works on Windows.
    pathOne = os.path.join(os.getcwd(), '国王排名')
    os.makedirs(pathOne, exist_ok=True)
    for chapter_name, chapter_url in chapters_url:
        print(f"开始下载 >> {chapter_name} << ")
        pathTwo = os.path.join(pathOne, chapter_name)
        os.makedirs(pathTwo, exist_ok=True)
        # Pass the shared headers (User-Agent) like every other request in
        # this script; the original omitted them here.
        response = requests.get(chapter_url, headers=headers)
        print(chapter_url)
        text = response.text
        # Scrape the anti-leech signing parameters embedded as JS vars.
        cid = _first_match('var DM5_CID=(.*?);', text, 'DM5_CID')
        mid = _first_match('var DM5_MID=(.*?);', text, 'DM5_MID')
        dt = _first_match('var DM5_VIEWSIGN_DT="(.*?)";', text, 'DM5_VIEWSIGN_DT')
        sign = _first_match('var DM5_VIEWSIGN="(.*?)";', text, 'DM5_VIEWSIGN')
        page_count = int(_first_match('var DM5_IMAGE_COUNT=(.*?);', text, 'DM5_IMAGE_COUNT'))
        for page in range(1, page_count + 1):
            # The API returns obfuscated JS whose evaluation yields a list
            # of image URLs; we take the first (current page).
            js_api = f'{chapter_url}chapterfun.ashx?cid={cid}&page={page}&key=&language=1&gtk=6&_cid={cid}&_mid={mid}&_dt={dt}&_sign={sign}'
            ret = requests.get(js_api, headers={'referer': 'http://www.dm5.com'})
            js_code = ret.text
            image_url = execjs.eval(js_code)
            img_url = image_url[0]
            try:
                with open(os.path.join(pathTwo, f'{page}.jpg'), 'wb') as f:
                    f.write(requests.get(img_url).content)
                print(f"下载 {chapter_name} {page}.jpg......")
            except Exception as e:
                # Best-effort: a failed page is reported but does not abort
                # the rest of the chapter.
                print(f'{chapter_name} {page}下载失败:{e}')


def main():
    """Entry point: collect the chapter list, then download every chapter."""
    getTwo(getOne(url))


if __name__ == '__main__':
    main()
posted @ 2021-12-05 00:12  槑孒  阅读(179)  评论(0)  编辑  收藏  举报