Python in Practice: Scraping a PDF Book

 

This post grabs a single book from https://max.book118.com. Strictly speaking it is not even scraping, just downloading. Consider it a starting point; more ambitious readers could write a crawler for the site's entire document collection.

Take this book as an example: https://max.book118.com/html/2017/0802/125615287.shtm. The site serves each page as a separate image, so once all pages are downloaded, a batch img2pdf pass stitches them back into a single PDF. I will skip the detailed request analysis and go straight to the code (it was written for my own learning, so it is fairly rough).
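Here is a minimal sketch of that img2pdf step (pip install img2pdf). It assumes the downloader below has already finished, i.e. the pages sit under /home/Desktop/ as 0.PNG through 485.PNG; book.pdf is my own choice of output name:

import os
import img2pdf

root = "/home/Desktop/"
# Collect the page images and sort them numerically (0.PNG, 1.PNG, ...)
pages = sorted(
    (f for f in os.listdir(root) if f.endswith(".PNG")),
    key=lambda name: int(os.path.splitext(name)[0]),
)
# Note: img2pdf rejects images with an alpha channel; flatten them first if needed.
with open(os.path.join(root, "book.pdf"), "wb") as out:
    out.write(img2pdf.convert([os.path.join(root, p) for p in pages]))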

 

import requests
import os
import json


def savePng(url, fileName):
    # Save one page image under root; pages already on disk are skipped.
    root = "/home/Desktop/"  # adjust to your own machine
    path = os.path.join(root, fileName)
    if not os.path.exists(root):
        os.mkdir(root)
    if not os.path.exists(path):
        r = requests.get(url)
        r.raise_for_status()
        with open(path, "wb") as f:
            f.write(r.content)


def getPNGName(url):
    # The GetNextPage endpoint returns JSON; its "NextPage" field holds
    # the (encoded) name of the next page image.
    req = requests.get(url)
    json_dict = json.loads(req.content.decode())
    print(json_dict)
    return json_dict["NextPage"]
def getNextPageURL(pngName):
    # Build the GetNextPage URL for a given page image name. The f, readLimit
    # and furl tokens are specific to this document (captured from the
    # browser's network panel); decoding f from Base64 reveals the server-side
    # path of the target .pdf.
    url = "https://view42.book118.com/pdf/GetNextPage/?f=dXAyMjI2LTIuYm9vazExOC5jb20uODBcMzQ4NDU0MS01OTgxMGI5MDMwM2JjLnBkZg==&img=%s&isMobile=false&isNet=True&readLimit=kVJSwRWfuu2BpuMVDJqlnw==&furl=o4j9ZG7fK94kkYRv4gktA2rYw4NlKHsQghNfCDpGDtCDuhClp@zqsXbBvWkfutt7oIxYGVjQwpqa2_7Y@T__cVzRwC_U6kA_a5K64MvXGRoemz@A5sruig==" % pngName
    return url


def getCurPageUrl(pngName):
    # Build the direct URL of the page image itself.
    url = "https://view42.book118.com/img/?img=%s" % pngName
    return url
 
#url = getNextPageURL("7o@o7xcocmmKnrqreQGENvFMksKYwld1WTnrOUUaeADxViDtQ3Pv9cm31oOktykHcA4m4rqRBGs=")
url = "https://view42.book118.com/pdf/GetNextPage/?f=dXAyMjI2LTIuYm9vazExOC5jb20uODBcMzQ4NDU0MS01OTgxMGI5MDMwM2JjLnBkZg==&img=7o@o7xcocmmKnrqreQGENvFMksKYwld1WTnrOUUaeADxViDtQ3Pv9R7mPNB3WAYh&isMobile=false&isNet=True&readLimit=kVJSwRWfuu2BpuMVDJqlnw==&furl=o4j9ZG7fK94kkYRv4gktA2rYw4NlKHsQghNfCDpGDtCDuhClp@zqsXbBvWkfutt7oIxYGVjQwpqa2_7Y@T__cVzRwC_U6kA_a5K64MvXGRoemz@A5sruig=="
for curPageIndex in range(0, 486):  # this particular book has 486 pages
    # Ask the server for the name of the next page image
    pngName = getPNGName(url)
    # Turn that name into the image URL
    url = getCurPageUrl(pngName)
    # Download the PNG, numbering it by page index
    savePng(url, str(curPageIndex) + ".PNG")
    # Build the GetNextPage URL for the following iteration
    url = getNextPageURL(pngName)
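One caveat: with 486 sequential requests, a single timeout kills the run and you have to start over (already-saved pages are at least skipped by savePng). Below is a small hardening sketch of my own, not part of the original script: reuse one requests.Session and retry transient failures, then call get_with_retry wherever requests.get is used above.

import time
import requests

session = requests.Session()

def get_with_retry(url, attempts=3, delay=2):
    # Fetch url with a shared session, retrying transient network errors
    # a few times before giving up.
    for i in range(attempts):
        try:
            r = session.get(url, timeout=30)
            r.raise_for_status()
            return r
        except requests.RequestException:
            if i == attempts - 1:
                raise
            time.sleep(delay)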

 
