Crawling blogs

Crawl the blog and save the posts to a database
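All three scripts below rely on the same cnblogs list-page structure: each post sits in a div with class "post" that holds a title link and a "postFoot" footer beginning with "posted @ " and the date. The following is a minimal sketch with simplified, made-up markup (not the real page source) showing what the selectors and the date[9:19] slice are doing:

from bs4 import BeautifulSoup

# Hypothetical, simplified stand-in for one list-page entry; the real
# cnblogs markup carries more attributes and nesting.
sample = """
<div class="post">
  <div class="postTitle"><a class="postTitle2" href="https://www.cnblogs.com/java-six/p/12345.html"><span>Sample title</span></a></div>
  <div class="postFoot">posted @ 2022-08-02 21:14 java-six</div>
</div>
"""

soup = BeautifulSoup(sample, 'html.parser')
post = soup.find_all('div', 'post')[0]
print(post.div.a['href'])                # the post link
print(post.div.a.span.string.strip())    # the post title
# "posted @ " is 9 characters long, so the next 10 characters are the date
print(post.find_all('div', 'postFoot')[0].text.strip()[9:19])   # 2022-08-02

With that shape in mind, the first full script: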

import requests
import pymysql
from bs4 import BeautifulSoup

# Blog list URL; the page number is appended in the loop below
url = "https://www.cnblogs.com/java-six/default.html?page="

header={
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.0.0 Safari/537.36'
}

# Connect to the database
conn = pymysql.connect(host='192.168.43.162', user='root', password='heziqaz1234', database='ssmbuild', charset='utf8', port=3306)
# Create a cursor object
cursor = conn.cursor()
# Insert a row only if no row with the same link already exists
# (link is passed twice: once for the insert, once for the NOT EXISTS check)
sql = 'INSERT INTO blogs(date, title, link) SELECT %s, %s, %s FROM DUAL WHERE NOT EXISTS (SELECT 1 FROM blogs WHERE link = %s)'
pageindex = 0
while True:
    pageindex = pageindex+1
    nowurl = url+str(pageindex)
    response = requests.get(nowurl,headers=header)
    print(nowurl)
    print("This is "+str(pageindex)+"page")
    if(response.status_code!=200):
        print("The status_code is "+response.status_code)
        break

    soup = BeautifulSoup(response.text,'html.parser')
    # Posts on this page
    arr = soup.find_all('div', 'post')
    resSize = len(arr)
    # Stop once a page contains no posts (past the last page)
    if resSize == 0:
        break

    # print(arr[1].span.string)

    for i in range(resSize):
        # print("++++++"+str(arr[i].div.a.span.string)+"+++++")
        # .strip() removes surrounding whitespace and other special characters
        date = str(arr[i].find_all('div', 'postFoot')[0].text.strip())
        # The footer starts with "posted @ ", so characters 9..18 hold the date (YYYY-MM-DD)
        date = date[9:19]
        title = str(arr[i].div.a.span.string).strip()
        print(title + "++++++++++++++")
        link = str(arr[i].div.a['href'])
        data = [date, title, link, link]
        response = requests.get(link,headers=header)
        soup = BeautifulSoup(response.text,'html.parser')
        # prettify() returns the post body as formatted HTML (fetched but not used in this database-only version)
        html = soup.find('div', 'post').prettify()
        # Execute the SQL statement to insert the row
        cursor.execute(sql, data)
        conn.commit()
    # print(soup.find_all('a','postTitle2')).

# Close the cursor
cursor.close()
# Close the connection
conn.close()
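The INSERT statement assumes a blogs table with date, title, and link columns already exists in the ssmbuild database; its definition is not shown in the post. One possible sketch of a matching schema (the column types and the id column are assumptions), created over the same pymysql connection:

import pymysql

conn = pymysql.connect(host='192.168.43.162', user='root', password='heziqaz1234', database='ssmbuild', charset='utf8', port=3306)
cursor = conn.cursor()
# Assumed schema; the real table may differ. The link column is what the
# NOT EXISTS check in the insert statement deduplicates on.
cursor.execute("""
CREATE TABLE IF NOT EXISTS blogs (
    id    INT AUTO_INCREMENT PRIMARY KEY,
    date  VARCHAR(10),
    title VARCHAR(255),
    link  VARCHAR(255)
)
""")
conn.commit()
cursor.close()
conn.close()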

Crawl the blog and save the posts to Word or PDF

import requests
from bs4 import BeautifulSoup
import pypandoc
import pdfkit

home = input("请输入博客网址:")
# 输入博客网址
url = home+"/default.html?page="
header={
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.0.0 Safari/537.36'
}
pageindex = 0
# Index used to number the output files
blogsnum = 0
while True:
    pageindex = pageindex+1
    nowurl = url+str(pageindex)
    response = requests.get(nowurl,headers=header)
    print(nowurl)
    print("This is "+str(pageindex)+"page")

    soup = BeautifulSoup(response.text,'html.parser')
    # Posts on this page
    arr = soup.find_all('div', 'post')
    resSize = len(arr)
    # Stop once a page contains no posts (past the last page)
    if resSize == 0:
        break
    # print(arr[1].span.string)

    for i in range(resSize):
        # print("++++++"+str(arr[i].div.a.span.string)+"+++++")
        # .strip() removes surrounding whitespace and other special characters
        date = str(arr[i].find_all('div', 'postFoot')[0].text.strip())
        # The footer starts with "posted @ ", so characters 9..18 hold the date (YYYY-MM-DD)
        date = date[9:19]
        title = str(arr[i].div.a.span.string).strip()
        print(title + "++++++++++++++")
        link = str(arr[i].div.a['href'])
        data = [date, title, link, link]
        response = requests.get(link,headers=header)
        soup = BeautifulSoup(response.text,'html.parser')
        # prettify() returns the post body as formatted HTML for the conversions below
        html = soup.find('div','post').prettify()
        # print(str(html)+"--------")
        # Optional: sleep 5 seconds between requests (add "import time" at the top first)
        # time.sleep(5)
        # pypandoc.convert_text(source, to_format, from_format, outputfile=...) saves to Word

        # Generate the Word (.docx) version
        docoutput = pypandoc.convert_text(html, 'docx','html', outputfile="blogs-"+str(blogsnum)+".docx")
        # Generate the PDF version; specify UTF-8 encoding, otherwise Chinese characters come out garbled
        pdfkit_options = {'encoding': 'UTF-8'}
        pdfoutput = pdfkit.from_string(html,"blogs-"+str(blogsnum)+".pdf",options=pdfkit_options)
        blogsnum = blogsnum+1
    # print(soup.find_all('a','postTitle2')).
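Both converters shell out to external tools: pypandoc needs a pandoc binary and pdfkit needs wkhtmltopdf, so the calls above fail if those are not installed. A minimal sketch for checking the prerequisites up front (the wkhtmltopdf path is an assumed example; adjust it to the actual install location):

import pypandoc
import pdfkit

# pypandoc can fetch its own pandoc binary if none is found on PATH
try:
    print("pandoc version:", pypandoc.get_pandoc_version())
except OSError:
    pypandoc.download_pandoc()

# pdfkit needs wkhtmltopdf; point it at the binary explicitly when it is not on PATH
config = pdfkit.configuration(wkhtmltopdf='/usr/local/bin/wkhtmltopdf')
pdfkit.from_string('<p>hello</p>', 'check.pdf', options={'encoding': 'UTF-8'}, configuration=config)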

Everything combined (database plus Word/PDF)

import requests
import pymysql
from bs4 import BeautifulSoup
import pypandoc
import pdfkit
import time

# Blog list URL; the page number is appended in the loop below
url = "https://www.cnblogs.com/java-six/default.html?page="

header={
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/103.0.0.0 Safari/537.36'
}

# Connect to the database
conn = pymysql.connect(host='192.168.43.162', user='root', password='heziqaz1234', database='ssmbuild', charset='utf8', port=3306)
# Create a cursor object
cursor = conn.cursor()
# Insert a row only if no row with the same link already exists
# (link is passed twice: once for the insert, once for the NOT EXISTS check)
sql = 'INSERT INTO blogs(date, title, link) SELECT %s, %s, %s FROM DUAL WHERE NOT EXISTS (SELECT 1 FROM blogs WHERE link = %s)'
pageindex = 0
# Index used to number the output files
blogsnum = 0
while True:
    pageindex = pageindex+1
    nowurl = url+str(pageindex)
    response = requests.get(nowurl,headers=header)
    print(nowurl)
    print("This is "+str(pageindex)+"page")
    if(response.status_code!=200):
        print("The status_code is "+response.status_code)
        break

    soup = BeautifulSoup(response.text,'html.parser')
    # Posts on this page
    arr = soup.find_all('div', 'post')
    resSize = len(arr)
    # Stop once a page contains no posts (past the last page)
    if resSize == 0:
        break

    # print(arr[1].span.string)

    for i in range(resSize):
        # print("++++++"+str(arr[i].div.a.span.string)+"+++++")
        # .strip() removes surrounding whitespace and other special characters
        date = str(arr[i].find_all('div', 'postFoot')[0].text.strip())
        # The footer starts with "posted @ ", so characters 9..18 hold the date (YYYY-MM-DD)
        date = date[9:19]
        title = str(arr[i].div.a.span.string).strip()
        print(title + "++++++++++++++")
        link = str(arr[i].div.a['href'])
        data = [date, title, link, link]
        response = requests.get(link,headers=header)
        soup = BeautifulSoup(response.text,'html.parser')
        # prettify() returns the post body as formatted HTML for the conversions below
        html = soup.find('div','post').prettify()
        # print(str(html)+"--------")
        # Optional: sleep 5 seconds between requests
        # time.sleep(5)
        # pypandoc.convert_text(source, to_format, from_format, outputfile=...) saves to Word

        # Generate the Word (.docx) version
        docoutput = pypandoc.convert_text(html, 'docx','html', outputfile="blogs-"+str(blogsnum)+".docx")
        # Generate the PDF version; specify UTF-8 encoding, otherwise Chinese characters come out garbled
        pdfkit_options = {'encoding': 'UTF-8'}
        pdfoutput = pdfkit.from_string(html,"blogs-"+str(blogsnum)+".pdf",options=pdfkit_options)
        blogsnum = blogsnum+1
        # Execute the SQL statement to insert the row
        cursor.execute(sql, data)
        conn.commit()
    # print(soup.find_all('a','postTitle2')).

# Close the cursor
cursor.close()
# Close the connection
conn.close()
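After a run, a quick query shows what actually landed in the table. A minimal sketch reusing the same connection settings (the queries are just examples):

import pymysql

conn = pymysql.connect(host='192.168.43.162', user='root', password='heziqaz1234', database='ssmbuild', charset='utf8', port=3306)
cursor = conn.cursor()
# Count the stored posts and print the five most recent ones
cursor.execute("SELECT COUNT(*) FROM blogs")
print("rows:", cursor.fetchone()[0])
cursor.execute("SELECT date, title, link FROM blogs ORDER BY date DESC LIMIT 5")
for row in cursor.fetchall():
    print(row)
cursor.close()
conn.close()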