Using Python bs4 together with a database (pymysql)

# -*- coding:utf-8 -*-

from urllib.request import urlopen
from bs4 import BeautifulSoup as bs
import re
import pymysql

# Fetch the URL and decode the response as UTF-8
resp = urlopen('https://en.wikipedia.org/wiki/Main_Page').read().decode('utf-8')

# Parse the response with BeautifulSoup and collect all <a> tags whose href starts with /wiki/
soup = bs(resp, 'html.parser')
listUrls = soup.find_all('a', href=re.compile('^/wiki/'))


# Iterate over the collected links, skipping any that point to images
for url in listUrls:
    if not re.search(r'\.(jpg|JPG)$', url['href']):
        # .string only works for a tag with a single child; get_text() returns all text under the tag
        print(url.get_text(), '< - - - >', 'https://en.wikipedia.org'+url['href'])  # link text and full URL

        # Open a database connection
        # (note: a new connection is opened for every link here; see the single-connection sketch after the listing)
        conn = pymysql.connect(
            host='localhost',
            port=3306,
            user='root',
            password='root',
            db='wikiurl',
            charset='utf8'
        )

        try:
            with conn.cursor() as cur:  # the with-block closes the cursor automatically
                sql = "insert into `urls`(`urlname`,`urlhref`) values(%s,%s)"
                # execute the INSERT with parameter binding
                cur.execute(sql, (url.get_text(), 'https://en.wikipedia.org'+url['href']))
                # commit the transaction
                conn.commit()
        finally:
            conn.close()
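
The script assumes a `wikiurl` database with a `urls` table already exists. The original post does not show the schema; the following is only a minimal one-off setup sketch that would satisfy the INSERT above (the column names `urlname` and `urlhref` come from the SQL in the script, while the types and lengths are assumptions):

# One-off setup sketch: create the database and table the script expects.
# Column types/lengths are assumptions; adjust as needed.
import pymysql

conn = pymysql.connect(host='localhost', port=3306,
                       user='root', password='root', charset='utf8')
try:
    with conn.cursor() as cur:
        cur.execute("CREATE DATABASE IF NOT EXISTS `wikiurl` DEFAULT CHARACTER SET utf8")
        cur.execute("""
            CREATE TABLE IF NOT EXISTS `wikiurl`.`urls` (
                `id`      INT UNSIGNED AUTO_INCREMENT PRIMARY KEY,
                `urlname` VARCHAR(255) NOT NULL,
                `urlhref` VARCHAR(1000) NOT NULL
            ) DEFAULT CHARSET=utf8
        """)
    conn.commit()
finally:
    conn.close()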

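As noted in the listing, a new MySQL connection is opened and closed for every matching link, which works but is wasteful. A sketch of the same insert logic reusing a single connection and cursor.executemany (same table, credentials, and the listUrls/filtering from above assumed) could look like this:

# Sketch: collect the rows first, then insert them all over one connection.
rows = []
for url in listUrls:
    if not re.search(r'\.(jpg|JPG)$', url['href']):
        rows.append((url.get_text(), 'https://en.wikipedia.org'+url['href']))

conn = pymysql.connect(host='localhost', port=3306, user='root',
                       password='root', db='wikiurl', charset='utf8')
try:
    with conn.cursor() as cur:
        # executemany runs the same parameterized INSERT once per tuple in rows
        cur.executemany("insert into `urls`(`urlname`,`urlhref`) values(%s,%s)", rows)
    conn.commit()
finally:
    conn.close()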
 
