Data Collection Assignment 5
Task 1
Become proficient with Selenium for locating HTML elements, crawling Ajax-loaded web page data, and waiting for HTML elements.
Use the Selenium framework to crawl product information and images for one category of goods on JD.com.
Code:
```python
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import urllib.request
import threading
import sqlite3
import os
import datetime
from selenium.webdriver.common.keys import Keys
import time
class MySpider:
    headers = {
        "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.183 Safari/537.36"}
    imagePath = "download"

    def startUp(self, url, key):
        # Initializing Chrome browser
        chrome_options = Options()
        chrome_options.add_argument('--headless')
        chrome_options.add_argument('--disable-gpu')
        self.driver = webdriver.Chrome(chrome_options=chrome_options)
        # Initializing variables
        self.threads = []
        self.No = 0
        self.imgNo = 0
        # Initializing database
        try:
            self.con = sqlite3.connect("phones.db")
            self.cursor = self.con.cursor()
            try:
                # Drop the table if it already exists
                self.cursor.execute("drop table phones")
            except:
                pass
            try:
                # Create a new table
                sql = "create table phones (mNo varchar(32) primary key, mMark varchar(256),mPrice varchar(32),mNote varchar(1024),mFile varchar(256))"
                self.cursor.execute(sql)
            except:
                pass
        except Exception as err:
            print(err)
        # Initializing images folder
        try:
            if not os.path.exists(MySpider.imagePath):
                os.mkdir(MySpider.imagePath)
            images = os.listdir(MySpider.imagePath)
            for img in images:
                s = os.path.join(MySpider.imagePath, img)
                os.remove(s)
        except Exception as err:
            print(err)
        self.driver.get(url)
        keyInput = self.driver.find_element_by_id("key")
        keyInput.send_keys(key)
        keyInput.send_keys(Keys.ENTER)
    def closeUp(self):
        try:
            self.con.commit()
            self.con.close()
            self.driver.close()
        except Exception as err:
            print(err)

    def insertDB(self, mNo, mMark, mPrice, mNote, mFile):
        try:
            sql = "insert into phones (mNo,mMark,mPrice,mNote,mFile) values (?,?,?,?,?)"
            self.cursor.execute(sql, (mNo, mMark, mPrice, mNote, mFile))
        except Exception as err:
            print(err)

    def showDB(self):
        try:
            con = sqlite3.connect("phones.db")
            cursor = con.cursor()
            print("%-8s%-16s%-8s%-16s%s" % ("No", "Mark", "Price", "Image", "Note"))
            cursor.execute("select mNo,mMark,mPrice,mFile,mNote from phones order by mNo")
            rows = cursor.fetchall()
            for row in rows:
                print("%-8s %-16s %-8s %-16s %s" % (row[0], row[1], row[2], row[3], row[4]))
            con.close()
        except Exception as err:
            print(err)

    def download(self, src1, src2, mFile):
        data = None
        if src1:
            try:
                req = urllib.request.Request(src1, headers=MySpider.headers)
                resp = urllib.request.urlopen(req, timeout=10)
                data = resp.read()
            except:
                pass
        if not data and src2:
            try:
                req = urllib.request.Request(src2, headers=MySpider.headers)
                resp = urllib.request.urlopen(req, timeout=10)
                data = resp.read()
            except:
                pass
        if data:
            print("download begin", mFile)
            fobj = open(os.path.join(MySpider.imagePath, mFile), "wb")
            fobj.write(data)
            fobj.close()
            print("download finish", mFile)
    def processSpider(self):
        try:
            time.sleep(1)
            print(self.driver.current_url)
            lis = self.driver.find_elements_by_xpath("//div[@id='J_goodsList']//li[@class='gl-item']")
            for li in lis:
                # We find that the image is either in src or in data-lazy-img attribute
                try:
                    src1 = li.find_element_by_xpath(".//div[@class='p-img']//a//img").get_attribute("src")
                except:
                    src1 = ""
                try:
                    src2 = li.find_element_by_xpath(".//div[@class='p-img']//a//img").get_attribute("data-lazy-img")
                except:
                    src2 = ""
                try:
                    price = li.find_element_by_xpath(".//div[@class='p-price']//i").text
                except:
                    price = "0"
                try:
                    note = li.find_element_by_xpath(".//div[@class='p-name p-name-type-2']//a//em").text
                    mark = note.split(" ")[0]
                    mark = mark.replace("爱心东东\n", "")
                    mark = mark.replace(",", "")
                    note = note.replace("爱心东东\n", "")
                    note = note.replace(",", "")
                except:
                    note = ""
                    mark = ""
                    src2 = ""
                self.No = self.No + 1
                no = str(self.No)
                while len(no) < 6:
                    no = "0" + no
                print(no, mark, price)
                if src1:
                    src1 = urllib.request.urljoin(self.driver.current_url, src1)
                    p = src1.rfind(".")
                    mFile = no + src1[p:]
                elif src2:
                    src2 = urllib.request.urljoin(self.driver.current_url, src2)
                    p = src2.rfind(".")
                    mFile = no + src2[p:]
                if src1 or src2:
                    T = threading.Thread(target=self.download, args=(src1, src2, mFile))
                    T.setDaemon(False)
                    T.start()
                    self.threads.append(T)
                else:
                    mFile = ""
                self.insertDB(no, mark, price, note, mFile)
            # If a disabled "previous page" button is found, stop here;
            # otherwise click the "next page" link and crawl the next page recursively.
            try:
                self.driver.find_element_by_xpath("//span[@class='p-num']//a[@class='pn-prev disabled']")
            except:
                nextPage = self.driver.find_element_by_xpath("//span[@class='p-num']//a[@class='pn-next']")
                time.sleep(10)
                nextPage.click()
                self.processSpider()
        except Exception as err:
            print(err)
    def executeSpider(self, url, key):
        starttime = datetime.datetime.now()
        print("Spider starting......")
        self.startUp(url, key)
        print("Spider processing......")
        self.processSpider()
        print("Spider closing......")
        self.closeUp()
        for t in self.threads:
            t.join()
        print("Spider completed......")
        endtime = datetime.datetime.now()
        elapsed = (endtime - starttime).seconds
        print("Total ", elapsed, " seconds elapsed")


url = "https://search.jd.com/Search?keyword=%E6%89%8B%E6%9C%BA&enc=utf-8&wq=%E6%89%8B%E6%9C%BA&pvid=3b49f8ac7fda4e90be468e2e23de7bfc"
spider = MySpider()
while True:
    print("1.爬取")
    print("2.显示")
    print("3.退出")
    s = input("请选择(1,2,3):")
    if s == "1":
        spider.executeSpider(url, "手机")
        continue
    elif s == "2":
        spider.showDB()
        continue
    elif s == "3":
        break
```
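The task statement also mentions waiting for HTML elements, while the code above relies on fixed time.sleep() calls. A minimal sketch of an explicit wait on the product list container, assuming the same J_goodsList id as above (the 60-second timeout is an arbitrary choice):

```python
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

def wait_for_goods_list(driver, timeout=60):
    # Block until the product list container is present instead of sleeping
    # a fixed amount of time; raises TimeoutException if it never appears.
    WebDriverWait(driver, timeout).until(
        EC.presence_of_element_located((By.ID, "J_goodsList"))
    )
```

Calling wait_for_goods_list(self.driver) at the top of processSpider would be one way to replace the time.sleep(1) there.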
Results:
Reflections:
This task was a code reproduction exercise. By working through the example I learned how Selenium and XPath are used together. I still ran into a few small problems at the start, mainly because I got the code formatting wrong: several lines were indented incorrectly, which cost me a lot of time. It seems that every assignment I hit this kind of problem, not big but not trivial either, and the code simply will not run until it is solved, which is a headache.
Task 2
Become proficient with Selenium for locating HTML elements, crawling Ajax-loaded web page data, and waiting for HTML elements.
Use the Selenium framework plus MySQL database storage to crawl stock data for the three boards 沪深A股 (SH & SZ A shares), 上证A股 (Shanghai A shares) and 深证A股 (Shenzhen A shares).
Code:
```python
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import sqlite3
class stocks:
    encoding = 'utf-8'

    def startUp(self, url):
        chrome_options = Options()
        chrome_options.add_argument("--headless")
        chrome_options.add_argument("--disable-gpu")
        self.driver = webdriver.Chrome(chrome_options=chrome_options)
        self.no = 0
        try:
            self.con = sqlite3.connect("st.db")
            self.cursor = self.con.cursor()
            try:
                self.cursor.execute("drop table stocks")
            except:
                pass
            # Create the stocks table
            try:
                sql = "create table stocks(no varchar(256) ,num varchar(256),name varchar(256),np varchar(256)," \
                      "zdnum varchar(256),zdp varchar(256),cjnum varchar(256),cjp varchar(256),ud varchar(256)," \
                      "high varchar(256),low varchar(256),today varchar(256),yest varchar(256)) "
                self.cursor.execute(sql)
            except:
                pass
        except Exception as err:
            print(err)
        self.driver.get(url)
    def closeUp(self):
        try:
            self.con.commit()
            self.con.close()
            self.driver.close()
        except Exception as err:
            print(err)

    # Insert one crawled row into the stocks table
    def insertDB(self, no, num, name, np, zdnum, zdp, cjnum, cjp, ud, high, low, today, yest):
        try:
            sql = "insert into stocks (no,num,name,np,zdnum,zdp,cjnum,cjp,ud,high,low,today,yest) values (?,?,?,?,?," \
                  "?,?,?,?,?,?,?,?) "
            self.cursor.execute(sql, (no, num, name, np, zdnum, zdp, cjnum, cjp, ud, high, low, today, yest))
        except Exception as err:
            print(err)

    def showDB(self):
        try:
            con = sqlite3.connect("st.db")
            cursor = con.cursor()
            print("序号", "代码", "名称", "最新价", "涨跌幅", "涨跌额", "成交量", "成交额", "振幅", "最高", "最低", "今开", "昨收")
            # Fetch all stored rows
            cursor.execute(
                "select no,num,name,np,zdnum,zdp,cjnum,cjp,ud,high,low,today,yest from stocks order by no")
            rows = cursor.fetchall()
            # Print each row to the console
            for row in rows:
                print(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10], row[11],
                      row[12])
            con.close()
        except Exception as err:
            print(err)
    def execute(self, url):
        print("Starting......")
        self.startUp(url)
        print("Processing......")
        self.processSpider()
        print("Closing......")
        self.closeUp()
        print("Completed......")

    def processSpider(self):
        try:
            # Every row of stock data sits under this table path
            lis = self.driver.find_elements_by_xpath("//table[@id='table_wrapper-table']/tbody/tr")
            for li in lis:
                num = li.find_element_by_xpath(".//td[position()=2]/a[@href]").text
                name = li.find_element_by_xpath(".//td[@class='mywidth']/a[@href]").text
                np = li.find_element_by_xpath(".//td[position()=5]").text
                zdnum = li.find_element_by_xpath(".//td[position()=6]/span").text
                zdp = li.find_element_by_xpath(".//td[position()=7]/span").text
                cjnum = li.find_element_by_xpath(".//td[position()=8]").text
                cjp = li.find_element_by_xpath(".//td[position()=9]").text
                ud = li.find_element_by_xpath(".//td[position()=10]").text
                high = li.find_element_by_xpath(".//td[position()=11]/span").text
                low = li.find_element_by_xpath(".//td[position()=12]/span").text
                today = li.find_element_by_xpath(".//td[position()=13]/span").text
                yest = li.find_element_by_xpath(".//td[position()=14]").text
                self.no = self.no + 1
                no = self.no
                # Insert this row into the database
                self.insertDB(no, num, name, np, zdnum, zdp, cjnum, cjp, ud, high, low, today, yest)
        except Exception as err:
            print(err)
header = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.87 Safari/537.36 SLBrowser/6.0.1.9171"
}
# Target URL (沪深A股 board); start crawling
url = "http://quote.eastmoney.com/center/gridlist.html#hs_a_board"
spider = stocks()
while True:
    print("1.爬取")
    print("2.显示")
    print("3.退出")
    s = input("请选择(1,2,3):")
    if s == "1":
        spider.execute(url)
        continue
    elif s == "2":
        spider.showDB()
        continue
    elif s == "3":
        break
```
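The code above only visits the 沪深A股 board (hs_a_board), while the task asks for three boards. One possible way to extend it is to loop over the board page fragments; the sh_a_board and sz_a_board fragment names below are assumptions about the East Money gridlist page and should be verified in the browser:

```python
# Hypothetical board-name -> URL-fragment mapping; hs_a_board comes from the
# code above, the other two fragment names are assumed.
boards = {
    "沪深A股": "hs_a_board",
    "上证A股": "sh_a_board",
    "深证A股": "sz_a_board",
}

spider = stocks()
for name, fragment in boards.items():
    print("Crawling board:", name)
    spider.execute("http://quote.eastmoney.com/center/gridlist.html#" + fragment)
```

Note that startUp() drops and recreates the stocks table on every call, so as written each board would overwrite the previous one; the table creation would have to be moved out of startUp (or made create-if-not-exists) before looping like this.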
Results:
Reflections:
This time I planned to follow the approach of the worked example, but unexpectedly ran into an encoding problem.
The console output looked fine, but the data viewed in the database file was garbled. The page's charset is utf-8, yet setting utf8 did not fix it, and neither did utf32. At a classmate's suggestion I installed Navicat and opened the generated .db file, and the data displayed correctly. Perhaps the Navicat for MySQL tool is simply not well suited to this file? Something to look into further.
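One way to check whether the problem is the stored data or the viewer is to read the rows back with Python, which decodes SQLite TEXT columns as UTF-8 by default; a minimal sketch:

```python
import sqlite3

con = sqlite3.connect("st.db")
cursor = con.cursor()
# If these rows print correctly, the data in st.db is fine and the garbled
# display was an artifact of the viewer, not of the spider.
for row in cursor.execute("select no, num, name, np from stocks limit 5"):
    print(row)
con.close()
```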
Task 3
Become proficient with Selenium for locating HTML elements, simulating user login, crawling Ajax-loaded web page data, and waiting for HTML elements.
Use Selenium plus MySQL to crawl course information from the icourse163.org MOOC platform (course number, course name, school, lead teacher, team members, number of participants, course progress, course description).
Code:
```python
import pymysql
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
class course:
    def startUp(self, url):
        chrome_options = Options()
        chrome_options.add_argument('--headless')
        chrome_options.add_argument('--disable-gpu')
        self.driver = webdriver.Chrome(options=chrome_options)
        # Connect to the MySQL database
        try:
            self.con = pymysql.connect(host="127.0.0.1", port=3306, user="root", passwd='123456', db="mydb", charset="utf8")
            self.cursor = self.con.cursor(pymysql.cursors.DictCursor)
            self.cursor.execute("delete from courses")
            self.opened = True
        except Exception as err:
            print(err)
            self.opened = False
        self.driver.get(url)
        self.no = 1

    def closeUp(self):
        if self.opened:
            self.con.commit()
            self.con.close()
            self.opened = False
        self.driver.close()
        print("closed")
    def processSpider(self):
        try:
            # All the course cards to crawl sit under this container
            lis = self.driver.find_elements_by_xpath("//div[@class='m-course-list']/div/div[@class]")
            for li in lis:
                name = li.find_element_by_xpath(".//div[@class='t1 f-f0 f-cb first-row']/a/span").text
                school = li.find_element_by_xpath(".//div[@class='t2 f-fc3 f-nowrp f-f0']/a[@class='t21 f-fc9']").text
                author = li.find_element_by_xpath(".//div[@class='t2 f-fc3 f-nowrp f-f0']/a[@class='f-fc9']").text
                member = li.find_element_by_xpath(".//div[@class='t2 f-fc3 f-nowrp f-f0 margin-top0']/span[@class='hot']").text
                process = li.find_element_by_xpath(".//span[@class='txt']").text
                description = li.find_element_by_xpath(".//span[@class='p5 brief f-ib f-f0 f-cb']").text
                # Print the crawled fields to the console
                print(self.no, name, school, author, member, process, description)
                if self.opened:
                    # Insert the record into the courses table
                    self.cursor.execute(
                        "insert into courses(num,name, school, author, member, process, description) "
                        "values(%s, %s, %s, %s, %s, %s, %s)",
                        (str(self.no), name, school, author, member, process, description))
                self.no += 1
        except Exception as err:
            print(err)
    def executeSpider(self, url):
        print("Spider starting......")
        self.startUp(url)
        print("Spider processing......")
        self.processSpider()
        print("Spider closing......")
        self.closeUp()


headers = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.87 Safari/537.36 SLBrowser/6.0.1.9171 "
}
# Start crawling the MOOC search results page
url = 'https://www.icourse163.org/search.htm?search=%E7%B4%A0%E6%8F%8F#/'
spider = course()
spider.executeSpider(url)
```
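The task also mentions simulating a user login, which the code above does not do. A generic Selenium login sketch is shown below; every locator in it (login entry link, iframe, input names, submit button) is a hypothetical placeholder, since the real icourse163.org login form would have to be inspected in the browser and the actual selectors substituted:

```python
import time

def simulated_login(driver, username, password):
    # All locators below are hypothetical placeholders, not real
    # icourse163.org selectors; replace them after inspecting the page.
    driver.find_element_by_xpath("//a[@class='login_btn']").click()
    time.sleep(2)
    # Login forms of this kind are often embedded in an iframe, so switch into it first.
    driver.switch_to.frame(driver.find_element_by_xpath("//iframe[contains(@id, 'login')]"))
    driver.find_element_by_name("phone_input").send_keys(username)
    driver.find_element_by_name("password_input").send_keys(password)
    driver.find_element_by_id("submit_btn").click()
    driver.switch_to.default_content()
    time.sleep(2)
```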
Results:
Reflections:
With the lessons from Task 2 in mind, I took a more conservative approach for Task 3: first create the database and the table, then crawl the data and insert it into that table. This way everything went much more smoothly.
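Since the spider assumes the courses table already exists in the mydb database (startUp only runs delete from courses), the table has to be created beforehand. A sketch of one possible setup script, reusing the connection parameters from the code above; the column names match the insert statement, but the types and sizes are assumptions, as the original table definition is not shown:

```python
import pymysql

# Assumed schema: column names follow the insert statement in processSpider;
# the varchar sizes and the text type for description are guesses.
con = pymysql.connect(host="127.0.0.1", port=3306, user="root",
                      passwd="123456", db="mydb", charset="utf8")
cursor = con.cursor()
cursor.execute("""
    create table if not exists courses (
        num varchar(16),
        name varchar(256),
        school varchar(256),
        author varchar(256),
        member varchar(64),
        process varchar(128),
        description text
    ) default charset=utf8
""")
con.commit()
con.close()
```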