Assignment 5

Task 1

1) Experiment content:

Requirements:

  • Become proficient with Selenium: locating HTML elements, crawling Ajax-loaded pages, and waiting for HTML elements (a hedged explicit-wait sketch is given after the sample output below).
  • Use the Selenium framework to crawl the information and images of one category of products on JD.com.

Candidate website: http://www.jd.com/

Keyword: freely chosen by the student

Output: the MySQL output is as follows

mNo      mMark       mPrice    mNote                             mFile
000001   三星Galaxy   9199.00   三星Galaxy Note20 Ultra 5G...     000001.jpg
000002   ......
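Since the requirement emphasizes waiting for HTML elements, the following is a minimal explicit-wait sketch using WebDriverWait, offered as an alternative to the fixed time.sleep(10) used in the reproduced code below. It reuses the search-box id (key) and the product-list XPath (J_goodsList / gl-item) that appear in that code, and assumes JD's page structure has not changed:

# Hedged sketch: wait explicitly for JD's product list instead of sleeping.
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

driver = webdriver.Chrome()
try:
    driver.get("https://www.jd.com")
    keyinput = driver.find_element_by_id("key")   # JD home-page search box
    keyinput.send_keys("手机")
    keyinput.send_keys(Keys.ENTER)
    # block for at most 15 seconds until at least one product cell is present;
    # raises TimeoutException if nothing shows up in time
    items = WebDriverWait(driver, 15).until(
        EC.presence_of_all_elements_located(
            (By.XPATH, "//div[@id='J_goodsList']//li[@class='gl-item']")))
    print("found", len(items), "product items")
finally:
    driver.quit()

Unlike a fixed sleep, the wait returns as soon as the elements appear, which makes the crawl both faster and more robust.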

This code is shown in the course PPT; we simply reproduced it.

The code is as follows:

from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import urllib.request
import threading
import sqlite3
import os
import datetime
from selenium.webdriver.common.keys import Keys
import time
class JD:
    header = {
        "User-Agent": "Mozilla/5.0(Windows;U;Windows NT 6.0 x64;en-US;rv:1.9pre)Gecko/2008072531 Minefield/3.0.2pre"
    }
    # directory where downloaded images are stored
    imagepath = "download"

    def startUp(self, url, key):
        chrome_options = Options()
        chrome_options.add_argument("——headless")
        chrome_options.add_argument("——disable-gpu")
        self.driver = webdriver.Chrome(chrome_options=chrome_options)
        self.threads = []
        self.No = 0
        self.imgNo = 0
        try:
            self.con = sqlite3.connect("phones.db")
            
            self.cursor = self.con.cursor()
            try:
                self.cursor.execute("drop table phones")
                
            except:
                pass
            try:
                sql = "create table phones(mNo varchar(32) primary key,mMark varchar(256),mPrice varchar(32),mNote varchar(1024),mFile varchar(256))"
                self.cursor.execute(sql)
            except:
                pass
        except Exception as err:
            print(err)
        try:
            if not os.path.exists(JD.imagepath):
                
                os.mkdir(JD.imagepath)
            images = os.listdir(JD.imagepath)
            for image in images:
                s = os.path.join(JD.imagepath, image)
                os.remove(s)
        except Exception as err:
            print(err)
        self.driver.get(url)
        keyinput = self.driver.find_element_by_id("key")
        keyinput.send_keys(key)
        keyinput.send_keys(Keys.ENTER)

    def closeUp(self):
        try:
            self.con.commit()
            self.con.close()
            self.driver.close()
        except Exception as err:
            print(err)

    def insertDB(self, mNo, mMark, mPrice, mNote, mFile):
        try:
            sql = "insert into phones (mNo,mMark,mPrice,mNote,mFile) values (?,?,?,?,?)"
            
            self.cursor.execute(sql, (mNo, mMark, mPrice, mNote, mFile))
        except Exception as err:
            print(err)

    def showDB(self):
        try:
            con = sqlite3.connect("phones.db")
            cursor = con.cursor()
            print("%-8s%-16s%-8s%-16s%s" % ("No", "Mark", "Price", "Image", "Note"))
            cursor.execute("select mNO,mMark,mPrice,mFile,mNote from phones order by mNo")
            rows = cursor.fetchall()
            for row in rows:
                print("%-8s%-16s%-8s%-16s%s" % (row[0], row[1], row[2], row[3], row[4]))
               
            con.close()
        except Exception as err:
            print(err)

    def downloadDB(self, src1, src2, mFile):
        data = None
        if src1:
            try:
                req = urllib.request.Request(src1, headers=JD.header)
                resp = urllib.request.urlopen(req, timeout=100)
                data = resp.read()
            except:
                pass
        if not data and src2:
            try:
                req = urllib.request.Request(src2, headers=JD.header)
                resp = urllib.request.urlopen(req, timeout=100)
                data = resp.read()
            except:
                pass
        if data:
            print("download begin!", mFile)
            fobj = open(os.path.join(JD.imagepath, mFile), "wb")
            fobj.write(data)
            fobj.close()
            print("download finish!", mFile)

    def processJD(self):
        time.sleep(10)
        try:
            print(self.driver.current_url)
            lis = self.driver.find_elements_by_xpath("//div[@id='J_goodsList']//li[@class='gl-item']")
            time.sleep(1)
            for li in lis:
                time.sleep(1)
                try:
                    src1 = li.find_element_by_xpath(".//div[@class='p-img']//a//img").get_attribute("src")
                    time.sleep(1)
                except:
                    src1 = ""
                try:
                    src2 = li.find_element_by_xpath(".//div[@class='p-img']//a//img").get_attribute("data-lazy-img")
                    time.sleep(1)
                except:
                    src2 = ""
                try:
                    price = li.find_element_by_xpath(".//div[@class='p-price']//i").text
                    time.sleep(1)
                except:
                    price = "0"
             
                note = li.find_element_by_xpath(".//div[@class='p-name p-name-type-2']//em").text
                mark = note.split(" ")[0]
                mark = mark.replace("爱心东东\n", "")
                mark = mark.replace(",", "")
                note = note.replace("爱心东东\n", "")
                note = note.replace(",", "")
                time.sleep(1)
                self.No = self.No + 1
                no = str(self.No)
                while len(no) < 6:
                    no = "0" + no
                print(no, mark, price)
                if src1:
                    src1 = urllib.request.urljoin(self.driver.current_url, src1)
                    p = src1.rfind(".")
                    mFile = no + src1[p:]
                elif src2:
                    src2 = urllib.request.urljoin(self.driver.current_url, src2)
                    p = src2.rfind(".")
                    mFile = no + src2[p:]
                if src1 or src2:
                    T = threading.Thread(target=self.downloadDB, args=(src1, src2, mFile))
                    T.setDaemon(False)
                    T.start()
                    self.threads.append(T)
                else:
                    mFile = ""
                self.insertDB(no, mark, price, note, mFile)

        except Exception as err:
            print(err)




    def executeJD(self, url,key):
        starttime = datetime.datetime.now()
        print("starting!")
        self.startUp(url, key)
        print("processing!")
        self.processJD()
        print("closing!")
        self.closeUp()
        for t in self.threads:
            t.join()
        print("complete!")
        endtime = datetime.datetime.now()
        elapsed = (endtime - starttime).seconds
        print("Total", elapsed, "seconds elasped")


url = "https://www.jd.com"
spider = JD()
while True:
    print("1.爬取")
    print("2.显示")
    print("3.退出")
    s = input("请选择(1,2,3);")
    if s == "1":
        JD().executeJD(url,'手机')
        continue
    elif s == "2":
        JD().showDB()
        continue
    elif s == "3":
        break
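A small note on the serial-number padding in processJD: the while loop that keeps prepending "0" is equivalent to Python's built-in str.zfill, for example:

# str.zfill pads a string with leading zeros up to the requested width
no = str(7).zfill(6)
print(no)   # prints 000007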

Experiment results:

(Result screenshots omitted here.)

2) Reflections

This experiment is a faithful reproduction of the example from the textbook.

Task 2

1) Experiment content:

Requirements:

  • Become proficient with Selenium: locating HTML elements, crawling Ajax-loaded pages, and waiting for HTML elements.
  • Use the Selenium framework together with MySQL storage to crawl the stock data of the "沪深A股" (SH-SZ A-shares), "上证A股" (Shanghai A-shares) and "深证A股" (Shenzhen A-shares) boards.

Candidate website: Eastmoney (东方财富网): http://quote.eastmoney.com/center/gridlist.html#hs_a_board

Output: MySQL storage and output format as shown below. The table headers should use English names (e.g. 序号: id, 股票代码: bStockNo, ...) and are designed by the students themselves:

No.  Code    Name   Latest  Change%  Change  Volume   Turnover  Amplitude  High  Low    Open  PrevClose
1    688093  N世华   28.47   62.22%   10.92   26.13万  7.6亿     22.34      32.0  28.08  30.2  17.55
2    ......
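For simplicity the reproduced code below stores every column as varchar(256). As a hedged sketch of what a more strongly typed schema could look like (the column names other than id and bStockNo are my own illustrative choice, not prescribed by the assignment), the table could be created through pymysql with something like:

# Sketch only: one stock table with typed columns, created via pymysql.
# The connection parameters mirror those used in the reproduced code below.
import pymysql

con = pymysql.connect(host="127.0.0.1", port=3306, user="root",
                      passwd="123456", db="mydb", charset="utf8")
cursor = con.cursor()
cursor.execute("""
    create table if not exists stock_demo (
        id int primary key,
        bStockNo varchar(16),          -- stock code
        bName varchar(64),             -- stock name
        bLatestPrice decimal(10, 2),   -- latest price
        bChangeRate varchar(16),       -- change percentage, kept as text ('62.22%')
        bVolume varchar(32),           -- volume, kept as text ('26.13万')
        bHigh decimal(10, 2),          -- daily high
        bLow decimal(10, 2),           -- daily low
        bOpen decimal(10, 2),          -- today's open
        bPrevClose decimal(10, 2)      -- yesterday's close
    )
""")
con.commit()
con.close()

Numeric columns make later sorting and aggregation in SQL easier, while fields that carry units from the page (万, 亿, %) are kept as text in this sketch.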

The code is as follows:

import pymysql
from selenium.webdriver.support import expected_conditions as EC
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.wait import WebDriverWait
import time
from selenium.webdriver.common.by import By
import datetime



class MySpider:

    def startup(self, url):
        # initialize the Chrome browser
        chrome_options = Options()
        chrome_options.add_argument('--headless')
        chrome_options.add_argument('--disable-gpu')
        self.driver = webdriver.Chrome(chrome_options=chrome_options)
        # initialize the board counter (1 = 沪深A股, 2 = 上证A股, 3 = 深证A股)
        self.count = 1
        try:
            self.con = pymysql.connect(host="127.0.0.1", port=3306, user="root", passwd="123456", db="mydb",
                                       charset="utf8")
            self.cursor = self.con.cursor(pymysql.cursors.DictCursor)
            try:
                # drop the tables if they already exist
                self.cursor.execute("drop table stock_1")
                self.cursor.execute("drop table stock_2")
                self.cursor.execute("drop table stock_3")
            except:
                pass

            try:
                # create fresh tables (one per board), all with the same columns
                sql_template = ("create table {} (number varchar(256), code varchar(256), name varchar(256), "
                                "the_latest_price varchar(256), price_limit varchar(256), change_amount varchar(256), "
                                "trading_volume varchar(256), turnover varchar(256), swing varchar(256), "
                                "tallest varchar(256), lowest varchar(256), today_open varchar(256), "
                                "yesterday_get varchar(256))")
                for table in ("stock_1", "stock_2", "stock_3"):
                    self.cursor.execute(sql_template.format(table))
            except:
                pass
        except Exception as err:
            print(err)
        self.driver.get(url)

    def closeup(self):
        try:
            # commit, close the database, and close the browser
            self.con.commit()
            self.con.close()
            self.driver.close()
        except Exception as err:
            print(err)
    # insert one row into the given stock table
    def insertdb(self,table,mnumber, mcode, mname, mthe_latest_price, mprice_limit, mchange_amount, mtrading_volume, mturnover, mswing, mtallest,mlowest, mtoday_open, myesterday_get):
        try:
            sql = "insert into " + table + "(number, code, name, the_latest_price, price_limit, change_amount, trading_volume, turnover, swing, tallest,lowest, today_open, yesterday_get) values (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
            self.cursor.execute(sql, (mnumber, mcode, mname, mthe_latest_price, mprice_limit, mchange_amount, mtrading_volume, mturnover, mswing, mtallest,mlowest, mtoday_open, myesterday_get))
            print("成功插入")
        except Exception as err:
            print("插入失败")
            print(err)
    # main crawling routine for the current board
    def processspider(self):
        try:
            # wait until the stock-table rows are located on the page
            WebDriverWait(self.driver, 1000).until(EC.presence_of_all_elements_located((By.XPATH, "//table[@id='table_wrapper-table']/tbody/tr")))
            # extract the fields of each row
            trs = self.driver.find_elements_by_xpath("//table[@id='table_wrapper-table']/tbody/tr")
            time.sleep(1)
            for tr in trs:
                time.sleep(1)
                number = tr.find_element_by_xpath("./td[position()=1]").text
                while len(number) < 6:
                    number = "0" + number
                code = tr.find_element_by_xpath("./td[position()=2]/a").text
                name = tr.find_element_by_xpath("./td[position()=3]/a").text
                the_latest_price = tr.find_element_by_xpath("./td[position()=5]/span").text
                price_limit = tr.find_element_by_xpath("./td[position()=6]/span").text
                change_amount = tr.find_element_by_xpath("./td[position()=7]/span").text
                trading_volume = tr.find_element_by_xpath("./td[position()=8]").text
                turnover = tr.find_element_by_xpath("./td[position()=9]").text
                swing = tr.find_element_by_xpath("./td[position()=10]").text
                tallest = tr.find_element_by_xpath("./td[position()=11]/span").text
                lowest = tr.find_element_by_xpath("./td[position()=12]/span").text
                today_open = tr.find_element_by_xpath("./td[position()=13]/span").text
                yesterday_get = tr.find_element_by_xpath("./td[position()=14]").text
                time.sleep(1)
                print(number, code, name, the_latest_price, price_limit, change_amount, trading_volume, turnover, swing, tallest,lowest, today_open, yesterday_get)
                # write the row into the table that matches the current board
                if self.count == 1:
                    self.insertdb("stock_1", number, code, name, the_latest_price, price_limit, change_amount, trading_volume, turnover, swing, tallest,lowest, today_open, yesterday_get)
                elif self.count == 2:
                    self.insertdb("stock_2", number, code, name, the_latest_price, price_limit, change_amount, trading_volume, turnover, swing, tallest,lowest, today_open, yesterday_get)
                elif self.count == 3:
                    self.insertdb("stock_3", number, code, name, the_latest_price, price_limit, change_amount, trading_volume, turnover, swing, tallest,lowest, today_open, yesterday_get)


        except Exception as err:
            print(err)

    def executespider(self, url):
        boards = ["hs", "sh", "sz"]
        starttime = datetime.datetime.now()
        print("Spider starting......")
        self.startup(url)
        print("Spider processing......")
        # click the tab of each board in turn and crawl its first page
        while self.count < 4:
            print(boards[self.count - 1])
            board_tab = self.driver.find_element_by_xpath(
                "//div/ul[@class='tab-list clearfix']/li[@id='nav_" + boards[self.count - 1] + "_a_board']/a")
            self.driver.execute_script('arguments[0].click()', board_tab)
            time.sleep(3)
            WebDriverWait(self.driver, 1000).until(
                EC.presence_of_element_located((By.XPATH, "//span[@class='paginate_page']/a[last()]")))
            self.processspider()
            self.count = self.count + 1
        print("Spider closing......")
        self.closeup()
        print("Spider completed......")
        endtime = datetime.datetime.now()  # measure how long the crawl took
        elapsed = (endtime - starttime).seconds
        print("Total ", elapsed, " seconds elapsed")


url = "http://quote.eastmoney.com/center/gridlist.html#hs_a_board"
spider = MySpider()
while True:
    print("1.爬取")
    print("2.退出")
    s = input("请选择(1,2):")
    if s == "1":
        spider.executespider(url)
        continue
    elif s == "2":
        break
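After a crawl finishes, a quick hedged check like the following (same connection settings as in the code above) can confirm how many rows were stored for each board:

# Sanity check: count the rows stored in each board's table.
import pymysql

con = pymysql.connect(host="127.0.0.1", port=3306, user="root",
                      passwd="123456", db="mydb", charset="utf8")
cursor = con.cursor()
for table in ("stock_1", "stock_2", "stock_3"):
    cursor.execute("select count(*) from " + table)
    print(table, cursor.fetchone()[0], "rows")
con.close()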

Experiment results:

Python console screenshots are omitted here.


2) Reflections

This task mostly reuses the framework of Task 1 with some changes to the code. It was my first time using the script-execution method that Selenium provides; it is fun to use and makes it possible to crawl the desired content in a more automated way. The rest went smoothly.
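The script method referred to here is driver.execute_script. Below is a minimal hedged sketch of how it is used in this crawler, mirroring the board-tab click in executespider above (the XPath is the same one used in the code, and the page structure is assumed unchanged):

# Sketch: click a board tab via JavaScript, as executespider does.
from selenium import webdriver
from selenium.webdriver.chrome.options import Options

options = Options()
options.add_argument('--headless')
driver = webdriver.Chrome(chrome_options=options)
try:
    driver.get("http://quote.eastmoney.com/center/gridlist.html#hs_a_board")
    # locate the "上证A股" tab the same way executespider does
    tab = driver.find_element_by_xpath(
        "//div/ul[@class='tab-list clearfix']/li[@id='nav_sh_a_board']/a")
    # a JavaScript click avoids "element not clickable" errors that a plain
    # tab.click() can raise when the element is covered or off-screen
    driver.execute_script("arguments[0].click();", tab)
    # execute_script can also run arbitrary snippets, e.g. scroll the page
    driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
finally:
    driver.quit()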

Task 3

1) Experiment content:

Requirements:

  • Become proficient with Selenium: locating HTML elements, simulating user login, crawling Ajax-loaded pages, and waiting for HTML elements (a generic login sketch is given after the sample output below).
  • Use Selenium together with MySQL to crawl course information from the Chinese MOOC site (course id, course name, school, lead teacher, team members, enrollment count, schedule, course description).

Candidate website: China University MOOC: https://www.icourse163.org

Output: MySQL storage and output format

Id cCourse cCollege cTeacher cTeam cCount cProcess cBrief
1 Python数据分析与展示 北京理工大学 嵩天 嵩天 470 2020年11月17日 ~ 2020年12月29日 “我们正步入一个数据或许比软件更重要的新时代。——Tim O'Reilly” 运用数据是精准刻画事物、呈现发展规律的主要手段,分析数据展示规律,把思想变得更精细! ——“弹指之间·享受创新”,通过8周学习,你将掌握利用Python语言表示、清洗、统计和展示数据的能力。
2......
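The requirement also mentions simulated user login, which the reproduced code below does not implement (it only searches without logging in). The following is a generic hedged sketch of what a Selenium login usually looks like; the locators ('loginIframe', 'phone', 'password', 'loginBtn') and the credentials are placeholders, not the real icourse163 selectors, which would have to be read from the actual page:

# Generic login sketch only; the ids below are placeholders, NOT the
# actual icourse163 selectors, which must be inspected on the real page.
from selenium import webdriver

driver = webdriver.Chrome()
driver.get("https://www.icourse163.org")
# many login forms are rendered inside an iframe, so switch into it first
driver.switch_to.frame("loginIframe")                             # placeholder iframe name
driver.find_element_by_id("phone").send_keys("13800000000")       # placeholder account
driver.find_element_by_id("password").send_keys("my-password")    # placeholder password
driver.find_element_by_id("loginBtn").click()                     # placeholder button id
driver.switch_to.default_content()                                # leave the iframe afterwards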

The code is as follows:

import pymysql
from selenium.webdriver.support import expected_conditions as EC
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.wait import WebDriverWait
import time
from selenium.webdriver.common.by import By
import datetime
from selenium.webdriver.common.keys import Keys


class MySpider:

    def startup(self, url, key):
        # initialize the Chrome browser
        chrome_options = Options()
        chrome_options.add_argument('--headless')
        chrome_options.add_argument('--disable-gpu')
        self.driver = webdriver.Chrome(chrome_options=chrome_options)
        self.count = 0
        try:
            self.con = pymysql.connect(host="127.0.0.1", port=3306, user="root", passwd="123456", db="mydb",
                                       charset="utf8")
            self.cursor = self.con.cursor(pymysql.cursors.DictCursor)
            try:
                # drop the table if it already exists
                self.cursor.execute("drop table mooc")
            except:
                pass

            try:
                # create a fresh table
                sql_1 = "create table mooc (Id varchar(2048) , cCourse varchar(2048), cCollege varchar(2048), cTeacher varchar(2048), cTeam varchar(2048), cProcess varchar(2048), cBrief text)"
                self.cursor.execute(sql_1)
            except:
                pass
        except Exception as err:
            print(err)
        self.driver.get(url)
        key="时尚"
        input = self.driver.find_element_by_xpath("//div[@class='web-nav-right-part']//div[@class='u-baseinputui']//input")
        input.send_keys(key)  # 输入关键字
        input.send_keys(Keys.ENTER)

    def closeup(self):
        try:
            # commit, close the database, and close the browser
            self.con.commit()
            self.con.close()
            self.driver.close()
        except Exception as err:
            print(err)

    def insertdb(self,id, course, college, Teacher, Team, Process, Brief):
        try:
            sql = "insert into mooc (Id, cCourse, cCollege, cTeacher, cTeam, cProcess, cBrief) values (%s,%s,%s,%s,%s,%s,%s)"
            self.cursor.execute(sql, (id, course, college, Teacher, Team,Process, Brief))
            print("成功插入")
        except Exception as err:
            print("插入失败")
            print(err)

    def processspider(self):
        try:
            search_handle = self.driver.current_window_handle
            i=5
            # wait until the course cards are located on the page
            WebDriverWait(self.driver, 1000).until(EC.presence_of_all_elements_located((By.XPATH, "//div[@class='m-course-list']/div/div[@class]")))
            spans = self.driver.find_elements_by_xpath("//div[@class='m-course-list']/div/div[@class]")
            while i >= 5:  # note: i starts at 5 and is decremented, so this loop body runs only once (only the first page of results is crawled)
                for span in spans:
                    self.count=self.count+1
                    course = span.find_element_by_xpath(".//div[@class='t1 f-f0 f-cb first-row']/a/span").text
                    college = span.find_element_by_xpath(".//div[@class='t2 f-fc3 f-nowrp f-f0']/a[@class='t21 f-fc9']").text
                    teacher = span.find_element_by_xpath(".//div[@class='t2 f-fc3 f-nowrp f-f0']/a[@class='f-fc9']").text
                    team = span.find_element_by_xpath(".//div[@class='t2 f-fc3 f-nowrp f-f0 margin-top0']/span[@class='hot']").text
                    process = span.find_element_by_xpath(".//span[@class='txt']").text
                    brief = span.find_element_by_xpath(".//span[@class='p5 brief f-ib f-f0 f-cb']").text
                    print(self.count, course, college, teacher, team, process, brief)  # print the scraped fields to the console
                    self.insertdb(self.count,course,college,teacher,team,process,brief)
                i=i-1
        except Exception as err:
            print(err)

    def executespider(self, url, key):
        starttime = datetime.datetime.now()
        print("Spider starting......")
        self.startup(url, key)
        print("Spider processing......")
        self.processspider()
        print("Spider closing......")
        self.closeup()
        print("Spider completed......")
        endtime = datetime.datetime.now()  # measure how long the crawl took
        elapsed = (endtime - starttime).seconds
        print("Total ", elapsed, " seconds elapsed")


url = "https://www.icourse163.org/"
spider = MySpider()
while True:
    print("1.爬取")
    print("2.退出")
    s = input("请选择(1,2):")
    if s == "1":
        spider.executespider(url,key="时尚")
        continue
    elif s == "2":
        break

Experiment results:

(Result screenshots omitted here.)

2) Reflections

The code for this task was adapted from Tasks 1 and 2, and the results came out smoothly.

posted on 2020-11-21 18:20 by zxh2001p