学习爬虫day04

XPath的使用

XPath选择即为XML路径语言(XML Path Language),它是一种用来确定XML文档中某部分位置的语言

语法简单介绍

1 nodename 选取此节点的所有子节点
2 / 从根节点选取
3 // 从匹配选择的当前节点选择文档中的节点,而不考虑它们的位置
4 . 选取当前节点
5 .. 选取当前节点的父节点
6 @ 选取属性

语法小练习

# Sample HTML document used for every XPath exercise below.
doc = '''
<html>
 <head>
  <base href='http://example.com/' />
  <title>Example website</title>
 </head>
 <body>
  <div id='images'>
   <a href='image1.html' id='id_a'>Name: My image 1 <br/><img src='image1_thumb.jpg' /></a>
   <a href='image2.html'>Name: My image 2 <br /><img src='image2_thumb.jpg' /></a>
   <a href='image3.html'>Name: My image 3 <br /><img src='image3_thumb.jpg' /></a>
   <a href='image4.html'>Name: My image 4 <br /><img src='image4_thumb.jpg' /></a>
   <a href='image5.html' class='li li-item' name='items'>Name: My image 5 <br /><img src='image5_thumb.jpg' /></a>
   <a href='image6.html' name='items'><span><h5>test</h5></span>Name: My image 6 <br /><img src='image6_thumb.jpg' /></a>
  </div>
 </body>
</html>
'''
from lxml import etree

# Parse the HTML string into an element tree.
html = etree.HTML(doc)
# print(html)  # <Element html at 0x...>
"""// selects matching nodes anywhere in the document, regardless of position."""

# 1 All nodes: '*' matches any tag name.
a = html.xpath('//*')
print(a)  # every element in the tree: html, head, base, title, body, div, the six <a> tags, br/img pairs, span, h5 ...

# 2 A specific tag, anywhere in the document.
a = html.xpath('//head')
print(a)  # [<Element head at 0x...>]

# 3 Direct children: //div/a matches <a> tags that are immediate children of a <div>.
a = html.xpath('//div/a')
print(a)  # the six <a> elements

# 4 Parent node: '..' steps up to the parent of the matched node.
a = html.xpath('//body//a[@href="image1.html"]/..')
print(a)

# 5 Attribute match with [@attr="value"].
a = html.xpath('//body//a[@href="image1.html"]')
print(a)  # [<Element a at 0x...>]

# 6 Text content via text().
a = html.xpath('//body//a[@href="image1.html"]/text()')
print(a)

# 7 Attribute values via @attr.
a = html.xpath('//body//a/@href')
print(a)  # ['image1.html', 'image2.html', 'image3.html', 'image4.html', 'image5.html', 'image6.html']

# 8 Multi-valued attribute matching.
"""When an <a> tag has several classes, an exact @class match fails; use contains()."""
a = html.xpath('//body//a[@class="li"]')
print(a)  # [] -- exact match misses class='li li-item'
b = html.xpath('//body//a[contains(@class, "li")]')
print(b)  # [<Element a at 0x...>]
c = html.xpath('//body//a[contains(@class, "li")]/text()')
print(c)  # ['Name: My image 5 ']

# 9 Combining several attribute conditions with 'or'.
a = html.xpath('//body//a[contains(@class,"li") or @name="items"]')
print(a)  # [<Element a at 0x...>, <Element a at 0x...>]
b = html.xpath('//body//a[contains(@class,"li") or @name="items"]/text()')
print(b)  # ['Name: My image 5 ', 'Name: My image 6 ']

# 10 Positional selection (XPath positions are 1-based).
a = html.xpath('//a[2]/text()')
print(a)  # ['Name: My image 2 ']
# Last one
b = html.xpath('//a[last()]/@href')
print(b)  # ['image6.html']
# Positions below 3
c = html.xpath('//a[position()<3]/@href')
print(c)  # ['image1.html', 'image2.html']
# Second to last (fixed: the original used last()-2, which is the THIRD from last)
d = html.xpath('//a[last()-1]/@href')
print(d)  # ['image5.html']

# 11 Axis selection
# ancestor:: all ancestor nodes (the original comment wrongly said "child nodes")
a = html.xpath('//a/ancestor::*')  # '*' keeps every ancestor
print(a)  # [html, body, div]
# Only the <div> among the ancestors
b = html.xpath('//a/ancestor::div')
print(b)  # [<Element div at 0x...>]
# attribute:: attribute values of the node
a = html.xpath('//a[1]/attribute::*')
print(a)  # ['image1.html', 'id_a']
# child:: direct children only
c = html.xpath('//a[1]/child::*')
print(c)  # [br, img]
# descendant:: all descendants, any depth
d = html.xpath('//a[6]/descendant::*')
print(d)  # [span, h5, br, img]
# following:: every node after the current one in document order
a = html.xpath('//a[1]/following::*')
print(a)  # all remaining <a>/<br>/<img>/<span>/<h5> elements after the first <a>
# following-sibling:: later siblings at the same level
b = html.xpath('//a[1]/following-sibling::*')
print(b)  # the other five <a> elements

selenium动作链

主要用处(案例)

"""滑动验证码"""

"""两种形式"""
# 1 Way one: queue a single drag-and-drop and run the chain.
actions=ActionChains(bro) # get an ActionChains object bound to the driver
actions.drag_and_drop(sourse,target) # queue the action; chained actions run serially on perform()
actions.perform()
# 2 Way two: press and hold the source, then move in small steps
#    (useful for slider captchas that reject a single large jump).
ActionChains(bro).click_and_hold(sourse).perform()
distance=target.location['x']-sourse.location['x']
track=0
while track < distance:
    ActionChains(bro).move_by_offset(xoffset=2,yoffset=0).perform()
    track+=2

案例:自动登录12306

from selenium import webdriver
from selenium.webdriver.common.by import By
import time
from selenium.webdriver import ActionChains
from selenium.webdriver.chrome.options import Options

# Hide the "browser is being controlled by automated software" banner so the
# site is less likely to detect automation.
options = Options()
options.add_argument("--disable-blink-features=AutomationControlled")
# Load the Chrome driver
bro = webdriver.Chrome(executable_path='./chromedriver.exe', options=options)
# Open the 12306 login page
bro.get('https://kyfw.12306.cn/otn/resources/login.html')
# Maximize the window
bro.maximize_window()
# Implicit wait: every find_element retries for up to 10 seconds
bro.implicitly_wait(10)
try:
    username = bro.find_element(by=By.ID, value='J-userName')
    username.send_keys('')  # fill in the account name here
    password = bro.find_element(by=By.ID, value='J-password')
    password.send_keys('')  # fill in the password here
    time.sleep(3)
    btn = bro.find_element(by=By.ID, value='J-login')
    btn.click()
    span = bro.find_element(by=By.ID, value='nc_1_n1z')
    # Press and hold the slider
    ActionChains(bro).click_and_hold(span).perform()
    # Drag it to the right
    ActionChains(bro).move_by_offset(xoffset=300, yoffset=0).perform()
    # Release the mouse button so the slider is actually dropped
    # (the original never released, leaving the drag unfinished).
    ActionChains(bro).release().perform()
    time.sleep(5)

except Exception as e:
    print(e)
finally:
    # quit() (not close()) also terminates the chromedriver process,
    # so no orphan driver is left behind.
    bro.quit()

打码平台的使用

我们把验证码图片发给第三方,第三方帮咱们解决,我们只需要用钱就可以了

import time

from selenium import webdriver
from selenium.webdriver.common.by import By
from chaojiying import ChaojiyingClient
from PIL import Image

bro = webdriver.Chrome(executable_path='./chromedriver.exe')
bro.get('http://www.chaojiying.com/apiuser/login/')
bro.implicitly_wait(5)
bro.maximize_window()
try:
    username = bro.find_element(by=By.XPATH, value='/html/body/div[3]/div/div[3]/div[1]/form/p[1]/input')
    password = bro.find_element(by=By.XPATH, value='/html/body/div[3]/div/div[3]/div[1]/form/p[2]/input')
    code = bro.find_element(by=By.XPATH, value='/html/body/div[3]/div/div[3]/div[1]/form/p[3]/input')
    btn = bro.find_element(by=By.XPATH, value='/html/body/div[3]/div/div[3]/div[1]/form/p[4]/input')
    username.send_keys('')  # fill in the account name here
    password.send_keys('')  # fill in the password here
    # Grab the captcha:
    # 1 screenshot the whole page
    bro.save_screenshot('main.png')
    # 2 locate the captcha <img> so its region can be cropped out
    img = bro.find_element(By.XPATH, '/html/body/div[3]/div/div[3]/div[1]/form/div/img')
    location = img.location
    size = img.size
    print(location)
    print(size)
    # Bounding box (left, upper, right, lower) of the captcha in the screenshot
    img_photo = (
        int(location['x']), int(location['y']), int(location['x'] + size['width']), int(location['y'] + size['height']))
    # Crop the captcha out of the full-page screenshot with Pillow.
    # Use a distinct name so the WebElement `img` above is not shadowed.
    page_img = Image.open('./main.png')
    fram = page_img.crop(img_photo)
    # The cropped captcha image
    fram.save('code.png')
    # Solve it with the Chaojiying service
    cjy = ChaojiyingClient('', '', '')  # account, password, software id
    # Context manager closes the file handle (the original leaked it)
    with open('code.png', 'rb') as f:
        local_img = f.read()
    # Call the paid API once and reuse the result
    # (the original called PostPic twice, doubling the cost).
    result = cjy.PostPic(local_img, 1902)  # 1902 = captcha type, see the official site
    print(result)
    res_code = result['pic_str']
    code.send_keys(res_code)
    time.sleep(5)
    btn.click()
    time.sleep(10)
except Exception as e:
    print(e)
finally:
    # quit() also terminates the chromedriver process (close() does not)
    bro.quit()

爬取京东商品信息

from selenium import webdriver
from selenium.webdriver.common.by import By  # 按照什么方式查找,By.ID,By.CSS_SELECTOR
import time
from selenium.webdriver.common.keys import Keys


def get_goods(driver):
    """Scrape product name/price/comments/url/image from the current JD result
    page, then click "next page" and repeat until an element lookup fails.

    Iterative pagination replaces the original tail recursion, which would
    grow the call stack by one frame per page and eventually hit the
    recursion limit on long result sets. Interface and output are unchanged.
    """
    while True:
        try:
            goods = driver.find_elements(by=By.CLASS_NAME, value='gl-item')
            for good in goods:
                name = good.find_element(by=By.CSS_SELECTOR, value='.p-name em').text
                price = good.find_element(by=By.CSS_SELECTOR, value='.p-price i').text
                commit = good.find_element(by=By.CSS_SELECTOR, value='.p-commit a').text
                url = good.find_element(by=By.CSS_SELECTOR, value='.p-name a').get_attribute('href')
                img = good.find_element(by=By.CSS_SELECTOR, value='.p-img img').get_attribute('src')
                if not img:
                    # Lazy-loaded images keep the real URL in data-lazy-img
                    img = 'https:' + good.find_element(by=By.CSS_SELECTOR, value='.p-img img').get_attribute(
                        'data-lazy-img')
                print('''
                商品名字:%s
                商品价格:%s
                商品链接:%s
                商品图片:%s
                商品评论:%s
                ''' % (name, price, url, img, commit))
            button = driver.find_element(by=By.PARTIAL_LINK_TEXT, value='下一页')
            button.click()
            time.sleep(1)
        except Exception as e:
            # Any lookup failure (e.g. no "next page" link) ends the crawl,
            # matching the original recursive version's behavior.
            print(e)
            break


def spider(url, keyword):
    """Open *url* in Chrome, search for *keyword* via JD's search box, and
    hand the driver to get_goods() to scrape the result pages."""
    driver = webdriver.Chrome(executable_path='./chromedriver.exe')
    driver.get(url)
    driver.implicitly_wait(10)  # implicit wait for all element lookups
    try:
        # JD's search input has id="key"
        input_tag = driver.find_element(by=By.ID, value='key')
        # Type the search keyword
        input_tag.send_keys(keyword)
        # Press Enter to submit the search
        input_tag.send_keys(Keys.ENTER)
        # Scrape result pages
        get_goods(driver)
    finally:
        # quit() (not close()) also terminates the chromedriver process,
        # so no orphan driver is left behind.
        driver.quit()


if __name__ == '__main__':
    # Entry point: search JD for the keyword and scrape the result pages.
    spider(url='https://www.jd.com/', keyword='香奈儿')

scrapy介绍

# scrapy爬虫框架,做爬虫用的东西,都封装好了,只需要在固定的位置写固定的代码即可

# 号称 爬虫界的django

# 是一个开源和协作的框架,其最初是为了页面抓取所设计的,使用它可以以快速、简单、可扩展的方式从网站中提取所需的数据,但是目前Scrapy的用途十分广泛,可用于数据挖掘、检测和自动化测试等领域,也可以应用在获取API所返回的数据或者通用的网络爬虫

使用scrapy

"""安装scrapy"""
   mac,linux: pip3 install scrapy
   win:pip3 install scrapy
"""安装失败"""
   1 pip3 install wheel #安装后,便支持通过wheel文件安装软件   xx.whl
   2 pip3 install lxml
   3 pip3 install pyopenssl
   4 下载并安装pywin32:https://sourceforge.net/projects/pywin32/files/pywin32/
   5 下载twisted的wheel文件:http://www.lfd.uci.edu/~gohlke/pythonlibs/#twisted
   6 执行pip3 install 下载目录\Twisted-17.9.0-cp36-cp36m-win_amd64.whl
   7 pip3 install scrapy

爬虫项目准备工作

创建

scrapy startproject myfirstscrapy

创建爬虫

scrapy genspider cnblogs www.cnblogs.com

启动爬虫

scrapy crawl cnblogs --nolog

pycharm中运行

新建run.py
from scrapy.cmdline import execute
execute(['scrapy', 'crawl', 'cnblogs','--nolog'])

posted @   张张张张冉  阅读(26)  评论(0编辑  收藏  举报
相关博文:
阅读排行:
· 震惊!C++程序真的从main开始吗?99%的程序员都答错了
· 别再用vector<bool>了!Google高级工程师:这可能是STL最大的设计失误
· 单元测试从入门到精通
· 【硬核科普】Trae如何「偷看」你的代码?零基础破解AI编程运行原理
· 上周热点回顾(3.3-3.9)
点击右上角即可分享
微信分享提示