Web Scraping Notes: Day 03

Searching the document tree with bs4

from bs4 import BeautifulSoup

html_doc = """
<html><head><title>The Dormouse's story</title></head>
<body>
<p id="my p" class="title">asdfasdf<b id="bbb" class="boldest">The Dormouse's story</b>
</p>

<p class="story">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>

<p class="story">...</p>
"""

soup = BeautifulSoup(html_doc, 'lxml')

"""
搜索文档树:
    find: 找一个
    find_all: 找所有

5种搜索方式:字符串、正则表达式、列表、True、方法
"""

1 String: search by tag name or attribute name

res = soup.find(name='a', id='link2')
print(res)  # <a class="sister" href="http://example.com/lacie" id="link2">Lacie</a>

res = soup.find(href='http://example.com/tillie')
print(res) # <a class="sister" href="http://example.com/tillie" id="link3">Tillie</a>

res = soup.find(class_='story')
print(res)
'''
<p class="story">Once upon a time there were three little sisters; and their names were
<a class="sister" href="http://example.com/elsie" id="link1">Elsie</a>,
<a class="sister" href="http://example.com/lacie" id="link2">Lacie</a> and
<a class="sister" href="http://example.com/tillie" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>
'''

res = soup.body.find('p')
print(res) # <p class="title" id="my p">asdfasdf<b class="boldest" id="bbb">The Dormouse's story</b></p>

res = soup.find(attrs={'class': 'sister'})
print(res) # <a class="sister" href="http://example.com/elsie" id="link1">Elsie</a>
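Multiple filters can be combined in one find/find_all call; all of them must match (an AND condition):

res = soup.find_all(name='a', class_='sister', id='link1')
print(res)  # [<a class="sister" href="http://example.com/elsie" id="link1">Elsie</a>]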

2 Regular expression: tag names and attribute values can be matched with a regex

import re

res = soup.find_all(name=re.compile('^b'))  # tags whose name starts with 'b': body and b
print(res)
'''
[<body>
<p class="title" id="my p">asdfasdf<b class="boldest" id="bbb">The Dormouse's story</b>
</p>
<p class="story">Once upon a time there were three little sisters; and their names were
<a class="sister" href="http://example.com/elsie" id="link1">Elsie</a>,
<a class="sister" href="http://example.com/lacie" id="link2">Lacie</a> and
<a class="sister" href="http://example.com/tillie" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>
<p class="story">...</p>
</body>, <b class="boldest" id="bbb">The Dormouse's story</b>]
'''

res = soup.find_all(href=re.compile('^http'))
for item in res:
    url = item.attrs.get('href')
    print(url)
'''
http://example.com/elsie
http://example.com/lacie
http://example.com/tillie
'''

res = soup.find(attrs={'href': re.compile('^a')})
print(res)  # None: no href in this document starts with 'a'

3 List: pass a list as the tag name or attribute value; any item may match (an OR condition)

res = soup.find_all(class_=['story', 'sister'])  # OR condition
print(res)
'''
[<p class="story">Once upon a time there were three little sisters; and their names were
<a class="sister" href="http://example.com/elsie" id="link1">Elsie</a>,
<a class="sister" href="http://example.com/lacie" id="link2">Lacie</a> and
<a class="sister" href="http://example.com/tillie" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>,
<a class="sister" href="http://example.com/elsie" id="link1">Elsie</a>,
<a class="sister" href="http://example.com/lacie" id="link2">Lacie</a>,
<a class="sister" href="http://example.com/tillie" id="link3">Tillie</a>,
<p class="story">...</p>]
'''
res = soup.find_all(name=['a', 'p'])  # OR condition
print(res)

4 True: pass True as the tag name or attribute value

res = soup.find_all(name=True)  # all tags that have a tag name
print(res)

res = soup.find_all(src=True)  # all tags with a src attribute, e.g. images
for item in res:
    url = item.attrs.get('src')  # read src, matching the filter above
    print(url)  # (this sample document has no src attributes, so nothing prints)

5 Function: pass a function as the tag name or attribute value

def has_class_but_no_id(tag):
    return tag.has_attr('class') and not tag.has_attr('id')

print(soup.find_all(has_class_but_no_id))

Other find_all parameters

find is essentially find_all with limit=1.

limit: cap the number of results returned (sketch below)

recursive=False: search direct children only (one level), not the whole subtree

res = soup.body.find(name='p', id=False).find_all(name='a', recursive=False)
print(res)
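A quick sketch of limit, using the same soup:

res = soup.find_all(name='a', limit=2)  # stop after the first 2 <a> tags
print(len(res))  # 2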

CSS selectors

bs4 can select tags by traversal, by searching, or with CSS selectors.

from bs4 import BeautifulSoup

html_doc = """
<html><head><title>The Dormouse's story</title></head>
<body>
<p id="my p" class="title">asdfasdf<b id="bbb" class="boldest">The Dormouse's story</b>
</p>

<p class="story">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>

<p class="story">...</p>
"""
soup = BeautifulSoup(html_doc, 'lxml')
res = soup.select('p')
print(res)

res = soup.select('#link1')
print(res)  # [<a class="sister" href="http://example.com/elsie" id="link1">Elsie</a>]

res = soup.select('.sister')
print(res)
# [<a class="sister" href="http://example.com/elsie" id="link1">Elsie</a>, <a class="sister" href="http://example.com/lacie" id="link2">Lacie</a>, <a class="sister" href="http://example.com/tillie" id="link3">Tillie</a>]

res = soup.select('body>p>a')
print(res)

'''
Once you know CSS selectors you can reuse them everywhere: almost every parser (bs4, lxml, ...) supports CSS and XPath.
'''

res = soup.select('body>p>a:nth-child(2)')       # the <a> that is the 2nd child of its parent (Lacie)
res = soup.select('body>p>a:nth-last-child(1)')  # the last child (Tillie)

# [attribute=value]
res=soup.select('a[href="http://example.com/tillie"]')
print(res)
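select always returns a list. bs4 also provides select_one, which returns the first match or None:

res = soup.select_one('p.story > a#link3')
print(res)  # <a class="sister" href="http://example.com/tillie" id="link3">Tillie</a>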

Basic Selenium usage

'''requests sends HTTP requests to fetch data; when the response is HTML, parse it with bs4 to extract what we want.'''
    The data requests fetches can differ from what the browser shows,
    because requests cannot execute JavaScript.
    With requests alone you must work out how many requests the page fires and replay each one to piece together the complete page data.


Selenium drives and controls a real browser, simulating human behavior.
    manual clicking: functional testing
    automated testing of websites (interface tests, stress tests): scripted instead of manual clicking; Appium plays the same role for mobile apps
    test development

Selenium started out as an automated-testing tool; scrapers use it mainly because requests cannot execute JavaScript.
It works by driving a browser and fully simulating browser operations (navigating, typing, clicking, scrolling, and so on) to obtain the page as rendered. Multiple browsers are supported.

Usage

Install the module: pip install selenium

Download a browser driver: Selenium drives a real browser, so you need the browser itself (e.g. Chrome) and the matching driver (chromedriver, matched to your Chrome version):
   https://registry.npmmirror.com/binary.html?path=chromedriver/

Open the browser and close it 3 seconds later

from selenium import webdriver
import time
bro = webdriver.Chrome(executable_path='./chromedriver.exe')
bro.get('http://www.baidu.com')
time.sleep(3)
bro.close()  # close the current tab
bro.quit()   # quit the whole browser
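Note: Selenium 4 deprecates the executable_path argument used throughout these notes; a minimal sketch of the newer style, assuming Selenium 4 is installed:

from selenium import webdriver
from selenium.webdriver.chrome.service import Service

bro = webdriver.Chrome(service=Service('./chromedriver.exe'))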

Headless browser

For scraping you usually don't want a browser window popping up. Chrome supports headless mode: it runs in the background with no graphical (GUI) interface.

from selenium import webdriver
import time
from selenium.webdriver.chrome.options import Options
chrome_options = Options()
# chrome_options.add_argument('window-size=1920x3000')  # set the browser resolution
# chrome_options.add_argument('--disable-gpu')  # Google's docs mention this flag to work around a bug
# chrome_options.add_argument('--hide-scrollbars')  # hide scrollbars, for some special pages
# chrome_options.add_argument('blink-settings=imagesEnabled=false')  # don't load images, for speed
# chrome_options.add_argument('--headless')  # no visible window; required on Linux systems without a display, or startup fails
# chrome_options.binary_location = r"C:\Program Files (x86)\Google\Chrome\Application\chrome.exe"  # manually specify the browser binary
bro = webdriver.Chrome(executable_path='./chromedriver.exe', options=chrome_options)

# navigate to the site
bro.get('http://www.jd.com/')
print(bro.page_source)  # the page content as rendered in the browser

time.sleep(3)
# close the tab
bro.close()
# close the browser
bro.quit()

Mini example: logging into Baidu automatically

from selenium import webdriver
from selenium.webdriver.common.by import By
import time

bro = webdriver.Chrome(executable_path='./chromedriver.exe')
bro.get('http://www.baidu.com')
bro.implicitly_wait(10)
bro.maximize_window()
# find the tag by its link text ('登录' is the page's actual link text, "Log in")
a = bro.find_element(by=By.LINK_TEXT, value='登录')
# click it
a.click()

# ids are unique on a page; prefer an id when one exists
input_name = bro.find_element(by=By.ID, value='TANGRAM__PSP_11__userName')
# type the username
input_name.send_keys('1990837105@qq.com')
time.sleep(2)
input_password = bro.find_element(by=By.ID, value='TANGRAM__PSP_11__password')
input_password.send_keys('ZxR12580.')
time.sleep(2)
input_submit = bro.find_element(by=By.ID, value='TANGRAM__PSP_11__submit')
input_submit.click()
time.sleep(5)
bro.close()

Getting position, attributes, size, and text

# ways to locate a tag (e.g. the QR-code login tag)
bro.find_element(by=By.ID, value='an id')
bro.find_element(by=By.LINK_TEXT, value='exact text of an <a> tag')
bro.find_element(by=By.PARTIAL_LINK_TEXT, value='partial text of an <a> tag')
bro.find_element(by=By.CLASS_NAME, value='a class name')
bro.find_element(by=By.TAG_NAME, value='a tag name')
bro.find_element(by=By.NAME, value='value of the name attribute')
# ----- universal -----
bro.find_element(by=By.CSS_SELECTOR, value='a CSS selector')
bro.find_element(by=By.XPATH, value='an XPath expression')
bro.implicitly_wait(10)   # implicit wait (see the waiting section below)
bro.maximize_window()     # maximize the window

from selenium import webdriver
from selenium.webdriver.common.by import By
import time
import base64
bro = webdriver.Chrome(executable_path='./chromedriver.exe')
bro.get('https://kyfw.12306.cn/otn/resources/login.html')
# bro.get('https://www.jd.com/')

a = bro.find_element(by=By.LINK_TEXT, value='扫码登录')  # the site's actual link text ("QR-code login")
# a = bro.find_element(by=By.CSS_SELECTOR, value='.login-hd-account>a')
a.click()
# code = bro.find_element(by=By.ID, value='J-qrImg')
code = bro.find_element(by=By.CSS_SELECTOR, value='#J-qrImg')
# code = bro.find_element(by=By.CSS_SELECTOR, value='.logo_scene_img')

# Option 1: crop the QR code out of a screenshot using its location and size
print(code.id)
print(code.location)
print(code.tag_name)
print(code.size)
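# A minimal sketch of Option 1, assuming Pillow is installed and a device
# pixel ratio of 1 (on HiDPI screens the coordinates need scaling):
from PIL import Image
bro.save_screenshot('page.png')  # screenshot the whole page first
left, top = code.location['x'], code.location['y']
right, bottom = left + code.size['width'], top + code.size['height']
Image.open('page.png').crop((left, top, right, bottom)).save('code_crop.png')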

# Option 2: read the image from the src attribute (here a base64 data URI)
print(code.location)
print(code.size)
print(code.id)  # selenium's internal element id, not the tag's HTML id
print(code.tag_name)  # the tag name
s = code.get_attribute('src')
print(s)
with open('code.png', 'wb') as f:
    res = base64.b64decode(s.split(',')[-1])  # strip the 'data:image/...;base64,' prefix
    f.write(res)

time.sleep(3)

bro.close()

Waiting for elements to load

Code runs faster than the page loads; if a tag hasn't rendered yet, fetching it directly fails.

Explicit wait: less convenient; you must specify which tag to wait for, which gets tedious when there are many (a sketch follows below)

Implicit wait: bro.implicitly_wait(10)

         when find can't locate a tag, it retries for up to 10 s before raising
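A minimal sketch of an explicit wait (the element id 'content' is a hypothetical example):

from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By

wait = WebDriverWait(bro, 10)  # poll for up to 10 s
elem = wait.until(EC.presence_of_element_located((By.ID, 'content')))  # hypothetical id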

Element operations

'''Click'''
    tag.click()

'''Type text into an input'''
    tag.send_keys('some text')

'''Clear an input'''
    tag.clear()

'''Simulate keyboard actions'''
    from selenium.webdriver.common.keys import Keys
    input_search.send_keys(Keys.ENTER)
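A short sketch tying these operations together: type a query into Baidu's search box and press Enter (the id 'kw' is an assumption about Baidu's current markup):

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
import time

bro = webdriver.Chrome(executable_path='./chromedriver.exe')
bro.get('https://www.baidu.com')
input_search = bro.find_element(by=By.ID, value='kw')  # assumed id of the search box
input_search.send_keys('selenium')
input_search.send_keys(Keys.ENTER)  # simulate pressing Enter
time.sleep(3)
bro.close()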

Executing JavaScript

Pop up an alert

import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys

bro = webdriver.Chrome(executable_path='./chromedriver.exe')
bro.get('https://www.jd.com/')

# bro.execute_script('alert("have a happy day~")')
time.sleep(3)
bro.close()
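While an alert is open, the page itself can't be driven; switch_to.alert reads and dismisses it. A sketch, run before the close() above:

bro.execute_script('alert("hello")')
alert = bro.switch_to.alert
print(alert.text)  # hello
alert.accept()     # click OK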

Scroll down step by step

import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys

bro = webdriver.Chrome(executable_path='./chromedriver.exe')
bro.get('https://www.jd.com/')

# scroll the page toward the bottom, 400 px at a time
for i in range(10):
    y = 400 * (i + 1)
    bro.execute_script('scrollTo(0,%s)' % y)
    time.sleep(1)


time.sleep(3)
bro.close()

Scroll to the bottom in one go

import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys

bro = webdriver.Chrome(executable_path='./chromedriver.exe')
bro.get('https://www.jd.com/')

bro.execute_script('scrollTo(0,document.body.scrollHeight)')


time.sleep(3)
bro.close()
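For infinite-scroll pages, a common sketch is to keep scrolling until the page height stops growing (run before the close() above):

last_height = bro.execute_script('return document.body.scrollHeight')
while True:
    bro.execute_script('scrollTo(0, document.body.scrollHeight)')
    time.sleep(1)  # give new content time to load
    new_height = bro.execute_script('return document.body.scrollHeight')
    if new_height == last_height:  # nothing new loaded: we hit the bottom
        break
    last_height = new_height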

Switching tabs

import time
from selenium import webdriver

bro = webdriver.Chrome(executable_path='./chromedriver.exe')
bro.get('https://www.jd.com/')
# open a new tab with js
bro.execute_script('window.open()')
# switch to the new tab: the original is window_handles[0], the newly opened one is window_handles[1]
bro.switch_to.window(bro.window_handles[1])
bro.get('https://weibo.com/')
time.sleep(2)
# after 2 seconds, switch back to the first tab
bro.switch_to.window(bro.window_handles[0])
# after 3 more seconds, close up
time.sleep(3)
bro.close()
bro.quit()

Browser back and forward

import time
from selenium import webdriver

bro = webdriver.Chrome(executable_path='./chromedriver.exe')
bro.get('https://www.12306.cn/index/')
time.sleep(2)
bro.get('https://www.cnblogs.com/')
time.sleep(4)
bro.get('https://y.qq.com/?ADTAG=myqq#type=index')
# go back one page
bro.back()  # https://www.cnblogs.com/
time.sleep(2)
# go forward one page
bro.forward()  # https://y.qq.com/?ADTAG=myqq#type=index
time.sleep(2)
bro.close()  # close

Exception handling

from selenium.common.exceptions import TimeoutException, NoSuchElementException, NoSuchFrameException

try:
    pass  # browser operations go here
except Exception as e:
    print(e)
finally:
    bro.close()  # always release the browser, even on failure

Logging into cnblogs with Selenium to grab cookies

Drive the browser; once the login succeeds you can grab the session cookies and save them locally.
With many alt accounts you collect many cookies and can build a cookie pool.

import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
import json

# the login flow
bro = webdriver.Chrome(executable_path='./chromedriver.exe')
bro.get('https://www.cnblogs.com/')
bro.implicitly_wait(10)
try:
    # find the login button ('登录' is the page's actual link text)
    submit_btn = bro.find_element(By.LINK_TEXT, value='登录')
    submit_btn.click()
    time.sleep(1)
    username = bro.find_element(By.ID, value='mat-input-0')
    password = bro.find_element(By.ID, value='mat-input-1')
    username.send_keys('zr_rbt@163.com')
    password.send_keys('ZxR12580.')
    submit = bro.find_element(By.CSS_SELECTOR,
                              value='body > app-root > app-sign-in-layout > div > div > app-sign-in > app-content-container > div > div > div > form > div > button')
    time.sleep(2)
    submit.click()
    # there is a captcha/slider: complete it by hand, then press Enter in the console so the script continues
    input()
    # after a successful login, grab the cookies
    cookie = bro.get_cookies()
    print(cookie)
    with open('cnblogs.json', 'w', encoding='utf-8') as f:
        json.dump(cookie, f)
    time.sleep(5)
except Exception as e:
    print(e)
finally:
    bro.close()

Logging in with the saved cookies

import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
import json

bro = webdriver.Chrome(executable_path='./chromedriver.exe')
bro.get('https://www.cnblogs.com/')
bro.implicitly_wait(10)
time.sleep(3)
# load the local cookies into the browser and you are logged in
with open('cnblogs.json', 'r', encoding='utf-8') as f:
    cookie = json.load(f)

for item in cookie:
    bro.add_cookie(item)
bro.refresh()
time.sleep(10)
bro.close()

Semi-automatic upvoting on Chouti

Use Selenium to log into Chouti and grab the cookies, then use requests to upvote automatically. Logging in with requests alone is very hard because of the captcha.

from selenium import webdriver
from selenium.webdriver.common.by import By
import time
import json
import requests

bro = webdriver.Chrome(executable_path='./chromedriver.exe')

bro.get('https://dig.chouti.com/')
bro.implicitly_wait(10)
try:
    submit = bro.find_element(by=By.ID, value='login_btn')
    bro.execute_script("arguments[0].click()", submit)
    # submit.click()  # sometimes a button can be found but .click() raises; clicking it via js works around that
    time.sleep(2)
    username = bro.find_element(by=By.NAME, value='phone')
    username.send_keys('18953675221')
    password = bro.find_element(by=By.NAME, value='password')
    password.send_keys('lqz123')
    time.sleep(3)
    submit_button = bro.find_element(By.CSS_SELECTOR,
                                     'body > div.login-dialog.dialog.animated2.scaleIn > div > div.login-footer > div:nth-child(4) > button')
    submit_button.click()

    # the captcha: complete it by hand, then press Enter in the console
    input()
    cookie = bro.get_cookies()
    print(cookie)
    with open('chouti.json', 'w', encoding='utf-8') as f:
        json.dump(cookie, f)

    # collect every article's id from the list page
    div_list = bro.find_elements(By.CLASS_NAME, 'link-item')
    l = []
    for div in div_list:
        article_id = div.get_attribute('data-id')
        l.append(article_id)



except Exception as e:
    print(e)

finally:
    bro.close()

# Selenium has done its job (log in -> grab cookies); from here, requests sends the upvotes

print(l)

with open('chouti.json', 'r', encoding='utf-8')as f:
    cookie = json.load(f)
# detail: selenium's cookie list can't be passed to requests directly; convert it to a dict first
request_cookies = {}
for item in cookie:
    request_cookies[item['name']] = item['value']
print(request_cookies)
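# the same conversion as an idiomatic dict comprehension:
# request_cookies = {item['name']: item['value'] for item in cookie}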
header = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/106.0.0.0 Safari/537.36'
}
for i in l:
    data = {
        'linkId': i
    }
    res = requests.post('https://dig.chouti.com/link/vote', data=data, headers=header, cookies=request_cookies)
    print(res.text)