Basic usage of selenium


Searching the document tree with bs4

from bs4 import BeautifulSoup

html_doc = """
<html><head><title>The Dormouse's story</title></head>
<body>
<p id="my p" class="title">asdfasdf<b id="bbb" class="boldest">The Dormouse's story</b>
</p>

<p class="story">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>

<p class="story">...</p>
"""

soup = BeautifulSoup(html_doc, 'lxml')

# Searching the document tree: find returns the first match, find_all returns every match

# 5 kinds of filters: string, regular expression, list, True, and function

# 5.1 String: filter by exact tag name or attribute value
# res=soup.find(name='a',id='link2')
# res=soup.find(href='http://example.com/tillie')
# res=soup.find(class_='story')
# res=soup.body.find('p')
# res=soup.body.find(string='Elsie')
# res=soup.find(attrs={'class':'sister'})
# print(res)


# 5.2 Regular expression: tag names and attribute values can be matched with a regex
# import re
# res=soup.find_all(name=re.compile('^b'))
# res=soup.find_all(href=re.compile('^http'))
# for item in res:
#     url=item.attrs.get('href')
#     print(url)
# requests-html can do the same thing: grab every link URL on a page
# res=soup.find(attrs={'href':re.compile('^http')})

# print(res)


# 5.3 List: pass a list as the tag name or attribute value; the items act as an OR condition
# res=soup.find_all(class_=['story','sister'])  # OR condition
# res=soup.find_all(name=['a','p'])  # OR condition
# print(res)


## 5.4 True: pass True as a tag name or attribute to match any tag that has one
# res = soup.find_all(name=True)  # every tag that has a tag name
# print(res)

# grab every image on the page
# res = soup.find_all(src=True)
# for item in res:
#     url = item.attrs.get('src')
#     print(url)



# 5.5 Function: pass a function as the tag-name or attribute filter
# def has_class_but_no_id(tag):
#     return tag.has_attr('class') and not tag.has_attr('id')
#
# print(soup.find_all(has_class_but_no_id))



'''
# Summary:
    1  find and find_all
    2  5 kinds of filters
    3  combine them with tree navigation to speed up queries
'''
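
Point 3 deserves a concrete look: navigating to a node first shrinks the subtree that find/find_all must scan. A minimal sketch, reusing the soup object built above:

all_links = soup.find_all('a')        # scans the entire document
body_links = soup.body.find_all('a')  # navigate to <body> first, then search the smaller subtree
print(len(all_links), len(body_links))  # both print 3 here, but the second scan covers fewer nodes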


#### Other find_all parameters: limit, and recursive=False (search only direct children, not the whole subtree)
# res=soup.find_all(name='a',limit=2)   # find is essentially find_all with limit=1
#
# res=soup.body.find(name='p',id=False).find_all(name='a',recursive=False)
#
# print(res)


## Modifying the document tree: e.g. a BBS stripping script tags out of user posts
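
The note above stops at the idea; here is a minimal sketch of the script-stripping case with bs4's decompose() (the HTML string is a made-up example):

from bs4 import BeautifulSoup

dirty = "<html><body><script>alert('ad')</script><p>post body</p></body></html>"
s = BeautifulSoup(dirty, 'lxml')
for tag in s.find_all('script'):  # find every script tag
    tag.decompose()               # detach it and its contents from the tree
print(s)                          # only the <p> survives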

CSS selectors

from bs4 import BeautifulSoup

html_doc = """
<html><head><title>The Dormouse's story</title></head>
<body>
<p id="my p" class="title">asdfasdf<b id="bbb" class="boldest">The Dormouse's story</b>
</p>

<p class="story">Once upon a time there were three little sisters; and their names were
<a href="http://example.com/elsie" class="sister" id="link1">Elsie</a>,
<a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
<a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</p>

<p class="story">...</p>
"""

soup = BeautifulSoup(html_doc, 'lxml')
# res=soup.select('a')
# res=soup.select('#link1')
# res=soup.select('.sister')
# res=soup.select('body>p>a')
# Learn CSS selectors once and you are covered: nearly every parser (bs4, lxml, ...) supports CSS and XPath selection


# res=soup.select('body>p>a:nth-child(2)')
# res=soup.select('body>p>a:nth-last-child(1)')

# [attribute=value]
res=soup.select('a[href="http://example.com/tillie"]')
print(res)


'''
Worth memorizing:
    1  tag name
    2  .class-name
    3  #id
    4  body a    any <a> among all descendants of body
    5  body>a    an <a> that is a direct child of body, no grandchildren
    6  for the rest, consult any CSS selector reference
'''
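
A quick check of points 4 and 5 against the sample page, reusing the soup object above:

print(len(soup.select('body a')))   # 3: matches <a> tags anywhere under body
print(len(soup.select('body>a')))   # 0: no <a> is a direct child of body (they sit inside <p>)
print(soup.select_one('.story #link2').text)  # select_one returns just the first match: Lacie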

Basic usage of selenium

# requests sends HTTP requests and fetches data; when the response is HTML/XML, we parse it with bs4 to pull out the data we want
	-the data requests brings back can differ from what you see in the browser
    -requests cannot execute JS
    -with requests you have to work out how many requests the page actually fires, and replay each one yourself to piece together the complete page data



# selenium operates and controls a real browser, simulating what a human does
# manual clicking: functional testing
# automated testing of websites (API tests, load tests): scripts replace the manual clicking; Appium plays the same role for mobile apps
# test development
Selenium started out as an automated-testing tool; crawlers use it mainly to get around the fact that requests cannot execute JavaScript code.
In essence, Selenium drives a browser and fully simulates browser operations (navigating, typing, clicking, scrolling and so on) to obtain the page as it looks after rendering; it supports many browsers.



# Usage:
	-install the module: pip3 install selenium
    -download a browser driver: selenium drives a browser, so you need the browser itself (Chrome here) plus the matching chromedriver
    	-https://registry.npmmirror.com/binary.html?path=chromedriver/
        -pick the driver that matches your browser version
        e.g. for Chrome 106.0.5249.119, grab the corresponding driver
        
    -test it with some code
    from selenium import webdriver
    import time

    # if the driver is on your PATH, you can omit this argument
    # open a browser
    bro = webdriver.Chrome(executable_path='./chromedriver.exe')
    # enter a URL in the address bar
    bro.get('http://www.baidu.com')

    time.sleep(3)
    bro.close()  # close the current tab
    bro.quit()  # quit the browser
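
    Note: Selenium 4 deprecates executable_path (these notes use the Selenium 3 call style). A hedged sketch of the Selenium 4 equivalent, assuming the same driver location:

    from selenium import webdriver
    from selenium.webdriver.chrome.service import Service

    bro = webdriver.Chrome(service=Service('./chromedriver.exe'))  # Selenium 4 style
    bro.get('http://www.baidu.com')
    bro.quit()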
    
    
    
    
# RPA: robotic process automation, robots doing the routine clicking a human would otherwise do

Headless browser

from selenium import webdriver
import time
from selenium.webdriver.chrome.options import Options

# if the driver is on your PATH, you can omit this argument
# open a browser
chrome_options = Options()
# chrome_options.add_argument('window-size=1920x3000')  # set the browser resolution
# chrome_options.add_argument('--disable-gpu')  # the Google docs mention this flag to work around a bug
# chrome_options.add_argument('--hide-scrollbars')  # hide scrollbars, for some special pages
# chrome_options.add_argument('blink-settings=imagesEnabled=false')  # skip loading images to speed things up
chrome_options.add_argument('--headless')  # no visible window; on Linux without display support, omitting this makes startup fail
# chrome_options.binary_location = r"C:\Program Files (x86)\Google\Chrome\Application\chrome.exe"  # manually point to the Chrome binary
bro = webdriver.Chrome(executable_path='./chromedriver.exe', options=chrome_options)


# enter a URL in the address bar
bro.get('https://www.jd.com/')
print(bro.page_source) # the page content exactly as the browser renders it

time.sleep(3)
bro.close()  # close the current tab
bro.quit()  # quit the browser
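
Side note: newer Chrome builds also accept chrome_options.add_argument('--headless=new'), a headless mode that renders closer to the regular windowed browser; if page_source comes back suspiciously empty under plain --headless, that flag is worth trying.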

Other selenium usage

  • Logging in to Baidu automatically

from selenium import webdriver
import time
from selenium.webdriver.common.by import By
from selenium.webdriver.chrome.options import Options
bro = webdriver.Chrome(executable_path='./chromedriver.exe')
bro.get('http://www.baidu.com')
bro.implicitly_wait(10)  # implicit wait: when locating an element, wait up to 10s for it to load; if it still isn't there after that, raise
bro.maximize_window()  # maximize the window
a = bro.find_element(by=By.LINK_TEXT,value='登录')
a.click()
time.sleep(1)
# ids are unique within a page; if an element has one, prefer locating by id
input_name = bro.find_element(by=By.ID,value='TANGRAM__PSP_11__userName')
input_name.send_keys('18235592827')
time.sleep(1)
input_password = bro.find_element(by=By.ID,value='TANGRAM__PSP_11__password')
input_password.send_keys('jsoeph520')
time.sleep(10)
input_submit = bro.find_element(by=By.ID,value='TANGRAM__PSP_11__submit')
input_submit.click()
time.sleep(5)
bro.close()
# bro.quit()
  • Getting an element's position, size, and text

# # Locating elements
# bro.find_element(by=By.ID,value='the id value')
# bro.find_element(by=By.LINK_TEXT,value='exact text of an <a> tag')
# bro.find_element(by=By.PARTIAL_LINK_TEXT,value='partial match on <a> tag text')
# bro.find_element(by=By.CLASS_NAME,value='a class name')
# bro.find_element(by=By.TAG_NAME,value='a tag name')
# bro.find_element(by=By.NAME,value='value of the name attribute')
# # ----- universal -----
# bro.find_element(by=By.CSS_SELECTOR,value='a CSS selector')
# bro.find_element(by=By.XPATH,value='an XPath expression')
#
# # get the element's position and size
# print(code.location)
# print(code.size)
# -------
# print(code.tag_name)
# print(code.id)
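# bro.find_elements(by=..., value=...)  # plural form: same locators, returns a list of every match (used in the Chouti example below)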


from selenium import webdriver
from selenium.webdriver.common.by import By
import base64
import time
bro = webdriver.Chrome(executable_path='./chromedriver.exe')
bro.get('https://kyfw.12306.cn/otn/resources/login.html')
bro.implicitly_wait(10)
bro.maximize_window()
a = bro.find_element(by=By.LINK_TEXT,value='扫码登录')
a.click()
time.sleep(3)
code = bro.find_element(by=By.CSS_SELECTOR,value='#J-qrImg')
print(code.location)
print(code.size)
print(code.id)  # selenium's internal element id, not the HTML id attribute
print(code.tag_name)  # the tag name
s = code.get_attribute('src')
print(s)
with open('code.png','wb')as f:
    res = base64.b64decode(s.split(',')[-1])
    f.write(res)

time.sleep(3)
bro.close()
  • Waiting for elements to load
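
Everything above leans on implicitly_wait. Selenium also supports explicit waits that block until one specific condition holds; a minimal sketch with WebDriverWait ('su' is the id of Baidu's search button, an assumption worth re-checking against the live page):

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC

bro = webdriver.Chrome(executable_path='./chromedriver.exe')
bro.get('http://www.baidu.com')
# wait up to 10 seconds for this one element, polling until it is present
el = WebDriverWait(bro, 10).until(EC.presence_of_element_located((By.ID, 'su')))
print(el.get_attribute('value'))
bro.close()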

# click
# element.click()
# type into an input
# element.send_keys('text')
# clear an input
# element.clear()

# simulating keyboard input
from selenium import webdriver
import time
from selenium.webdriver.common.by import By
bro = webdriver.Chrome(executable_path='./chromedriver.exe')
bro.get('http://www.baidu.com')
bro.implicitly_wait(10)  # implicit wait: give elements up to 10s to load before raising
bro.maximize_window()  # maximize the window
a = bro.find_element(by=By.LINK_TEXT,value='登录')
a.click()
time.sleep(1)
input_search = bro.find_element(by=By.ID,value='TANGRAM__PSP_11__submit')
input_search.click()

from selenium.webdriver.common.keys import Keys
input_search.send_keys(Keys.ENTER)

time.sleep(3)
bro.close()
  • Element operations

# click
element.click()
# type into an input
element.send_keys('text')
# clear an input
element.clear()

# simulate keyboard keys
from selenium.webdriver.common.keys import Keys
input_search.send_keys(Keys.ENTER)
  • Executing JS code

import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys

bro = webdriver.Chrome(executable_path='./chromedriver.exe')
bro.get('https://www.jd.com/')
# 1. pop document.cookie up in an alert (note: without a JS return, execute_script gives back None)
# res = bro.execute_script('alert(document.cookie)')
# print(res)
# time.sleep(10)
# bro.close()


# scroll down in steps to trigger lazy-loaded content
for i in range(30):
    y = 400*i
    bro.execute_script('scrollTo(0,%s)'%y)
    time.sleep(0.3)

bro.execute_script('scrollTo(0,document.body.scrollHeight)')  # jump straight to the bottom

time.sleep(3)
bro.close()
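
One thing the commented-out cookie lines above hide: execute_script only hands a value back to Python when the JS uses return (an alert(...) call evaluates to None, so printing res shows None). A sketch, to be run before bro.close():

title = bro.execute_script('return document.title')    # the JS return value comes back to Python
cookies = bro.execute_script('return document.cookie')
print(title, cookies)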

  • Switching tabs

import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys


bro = webdriver.Chrome(executable_path='./chromedriver.exe')
bro.implicitly_wait(10)  # implicit wait for elements to load
bro.maximize_window()  # maximize the window
bro.get('https://www.jd.com/')

time.sleep(2)
bro.execute_script('window.open()')  # open a new tab via JS
bro.switch_to.window(bro.window_handles[1])  # switch focus to the new tab
bro.get('https://www.bilibili.com/')
time.sleep(2)
bro.switch_to.window(bro.window_handles[0])
time.sleep(1)
bro.switch_to.window(bro.window_handles[1])
time.sleep(1)
bro.switch_to.window(bro.window_handles[0])
time.sleep(1)
time.sleep(3)
bro.close()
bro.quit()
  • Browser back and forward

import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys


bro = webdriver.Chrome(executable_path='./chromedriver.exe')
bro.implicitly_wait(10)  # implicit wait for elements to load
bro.maximize_window()  # maximize the window
bro.get('https://www.jd.com/')


time.sleep(2)
bro.get('https://www.taobao.com/')
time.sleep(2)
bro.get('https://www.bilibili.com/')
time.sleep(2)
bro.get('https://www.jd.com/')
bro.back()
time.sleep(1)
bro.back()
time.sleep(1)
bro.forward()
time.sleep(1)
bro.back()
time.sleep(3)
bro.close()

  • Exception handling

import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys


from selenium.common.exceptions import TimeoutException,NoSuchElementException,NoSuchFrameException

bro = None
try:
    bro = webdriver.Chrome(executable_path='./chromedriver.exe')
    bro.implicitly_wait(10)  # implicit wait for elements to load
    bro.maximize_window()  # maximize the window
    bro.get('https://www.jd.com/')
except Exception as e:
    print(e)

finally:
    if bro:  # only close if the browser actually started; otherwise bro is still None
        bro.close()

Logging in to cnblogs with selenium and saving the cookie


import time
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
import json

bro = webdriver.Chrome(executable_path='./chromedriver.exe')
bro.implicitly_wait(10)
bro.maximize_window()  # maximize the window
bro.get('https://www.cnblogs.com/')
try:
    # find the login button
    submit_btn = bro.find_element(by=By.LINK_TEXT,value='登录')
    submit_btn.click()
    time.sleep(1)
    username = bro.find_element(by=By.ID,value='mat-input-0')
    password = bro.find_element(by=By.ID,value='mat-input-1')
    username.send_keys('Joseph-bright')
    password.send_keys('joseph520520')
    submit = bro.find_element(By.CSS_SELECTOR,value='body > app-root > app-sign-in-layout > div > div > app-sign-in > app-content-container > div > div > div > form > div > button')
    time.sleep(3)
    submit.click()
    # input()
    cookie = bro.get_cookies()
    print(cookie)
    with open('cnblogs.json','w',encoding='utf-8')as f:
        json.dump(cookie,f)
    # time.sleep(5)
except Exception as e:
    print(e)

finally:
    bro.close()
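
# Second phase: open a fresh browser, inject the saved cookies, and refresh to land on the page already logged in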


bro = webdriver.Chrome(executable_path='./chromedriver.exe')
bro.get('https://www.cnblogs.com')
bro.implicitly_wait(10)
time.sleep(3)
with open('cnblogs.json','r',encoding='utf-8')as f:
    cookie = json.load(f)

for item in cookie:
    bro.add_cookie(item)

bro.refresh()
time.sleep(3)
bro.close()

Semi-automatic upvoting on Chouti

from selenium import webdriver
from selenium.webdriver.common.by import By
import time
import json
import requests

try:
    bro = webdriver.Chrome(executable_path='./chromedriver.exe')
    bro.implicitly_wait(10)
    bro.maximize_window()  # maximize the window
    bro.get('https://dig.chouti.com/')
    submit = bro.find_element(by=By.ID, value='login_btn')
    bro.execute_script('arguments[0].click()', submit)  # click via JS, sidestepping "element not interactable" issues
    time.sleep(2)
    username = bro.find_element(by=By.NAME, value='phone')
    username.send_keys('18235592827')
    password = bro.find_element(by=By.NAME, value='password')
    password.send_keys('joseph520')
    time.sleep(3)
    submit_button = bro.find_element(By.CSS_SELECTOR,
                                     'body > div.login-dialog.dialog.animated2.scaleIn > div > div.login-footer > div:nth-child(4) > button')
    submit_button.click()

    # a captcha appears here: solve it by hand, then press Enter in the console to continue
    input()
    cookie = bro.get_cookies()
    print(cookie)
    with open('chouti.json', 'w', encoding='utf-8') as f:
        json.dump(cookie, f)

    # collect the data-id of every article on the page
    div_list = bro.find_elements(By.CLASS_NAME, 'link-item')
    l = []
    for div in div_list:
        article_id = div.get_attribute('data-id')
        l.append(article_id)



except Exception as e:
    print(e)

finally:
    bro.close()

#  From here on selenium's job is done (log in, grab the cookie); use requests with that cookie to send the upvotes

print(l)

with open('chouti.json', 'r', encoding='utf-8')as f:
    cookie = json.load(f)
# detail: selenium's cookie list can't be handed to requests as-is; it needs converting to a plain name->value dict
request_cookies = {}
for item in cookie:
    request_cookies[item['name']] = item['value']
print(request_cookies)
header = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/106.0.0.0 Safari/537.36'
}
for i in l:
    data = {
        'linkId': i
    }
    res = requests.post('https://dig.chouti.com/link/vote', data=data, headers=header, cookies=request_cookies)
    print(res.text)
