13天搞定Python分布式爬虫(第一天、第二天)
尚学堂学习笔记
第一天:
03:
from urllib.request import urlopen

# Minimal urllib GET. NOTE: a running Fiddler proxy makes this request fail.
response = urlopen('http://sogo.com')
# print(response.read().decode())
print(response.geturl())   # final URL after redirects
print(response.getcode())  # HTTP status code
print(response.info())     # response headers
运行结果:
1 C:\Users\xiongjiawei\PycharmProjects\Spider\venv\Scripts\python.exe C:/Users/xiongjiawei/PycharmProjects/Spider/13天搞定Python分布式爬虫/v01.py 2 https://www.sogo.com/ 3 200 4 Server: nginx 5 Date: Fri, 07 Feb 2020 09:27:47 GMT 6 Content-Type: text/html; charset=utf-8 7 Transfer-Encoding: chunked 8 Connection: close 9 Vary: Accept-Encoding 10 Set-Cookie: ABTEST=0|1581067667|v17; expires=Sun, 08-Mar-20 09:27:47 GMT; path=/ 11 P3P: CP="CURa ADMa DEVa PSAo PSDo OUR BUS UNI PUR INT DEM STA PRE COM NAV OTC NOI DSP COR" 12 Set-Cookie: IPLOC=CN3201; expires=Sat, 06-Feb-21 09:27:47 GMT; domain=.sogo.com; path=/ 13 P3P: CP="CURa ADMa DEVa PSAo PSDo OUR BUS UNI PUR INT DEM STA PRE COM NAV OTC NOI DSP COR" 14 Set-Cookie: SUID=7C5114701808990A000000005E3D2D93; expires=Thu, 02-Feb-2040 09:27:47 GMT; domain=.sogo.com; path=/ 15 P3P: CP="CURa ADMa DEVa PSAo PSDo OUR BUS UNI PUR INT DEM STA PRE COM NAV OTC NOI DSP COR" 16 x-log-ext: nodejs=1 17 Set-Cookie: black_passportid=; path=/; expires=Thu, 01 Jan 1970 00:00:00 GMT; domain=.sogo.com 18 Pragma: No-cache 19 Cache-Control: max-age=0 20 Expires: Fri, 07 Feb 2020 09:27:47 GMT 21 22 23 24 Process finished with exit code 0
Request的使用:
from urllib.request import urlopen, Request

# Build an explicit Request object first, then open it.
req = Request('http://sogo.com')
resp = urlopen(req)
print(resp.read().decode())
headers->User-agent
from urllib.request import urlopen, Request

request = Request('http://sogo.com')
# No User-Agent was supplied, so the header lookup yields None.
print(request.get_header('User-agent'))  # None
伪装User-agent
from urllib.request import Request

url = 'http://sogo.com'
# Any casing works when *setting* the header: Request.add_header stores keys
# via key.capitalize(), i.e. internally as 'User-agent'. 'User-Agent' is the
# conventional spelling to write.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36'
}
request = Request(url, headers=headers)
# When *reading* it back, get_header does NOT normalize the name, so it must
# be spelled 'User-agent' (the capitalize()d form) — otherwise you get None.
print(request.get_header('User-agent'))
from urllib.request import Request, urlopen

url = 'http://sogo.com'
# Send the request with a browser-like User-Agent attached.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36'
}
resp = urlopen(Request(url, headers=headers))
print(resp.read().decode())
随机选择User-Agent(写法一)
from urllib.request import Request, urlopen
from random import randint

url = 'http://sogo.com'

user_agents = [
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:72.0) Gecko/20100101 Firefox/72.0',
    'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36 QIHU 360SE',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.18362',
]

# Pick an index uniformly at random over 0 .. len-1.
headers = {
    'User-Agent': user_agents[randint(0, len(user_agents) - 1)]
}

request = Request(url, headers=headers)
print(request.get_header('User-agent'))

response = urlopen(request)
print(response.read().decode())
随机选择User-Agent(写法二)
from urllib.request import Request, urlopen
from random import choice

url = 'http://sogo.com'

user_agents = [
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:72.0) Gecko/20100101 Firefox/72.0',
    'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/55.0.2883.87 Safari/537.36 QIHU 360SE',
    'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.102 Safari/537.36 Edge/18.18362',
]

# random.choice is the idiomatic way to pick one element at random.
headers = {
    'User-Agent': choice(user_agents)
}

request = Request(url, headers=headers)
print(request.get_header('User-agent'))

response = urlopen(request)
print(response.read().decode())
fake_useragent
from fake_useragent import UserAgent  # pip install fake_useragent

# NOTE(review): constructing UserAgent() was observed to raise here — at
# construction it fetches https://fake-useragent.herokuapp.com/browsers/0.1.11.
ua = UserAgent()
# print(ua.chrome)
# print(ua.random)
# print(ua.firefox)
# print(ua.ie)  # IE is not recommended; it may cause problems
get请求,参数中文编码:quote
from urllib.request import Request, urlopen
from urllib.parse import quote

# Target: https://www.sogou.com/web?query=%E9%BB%84%E5%B1%B1
url = 'https://www.sogou.com/web?query='
# quote() percent-encodes a single value (Chinese text here).
print(quote('黄山'))  # %E9%BB%84%E5%B1%B1
url = url + quote('黄山')
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36'
}

response = urlopen(Request(url, headers=headers))
print(response.read().decode())
get请求,参数中文编码:urlencode,当有多个参数时可能比quote更方便
from urllib.request import Request, urlopen
from urllib.parse import urlencode

# urlencode() encodes a whole dict of parameters — handier than quote()
# when there are several of them.
# Target: https://www.sogou.com/web?query=%E9%BB%84%E5%B1%B1
args = {
    'query': '黄山'
}
url = 'https://www.sogou.com/web?'
print(urlencode(args))  # query=%E9%BB%84%E5%B1%B1
url = url + urlencode(args)
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36'
}

response = urlopen(Request(url, headers=headers))
print(response.read().decode())
百度贴吧经典案例:抓百度贴吧前几页数据,此案例把以上知识点串起来了,简单易懂易学。
from urllib.request import Request, urlopen
from urllib.parse import urlencode


def getHtml(url):
    """Fetch *url* with a desktop-Chrome User-Agent and return the raw bytes."""
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36'
    }
    return urlopen(Request(url, headers=headers)).read()


def saveHtml(filename, html):
    """Write the byte string *html* to *filename*."""
    with open(filename, 'wb') as f:
        f.write(html)


def main():
    """Download the first N result pages of a Baidu Tieba keyword search."""
    base_url = 'http://tieba.baidu.com/f?ie=utf-8&{}'
    kw = input('请输入要搜索的内容:')
    num = input('请输入要下载的页数:')
    for pn in range(int(num)):
        # Tieba paginates by offset: 50 posts per page.
        query = urlencode({'kw': kw, 'pn': pn * 50})
        saveHtml('index' + str(pn + 1) + '.html', getHtml(base_url.format(query)))


if __name__ == '__main__':
    main()
post请求:要勾选Preserve log,不然login请求会被冲掉。
from urllib.request import Request, urlopen
from urllib.parse import urlencode

# Form fields are url-encoded then byte-encoded; passing data= makes urlopen
# issue a POST instead of a GET.
data = urlencode({
    'username': '1**####****',
    'password': '******',
}).encode()

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36'
}

url = 'https://passport.baidu.com/v2/api/?login'

response = urlopen(Request(url, data=data, headers=headers))
print(response.getcode())
# print(response.read().decode())
第二天
08-ajax请求的抓取
from urllib.request import Request, urlopen

# Douban exposes its movie listing as a JSON ajax endpoint; fetch one page.
url = 'https://movie.douban.com/j/search_subjects?type=movie&tag=%E5%8D%8E%E8%AF%AD&sort=recommend&page_limit=20&page_start=20'
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.87 Safari/537.36'
}

resp = urlopen(Request(url, headers=headers))
print(resp.read().decode())
from urllib.request import Request, urlopen
import json

# BUG FIX: the original did `url = url.format(...)` inside the loop, which
# overwrote the template after the first iteration — the '{}' placeholder was
# gone, so every subsequent request silently fetched the same page. Keep the
# template and the per-page URL in separate variables.
url_template = 'https://movie.douban.com/j/search_subjects?type=movie&tag=%E5%8D%8E%E8%AF%AD&sort=recommend&page_limit=20&page_start={}'
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.87 Safari/537.36'
}
page_start = 1
while True:
    url = url_template.format(page_start * 20)
    request = Request(url, headers=headers)
    response = urlopen(request)
    json_str_data = response.read().decode()
    print(type(json_str_data))
    dict_data = json.loads(json_str_data)
    print(type(dict_data))
    print(type(dict_data['subjects']))
    print(len(dict_data['subjects']))
    print('page_start=', page_start)
    # A full page (20 items) means there may be more; a short page is the end.
    if len(dict_data['subjects']) == 20:
        page_start += 1
        continue
    print('最终page_start=', page_start)
    break
09-https请求的使用
from urllib.request import Request, urlopen
import ssl

# url = 'https://www.12306.cn/index/'
url = 'https://www.12306.cn/mormhweb/'
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.87 Safari/537.36'
}
request = Request(url, headers=headers)

# To skip certificate verification (self-signed / broken chains), pass an
# unverified SSL context instead:
#   context = ssl._create_unverified_context()
#   response = urlopen(request, context=context)

response = urlopen(request)
print(response.read().decode())
10-proxy的使用:ip代理
opener的使用
from urllib.request import Request, build_opener

url = 'http://sogo.com'
headers = {  # user agent
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.87 Safari/537.36'
}
request = Request(url, headers=headers)
# An opener plays the role of urlopen(), but can be customized with handlers.
opener = build_opener()
response = opener.open(request)
print(response.read().decode())
再加个handler
from urllib.request import Request, build_opener, HTTPHandler

url = 'http://sogo.com'
headers = {  # user agent
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.87 Safari/537.36'
}
request = Request(url, headers=headers)
# Same as the plain opener, but with an explicit HTTPHandler plugged in.
opener = build_opener(HTTPHandler())
response = opener.open(request)
print(response.read().decode())
免费代理ip:https://www.xicidaili.com/ http://www.nimadaili.com/
收费代理ip:https://www.kuaidaili.com/
from urllib.request import Request, ProxyHandler, build_opener

# httpbin echoes the request, so the response 'origin' shows the proxy IP.
url = 'http://httpbin.org/get'
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.87 Safari/537.36'
}
request = Request(url, headers=headers)

handler = ProxyHandler({'http': '118.212.106.29:9999'})
# Formats:
# handler = ProxyHandler({'http': 'IP:端口'})
# handler = ProxyHandler({'http': '用户名:密码@IP:端口'})
# handler = ProxyHandler({'http': '398707160:j8inhg2g@120.27.224.41:16818'})

# opener = build_opener()
opener = build_opener(handler)

response = opener.open(request)
print(response.read().decode())
运行结果:
C:\Users\xiongjiawei\PycharmProjects\Spider\venv\Scripts\python.exe C:/Users/xiongjiawei/PycharmProjects/Spider/13天搞定Python分布式爬虫/第02天/v04-proxy的使用.py { "args": {}, "headers": { "Accept-Encoding": "identity", "Cache-Control": "max-age=259200", "Host": "httpbin.org", "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.87 Safari/537.36", "X-Amzn-Trace-Id": "Root=1-5e3e184b-9403f1b4cd1fcab8e9904eac" }, "origin": "118.212.106.29", "url": "http://httpbin.org/get" } Process finished with exit code 0
11-cookie的使用1
不带cookie时会提示登录:
from urllib.request import Request, urlopen

# Without a Cookie header the member-only page responds with the login form.
url = 'https://www.sxt.cn/profile/course'
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.87 Safari/537.36'
}
resp = urlopen(Request(url, headers=headers))
print(resp.read().decode())
运行结果:

C:\Users\xiongjiawei\PycharmProjects\Spider\venv\Scripts\python.exe C:/Users/xiongjiawei/PycharmProjects/Spider/13天搞定Python分布式爬虫/第02天/v05-cookie的使用.py <!DOCTYPE html> <html> <head lang="en"> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" /> <meta http-equiv="X-UA-Compatible" content="IE=8" /> <meta http-equiv="X-UA-Compatible" content="IE=edge"> <title>登录 | 速学堂尚学堂旗下IT在线教育平台-在线培训学习视频课程</title> <meta name="keywords" content="速学堂,IT在线教育平台,在线学习" /> <meta name="description" content="速学堂云课堂为尚学堂旗下在线教育品牌,将积累13年的实体班线下课程和教学方法引到线上。您可通过速学堂IT在线教育平台培训学习Java、Python、云计算大数据、前端开发、UI设计、区块链与人工智能、软件测试与移动开发、PHP等。培训学习的课程大纲全新优化,内容有广度、有深度,顶尖讲师全程直播授课。专注整合尚学堂优势教学资源、打造适合在线学习并能保证教学结果的优质教学产品,同时打造和运营一整套教育生态软件体系,为用户提供满足自身成长和发展要求的有效服务。"/> <link rel="stylesheet" href="/public/new/css/font-awesome.min.css" type="text/css"> <link rel="stylesheet" href="/public/new/css/idangerous.swiper.css" type="text/css"> <link rel="stylesheet" href="/public/new/css/common.css?v=1.0.2" type="text/css"> <script src="/public/new/js/jquery-1.8.0.js" type="text/javascript"></script> <script src="/public/new/js/idangerous.swiper.js" type="text/javascript"></script> <script type="text/javascript" src="/public/js/aes_1.js"></script> <script type="text/javascript" src="/public/js/aes_fun.js"></script> <script src="/public/layui/layui.all.js"></script> <style type="text/css"> @media screen and (max-width:1000px) { .wap_main {width: 1200px; margin:0 auto; display:table; overflow:hidden} } </style> <script src="/public/new/js/reset.js?v=1.0.2" type="text/javascript"></script> <script type="text/javascript" src="/public/new/js/singlePoint.js?v=1.0.2"></script> </head> <body style="background: #f4f4f4"> <div class="topbar_s wap_main"> <div class="content_s clearfix"> <ul class="topbar_left fl clearfix"> <li><a href="/">首页 </a></li> <li><span>|</span></li> <li><a href="/profile/course">学习中心 </a></li> <li><span>|</span></li> <li><a href="/manual.html">在线API</a></li> </ul> <ul class="topbar_right fr clearfix"> 
<li><a href="/login?redirect_uri=https://www.sxt.cn/login?redirect_uri=https://www.sxt.cn/profile/course">登录</a></li> <li><span>|</span></li> <li><a href="/register?redirect_uri=https://www.sxt.cn/login?redirect_uri=https://www.sxt.cn/profile/course">注册</a></li> </ul> </div> </div> <div class="topnav_s wap_main"> <div class="content_s clearfix"> <div class="logo_s fl"> <a href="/"><img src="/public/new/images/logo.png" ></a> </div> <ul class="nav_info_s fl"> <li><a href="/category?free=yes" target="_blank">免费课</a></li> <li><a href="https://www.itbaizhan.cn" target="_blank">在线就业班</a></li> <li><a href="https://www.bjsxt.com" target="_blank">面授高薪班</a></li> <li><a href="https://www.sxt.cn/Java_jQuery_in_action/History_Direction.html" target="_self">Java实战教程系列</a></li> </ul> <div class="nav_search fr"> <input class="search_info fl" value="" /> <div class="hot_box"> <a href="/category?catId=9" target="_blank"><img src="/public/new/images/fire.png" alt="">人工智能</a> <a href="category?catId=80" target="_blank">微服务</a> </div> <i class="fa fa-search fr"></i> </div> </div> </div> <link rel="stylesheet" href="/public/new/css/login.css?v=1.0.2"/> <script src="/public/js/gt.js"></script> <script src="/public/new/js/login.js?v=1.0.2"></script> <div class="logoin wap_main"> <p>欢迎登录</p> <form action="" method="post" onsubmit="return login()"> <div class="email"> <label >账号登录:</label> <input type="text" name="user" onblur="checkloginEmail()" placeholder="请输入手机号/邮箱" /> <img src="/public/images/user.png" alt="" class="emailImg"/> </div> <div class="captcha"> <div id="embed-captcha"></div> <p id="wait" class="show">正在加载验证码......</p> <input type="hidden" name="captcha" value="0"> </div> <div class="code1"> <label>验证码:</label> <input type="text" name="code" class="code" placeholder="请输入验证码" onkeyup="checkLoginCode()" onblur="checkLoginCode()"/> <input type="button" id="btn" value="获取验证码" onclick="getLoginCode()" disabled="disabled" class="get_dxcode" /> <img src="/public/images/code.png" 
alt="" class="codeImg"/> </div> <input type="submit" value="登录" class="sub_login" id = "submitD" /> </form> <a class="enroll" href="/register?redirect_uri=https://www.sxt.cn/profile/course">免费注册</a> </div> </body> </html> Process finished with exit code 0
带cookie时正常访问:
from urllib.request import Request, urlopen

url = 'https://www.sxt.cn/profile/course'
# Sending the browser's session cookie verbatim authenticates the request.
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.87 Safari/537.36',
    'Cookie': 'UM_distinctid=1701f85c5db247-04eb63ce77cc27-b383f66-e1000-1701f85c5dd90; CNZZDATA1261969808=487362013-1581072764-%7C1581126778; user=a%3A2%3A%7Bs%3A2%3A%22id%22%3Bi%3A35311%3Bs%3A5%3A%22phone%22%3Bs%3A11%3A%2218555251650%22%3B%7D'
}
resp = urlopen(Request(url, headers=headers))
print(resp.read().decode())
运行结果:

C:\Users\xiongjiawei\PycharmProjects\Spider\venv\Scripts\python.exe C:/Users/xiongjiawei/PycharmProjects/Spider/13天搞定Python分布式爬虫/第02天/v05-cookie的使用.py <!DOCTYPE html> <html> <head lang="en"> <meta http-equiv="Content-Type" content="text/html; charset=utf-8" /> <meta http-equiv="X-UA-Compatible" content="IE=8" /> <meta http-equiv="X-UA-Compatible" content="IE=edge"> <title>我的课程 | 速学堂尚学堂旗下IT在线教育平台-在线培训学习视频课程</title> <meta name="keywords" content="速学堂,IT在线教育平台,在线学习" /> <meta name="description" content="速学堂云课堂为尚学堂旗下在线教育品牌,将积累13年的实体班线下课程和教学方法引到线上。您可通过速学堂IT在线教育平台培训学习Java、Python、云计算大数据、前端开发、UI设计、区块链与人工智能、软件测试与移动开发、PHP等。培训学习的课程大纲全新优化,内容有广度、有深度,顶尖讲师全程直播授课。专注整合尚学堂优势教学资源、打造适合在线学习并能保证教学结果的优质教学产品,同时打造和运营一整套教育生态软件体系,为用户提供满足自身成长和发展要求的有效服务。"/> <link rel="stylesheet" href="/public/new/css/font-awesome.min.css" type="text/css"> <link rel="stylesheet" href="/public/new/css/idangerous.swiper.css" type="text/css"> <link rel="stylesheet" href="/public/new/css/common.css?v=1.0.7" type="text/css"> <script src="/public/new/js/jquery-1.8.0.js" type="text/javascript"></script> <script src="/public/new/js/idangerous.swiper.js" type="text/javascript"></script> <script type="text/javascript" src="/public/js/aes_1.js"></script> <script type="text/javascript" src="/public/js/aes_fun.js"></script> <script src="/public/layui/layui.all.js"></script> <style type="text/css"> @media screen and (max-width:1000px) { .wap_main {width: 1200px; margin:0 auto; display:table; overflow:hidden} } </style> <script src="/public/new/js/reset.js?v=1.0.7" type="text/javascript"></script> <script type="text/javascript" src="/public/new/js/singlePoint.js?v=1.0.7"></script> </head> <body style="background: #f4f4f4"> <div class="topbar_s wap_main"> <div class="content_s clearfix"> <ul class="topbar_left fl clearfix"> <li><a href="/">首页 </a></li> <li><span>|</span></li> <li><a href="/profile/course">学习中心 </a></li> <li><span>|</span></li> <li><a href="/manual.html">在线API</a></li> </ul> <ul class="topbar_right fr 
clearfix"> <li><a href="/profile/course">VIP会员中心</a></li> <li><span>|</span></li> <li><a href="/logout?redirect_uri=https://www.sxt.cn/profile/course" class="logout">退出</a></li> </ul> </div> </div> <div class="topnav_s wap_main"> <div class="content_s clearfix"> <div class="logo_s fl"> <a href="/"><img src="/public/new/images/logo.png" ></a> </div> <ul class="nav_info_s fl"> <li><a href="/category?free=yes" target="_blank">免费课</a></li> <li><a href="https://www.itbaizhan.cn" target="_blank">在线就业班</a></li> <li><a href="https://www.bjsxt.com" target="_blank">面授高薪班</a></li> <li><a href="https://www.sxt.cn/Java_jQuery_in_action/History_Direction.html" target="_self">Java实战教程系列</a></li> </ul> <div class="nav_search fr"> <input class="search_info fl" value="" /> <div class="hot_box"> <a href="/category?catId=9" target="_blank"><img src="/public/new/images/fire.png" alt="">人工智能</a> <a href="category?catId=80" target="_blank">微服务</a> </div> <i class="fa fa-search fr"></i> </div> </div> </div> <link rel="stylesheet" href="/public/new/css/profile.css" type="text/css"/> <div class="person_box wap_main"> <div class="content_s content_s_per clearfix"> <div class="person_left fl"> <ul> <li class="clearfix checked_li"> <a href="/profile/course"><img src="/public/new/images/profile/pericon1.png" alt="" class="fl"> <p class="fl">我的课程</p> <i class="fa fa-angle-right fr"></i></a> </li> <li class="clearfix "> <a href="/profile/comments"><img src="/public/new/images/profile/pericon2.png" alt="" class="fl"> <p class="fl">我的评论</p> <i class="fa fa-angle-right fr"></i></a> </li> <li class="clearfix "> <a href="/profile/order"><img src="/public/new/images/profile/pericon3.png" alt="" class="fl"> <p class="fl">订单列表</p> <i class="fa fa-angle-right fr"></i></a> </li> <li class="clearfix "> <a href="/profile/integral"><img src="/public/new/images/profile/pericon6.png" alt="" class="fl"> <p class="fl">学币中心</p> <i class="fa fa-angle-right fr"></i></a> </li> <li class="clearfix "> <a 
href="/profile/setting"><img src="/public/new/images/profile/pericon4.png" alt="" class="fl"> <p class="fl">个人设置</p> <i class="fa fa-angle-right fr"></i></a> </li> <li class="clearfix "> <a href="/profile/suggestion"><img src="/public/new/images/profile/pericon5.png" alt="" class="fl"> <p class="fl">意见反馈</p> <i class="fa fa-angle-right fr"></i></a> </li> </ul> </div> <div class="person_right fr"> <div class="right_top"> <div class="right_top_l clearfix fl"> <span class="fl"></span> 我的课程 </div> </div> <div class="right_nav"> <ul class="fl"> <li class="li_cheaked"> <a href="/profile/course">学习中</a> </li> <li > <a href="?status=finish">已学完</a> </li> <li > <a href="?status=collect">收藏</a> </li> </ul> </div> <div class="right_con_box"> <div class="right_con clearfix"> <div class="person_kong"><img src="/public/new/images/kong.png" alt=""></div> </div> </div> </div> </div> </div> <div class="wap_main"> <div class="leftnav_box"> <div class="nav_btn1"><img src="/public/new/images/navbtn1.png" alt=""></div> <ul> <li>编程开发前景如何?</li> <li class="1">课程是怎么学习的?</li> <li>如何选择适合的编程?</li> <li>我适合做开发吗?</li> </ul> <div class="nav_btn2"><img src="/public/new/images/navbtn2.png" alt=""></div> <a href="javascript:;" class="backtop"><img src="/public/new/images/top.png" alt=""></a> <div class="qq_box"><img src="/public/new/images/qqimg.png" alt=""></div> <div class="wx_box"><img src="/public/new/images/wximg.png" alt=""></div> </div> <div class="sxt_bottom"> <p>北京总部地址:北京市海淀区西三旗街道建材城西路中腾建华商务大厦东侧二层尚学堂</p> <p>咨询电话:400-009-1906 010-56233821</p> <p>Copyright 2007-2019 北京尚学堂科技有限公司 京ICP备13018289号-1 京公网安备11010802015183 <script type="text/javascript">var cnzz_protocol = (("https:" == document.location.protocol) ? 
" https://" : " http://");document.write(unescape("%3Cspan id='cnzz_stat_icon_1261969808'%3E%3C/span%3E%3Cscript src='" + cnzz_protocol + "s13.cnzz.com/stat.php%3Fid%3D1261969808%26show%3Dpic' type='text/javascript'%3E%3C/script%3E"));</script></p> </div> <script type="text/javascript"> $(".backtop").click(function(){ $('body,html').animate({scrollTop:0},1000); return false; }); $(".nav_btn2").mouseover(function () { $(".wx_box").show(); }).mouseout(function () { $(".wx_box").hide(); }); $(".nav_btn1").mouseover(function () { $(".qq_box").css('top','33px'); $(".qq_box").show(); }).mouseout(function () { $(".qq_box").hide(); }); $(".leftnav_box ul li").mouseover(function () { var li_i= $(".leftnav_box ul"); var i=$(this).index(); if(i==0){ $(".qq_box").css('top','120px'); }else if(i==1){ $(".qq_box").css('top','155px'); }else if(i==2){ $(".qq_box").css('top','190px'); }else if(i==3){ $(".qq_box").css('top','225px'); } $(".qq_box").show(); }).mouseout(function () { $(".qq_box").hide(); }) </script> </div> </body> </html> Process finished with exit code 0
自动获取登录的cookie给下一个请求使用(页面已变,无法运行成功,但记录解决方案思路很重要)
from urllib.request import Request, urlopen, HTTPCookieProcessor, build_opener
from fake_useragent import UserAgent
from urllib.parse import urlencode

# Step 1: log in. The HTTPCookieProcessor captures the Set-Cookie headers
# so the same opener can replay them on later requests.
login_url = 'http://www.sxt.cn/index/login/login'
headers = {
    'User-Agent': UserAgent().chrome
}
data = urlencode({
    'user': '17703181473',
    'password': '123456',
}).encode()
request = Request(login_url, headers=headers, data=data)
opener = build_opener(HTTPCookieProcessor())
response = opener.open(request)
print(response.read().decode())

# Step 2: visit a member page through the SAME opener — the stored session
# cookie is sent automatically.
info_url = 'http://www.sxt.cn/index/user.html'
response = opener.open(Request(info_url, headers=headers))
print(response.read().decode())
12-cookie的使用2
cookie保存到文件,从文件中获取cookie:网页已变,代码无法运行,记录的是解决问题的思路
from urllib.request import Request, build_opener, HTTPCookieProcessor
from fake_useragent import UserAgent
from http.cookiejar import MozillaCookieJar
from urllib.parse import urlencode


def save_cookie():
    """Log in and persist the resulting session cookie to cookie.txt."""
    login_url = 'http://www.sxt.cn/index/login/login'
    headers = {
        'User-Agent': UserAgent().chrome
    }
    data = urlencode({
        'user': '17703181473',
        'password': '123456',
    }).encode()
    request = Request(login_url, headers=headers, data=data)
    jar = MozillaCookieJar()
    opener = build_opener(HTTPCookieProcessor(jar))
    opener.open(request)  # response body not needed; only the cookies are
    # Keep session/expired cookies too, so the file round-trips completely.
    jar.save('cookie.txt', ignore_expires=True, ignore_discard=True)


def get_cookie():
    """Reload cookie.txt and use it to fetch a member-only page."""
    info_url = 'http://www.sxt.cn/index/user.html'
    headers = {
        'User-Agent': UserAgent().chrome
    }
    request = Request(info_url, headers=headers)
    jar = MozillaCookieJar()
    jar.load('cookie.txt', ignore_discard=True, ignore_expires=True)
    opener = build_opener(HTTPCookieProcessor(jar))
    response = opener.open(request)
    print(response.read().decode())


if __name__ == '__main__':
    save_cookie()
    get_cookie()
13-URLError的使用
from urllib.request import Request, urlopen
from urllib.error import URLError, HTTPError

# IMPROVEMENT: the original caught only URLError and dispatched on the fragile
# `e.args == ()` / `e.args[0].errno` shape. HTTPError is a URLError subclass,
# so catching it first gives a clean, documented split between the two cases.
# url = 'http://cnblogs.com/aaaaaaaaaaa'  # HTTP Error 404: Not Found
url = 'http://cnblogsbbbbbbbb.com/'  # <urlopen error [Errno 11001] getaddrinfo failed>
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.87 Safari/537.36'
}
request = Request(url, headers=headers)
try:
    response = urlopen(request)
    print(response.read().decode())
except HTTPError as e:
    # Server answered, but with an error status (404, 500, ...).
    print(e)
    print(type(e))
    print(e.code)
except URLError as e:
    # Request never completed: DNS failure, refused connection, timeout, ...
    print(e)
    print(type(e))
    print(e.reason)
print('结束')
14-requests的使用:
安装:pip install requests
请求:
req=requests.get('http://sogo.com')
req=requests.post('http://sogo.com')
req=requests.put('http://sogo.com')
req=requests.delete('http://sogo.com')
req=requests.head('http://sogo.com')
req=requests.options('http://sogo.com')
get请求:参数是字典,也可以传递json类型的参数
import requests
resp=requests.get('http://sogo.com/web',params={'query':'黄山'})
print(resp.url)
resp.encoding='utf-8'
html=resp.text
print(html)
post请求:参数是字典,也可以传递json类型的参数
url='http://www.sxt.cn/index/login/login.html'
data={'user':'17703181473','password':'123456'}
resp=requests.post(url,data=data)
resp.encoding='utf-8'
html=resp.text
print(html)
自定义请求头部
伪装请求头部是采集时经常用的,我们可以用这个方法来隐藏:
headers={'User-Agent':'python'}
r=requests.get(url,headers=headers)
print(r.request.headers['User-Agent'])
设置超时时间
可以通过timeout属性设置超时时间,一旦超过这个时间还没获得响应内容,就会提示错误
requests.get('http://github.com',timeout=0.001)
代理访问
采集时为避免被封IP,经常会使用代理,requests也有相应的proxies属性
import requests
proxies={
'http':'http://10.10.1.10:3128',
'https':'https://10.10.1.10:1080',
}
requests.get('http://www.zhidaow.com',proxies=proxies)
如果代理需要账户和密码,则需这样
proxies={'http':'http://用户名:密码@IP:端口/',}
session自动保存cookies
session的意思是保持一个会话,比如登陆后继续操作(记录身份信息)而requests是单次请求的请求,身份信息不会被记录
#创建一个session对象
s=requests.Session()
#用session对象发出get请求,设置cookies
s.get('http://httpbin.org/cookies/set/sessioncookie/123456789')
ssl验证
#禁用安全请求警告
requests.packages.urllib3.disable_warnings()
resp=requests.get(url,verify=False,headers=headers)
代码 | 含义 |
resp.json() | 获取响应内容(以json字符串) |
resp.text | 获取响应内容(以字符串) |
resp.content | 获取响应内容(以字节的方式) |
resp.headers | 获取响应头内容 |
resp.url | 获取访问地址 |
resp.encoding | 获取页面编码 |
resp.request.headers | 请求头内容 |
resp.cookies | 获取cookie |
requests的get请求:代码运行正常
import requests
from fake_useragent import UserAgent

# requests builds the query string from `params` — no manual quoting needed.
headers = {
    'User-Agent': UserAgent().chrome
}
url = 'http://sogo.com/web'
params = {
    'query': '黄山'
}
resp = requests.get(url, headers=headers, params=params)
print(resp.text)
requests的post请求:
import requests
from fake_useragent import UserAgent

# POST a form to Baidu Translate's suggestion endpoint.
url = 'https://fanyi.baidu.com/sug'
headers = {
    'User-Agent': UserAgent().chrome
}
data = {
    'kw': '中国'
}
resp = requests.post(url, headers=headers, data=data)
print(resp.text)
requests的IP代理proxy:
import requests
from fake_useragent import UserAgent

# httpbin echoes the request; 'origin' in the reply shows the proxy IP.
url = 'http://httpbin.org/get'
headers = {
    'User-Agent': UserAgent().random
}
proxies = {
    'http': '220.175.144.47:9999'
}
resp = requests.get(url, headers=headers, proxies=proxies)
print(resp.text)
运行结果:免费代理IP很不好用,很难找到一个能用的,而且只能用一次

C:\Users\xiongjiawei\PycharmProjects\Spider\venv\Scripts\python.exe C:/Users/xiongjiawei/PycharmProjects/Spider/13天搞定Python分布式爬虫/第02天/v11-requests-proxy.py { "args": {}, "headers": { "Accept": "*/*", "Accept-Encoding": "gzip, deflate", "Cache-Control": "max-age=259200", "Host": "httpbin.org", "User-Agent": "Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/37.0.2049.0 Safari/537.36", "X-Amzn-Trace-Id": "Root=1-5e3e49cb-ad12455619b0ce5dbd2047fe" }, "origin": "220.175.144.47", "url": "http://httpbin.org/get" } Process finished with exit code 0
requests的SSL认证
import requests
from fake_useragent import UserAgent

url = 'https://www.12306.cn/mormhweb/'
headers = {
    'User-Agent': UserAgent().random
}
# resp = requests.get(url, headers=headers)  # also works
# Silence the InsecureRequestWarning that verify=False would otherwise print.
requests.packages.urllib3.disable_warnings()
resp = requests.get(url, verify=False, headers=headers)
resp.encoding = 'utf-8'  # avoid mojibake in the Chinese page body
print(resp.text)
requests的cookie:页面已变,只为记录解决问题的思路
import requests
from fake_useragent import UserAgent

# A Session keeps cookies across requests, so the login cookie from the POST
# is sent automatically on the follow-up GET.
session = requests.Session()
url = 'http://www.sxt.cn/index/login/login'
headers = {
    'User-Agent': UserAgent().chrome
}
data = {
    'user': '17703181473',
    'password': '123456'
}
resp = session.post(url, headers=headers, data=data)
# FIX: the original used 'www.sxt.com/cn/...' — inconsistent with the
# 'www.sxt.cn/...' host used by the urllib cookie examples above.
info_url = 'http://www.sxt.cn/index/user.html'
resp = session.get(info_url, headers=headers)
print(resp.text)
未完待续……