爬蟲實戰
第一個爬蟲程式
# Fetch a page with urllib and persist the raw response bytes to disk.
import urllib.request

# 1. Target URL.
target = ("http://www.eastmoney.com/")
# 2. urlopen() returns a response object for the URL.
resp = urllib.request.urlopen(url=target)
# 3. Read the page body (bytes).
body = resp.read()
# 4. Persist to disk.
with open('./baidu.html', 'wb') as out:
    out.write(body)
print('success')
第二個解決編碼問題
# Build a Sogou search URL, percent-encoding the non-ASCII query term.
import urllib.request
import urllib.parse

base = 'https://www.sogou.com/web?query='
# URLs may not contain non-ASCII characters; quote() percent-encodes them.
encoded_word = urllib.parse.quote("人民币")
full_url = base + encoded_word  # now a valid URL
# Issue the request and store the page.
resp = urllib.request.urlopen(url=full_url)
content = resp.read()
with open('renminbi.html', 'wb') as fh:
    fh.write(content)
UA的身份偽裝
# Disguise the crawler as a normal browser by sending a User-Agent header.
import urllib.request

target = 'https://www.baidu.com/'
# 1. Build a Request object carrying the spoofed UA header.
ua_headers = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.67 Safari/537.36'
}
req = urllib.request.Request(url=target, headers=ua_headers)
# 2. Fire the request through the customised Request object.
resp = urllib.request.urlopen(req)
print(resp.read())
用post請求
# POST with urllib: the form data must be url-encoded (str) and then
# converted to bytes before urlopen() will accept it via `data=`.
import urllib.request
import urllib.parse

# 1. Target endpoint.
target = 'https://fanyi.baidu.com/sug'
# 2. Wrap the form fields in a dict, urlencode() them, then encode to bytes.
form = {'kw': '西瓜'}
payload = urllib.parse.urlencode(form).encode()
# 3. urlopen's `data` argument makes this a POST request.
resp = urllib.request.urlopen(url=target, data=payload)
resp.read()
request模塊:
get請求
# Minimal GET request with the requests library; save the decoded page text.
import requests

target = 'https://www.sogou.com/'
resp = requests.get(url=target)
with open('./sougou.html', 'w', encoding='utf-8') as fh:
    fh.write(resp.text)
print('finsh!')
get請求攜帶參數,有兩種方式
方式1:
# requests handles non-ASCII query strings automatically, so the Chinese
# keyword can be embedded in the URL directly — no manual quoting needed.
import requests

target = 'https://www.sogou.com/web?query=周杰伦&ie=utf-8'
resp = requests.get(url=target)
with open('./zhou.html', 'w', encoding='utf-8') as fh:
    fh.write(resp.text)
方式2:
# GET with the query parameters supplied as a dict via the `params` argument.
import requests

# Custom request headers (UA spoof).
ua = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36',
}
target = 'https://www.sogou.com/web'
# GET parameters, encoded by requests itself.
query = {
    'query': '周杰伦',
    'ie': 'utf-8'
}
resp = requests.get(url=target, params=query, headers=ua)
print(resp.content)
基於requests的post請求:
# POST a login form to Douban with requests and persist the response page.
import requests

# 1. Login endpoint.
login_url = 'https://accounts.douban.com/login'
# Form fields carried by the POST body.
form = {
    "source": "movie",
    "redir": "https://movie.douban.com/",
    "form_email": "15027900535",
    "form_password": "bobo@15027900535",
    "login": "登录",
}
# Custom request headers (UA spoof).
ua = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36',
}
# 2. Fire the POST request.
resp = requests.post(url=login_url, data=form, headers=ua)
# 3./4. Grab the page text and persist it.
with open('./douban.html', 'w', encoding='utf-8') as fh:
    fh.write(resp.text)
使用requests模塊發出ajax的get請求:
# Hit Douban's AJAX endpoint for the movie chart; `start` and `limit`
# control which slice of the ranking is returned.
import requests

endpoint = 'https://movie.douban.com/j/chart/top_list?'
query = {
    'type': '13',
    'interval_id': '100:90',
    'action': '',
    'start': '10',
    'limit': '20',
}
ua = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36',
}
resp = requests.get(url=endpoint, params=query, headers=ua)
print(resp.text)  # resp.content would yield the raw bytes instead
使用requests模塊的綜合練習
實現功能:用戶輸入搜索字,再輸入起始與結束頁,輸出對應的搜索頁面
# Crawl a user-chosen range of Sogou-Zhihu result pages for a keyword
# and save each result page as a file under ./path/.
import os
import requests

if not os.path.exists('./path'):
    os.mkdir('./path')

keyword = input('Enter the word:')
base_url = 'https://zhihu.sogou.com/zhihu'
first = int(input('Enter the start page:'))
last = int(input('Enter the end page:'))
ua = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36',
}
for page_no in range(first, last + 1):
    query = {
        'query': keyword,
        'page': page_no,
        'ie': 'utf-8',
    }
    resp = requests.get(url=base_url, params=query, headers=ua)
    out_name = keyword + '_' + str(page_no)
    with open('./path/' + out_name, 'w', encoding='utf-8') as fh:
        fh.write(resp.text)
    print(str(page_no) + 'finish')
使用session進行的登錄操作之後顯示的個人主頁
# Log in through a requests Session so the cookies issued at login are
# replayed automatically on the follow-up profile-page request.
import requests

session = requests.session()
# 1. Log in; the session captures and stores the returned cookies.
login_url = 'https://accounts.douban.com/login'
form = {
    "source": "None",
    "redir": "https://www.douban.com/people/185687620/",
    "form_email": "15027900535",
    "form_password": "bobo@15027900535",
    "login": "登录",
}
ua = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36',
}
login_response = session.post(url=login_url, data=form, headers=ua)
# 2. Request the profile page with the same (now authenticated) session.
profile_url = 'https://www.douban.com/people/185687620/'
resp = session.get(url=profile_url, headers=ua)
with open('./douban110.html', 'w', encoding='utf-8') as fh:
    fh.write(resp.text)
使用requests模塊進行代理請求,推薦生成代理IP網站:http://www.goubanjia.com/
# Route the request through an HTTP proxy via the `proxies` mapping.
import requests

target = 'http://www.baidu.com/s?wd=ip&ie=utf-8'
proxy_map = {
    'http': '39.137.69.7:8080'
}
resp = requests.get(url=target, proxies=proxy_map)
with open('./daili.html', 'w', encoding='utf-8') as fh:
    fh.write(resp.text)
print('finish')
驗證碼處理:
云打码平台处理验证码的实现流程:
- 1.对携带验证码的页面数据进行抓取
- 2.可以将页面数据中验证码进行解析,验证码图片下载到本地
- 3.可以将验证码图片提交给三方平台进行识别,返回验证码图片上的数据值
- 云打码平台:
- 1.在官网中进行注册(普通用户和开发者用户)
- 2.登录开发者用户:
- 1.实例代码的下载(开发文档-》调用实例及最新的DLL-》PythonHTTP实例下载)
- 2.创建一个软件:我的软件-》添加新的软件
- 3.使用示例代码中的源码文件中的代码进行修改,让其识别验证码图片中的数据值
def getCode(codeImg):
    """Submit the captcha image at *codeImg* to the YunDaMa platform
    and return the recognized text (or None if misconfigured)."""
    # Ordinary-user credentials on the YunDaMa platform.
    username = 'bobo328410948'
    password = 'bobo328410948'
    # Software id and key — developer-revenue-share parameters, obtained
    # from the developer console under "My Software".
    appid = 6003
    appkey = '1f4b564483ae5c907a1d34f8e2f2776c'
    # Path of the captcha image file.
    filename = codeImg
    # Captcha type id (e.g. 1004 = 4 alphanumeric chars). Pricing differs
    # per type; full list at http://www.yundama.com/price.html
    codetype = 3000
    # Timeout in seconds for the recognition call.
    timeout = 20

    # Guard against running the sample with placeholder credentials.
    if username == 'username':
        print('请设置好相关参数再测试')
        return

    # Initialise the API client and authenticate.
    client = YDMHttp(username, password, appid, appkey)
    uid = client.login()
    print('uid: %s' % uid)
    # Check the remaining account balance.
    balance = client.balance()
    print('balance: %s' % balance)
    # Submit the image and wait (up to `timeout` s) for the decoded text.
    cid, result = client.decode(filename, codetype, timeout)
    print('cid: %s, result: %s' % (cid, result))
    return result
# Full captcha-login flow against Douban: fetch the login page, extract
# and download the captcha image, have the third-party platform read it,
# then submit the login form with the recognized text.
import requests
from lxml import etree
import json
import time
import re

# 1. Grab the page that carries the captcha.
login_page_url = 'https://www.douban.com/accounts/login?source=movie'
ua = {
    'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/70.0.3538.77 Mobile Safari/537.36'
}
page_text = requests.get(url=login_page_url, headers=ua).text

# 2. Parse the captcha image URL out of the page and download its bytes.
tree = etree.HTML(page_text)
codeImg_url = tree.xpath('//*[@id="captcha_image"]/@src')[0]
code_img = requests.get(url=codeImg_url, headers=ua).content
# The captcha id is embedded in the <img> src, e.g.
# '<img id="captcha_image" src="https://www.douban.com/misc/captcha?id=AdC4WXGyiRuVJrP9q15mqIrt:en&size=s" alt="captcha" class="captcha_image">'
c_id = re.findall('<img id="captcha_image".*?id=(.*?)&.*?>', page_text, re.S)[0]
with open('./code.png', 'wb') as fp:
    fp.write(code_img)

# 3. Let the captcha platform read the image off disk.
codeText = getCode('./code.png')
print(codeText)

# 4. Post the login form, including the recognized captcha text and id.
post = 'https://accounts.douban.com/login'
data = {
    "source": "movie",
    "redir": "https://movie.douban.com/",
    "form_email": "15027900535",
    "form_password": "bobo@15027900535",
    "captcha-solution": codeText,
    "captcha-id": c_id,
    "login": "登录",
}
print(c_id)
login_text = requests.post(url=post, data=data, headers=ua).text
with open('./login.html', 'w', encoding='utf-8') as fp:
    fp.write(login_text)
#這個是雲打碼上面有的
class YDMHttp:
    """Minimal HTTP client for the YunDaMa captcha-recognition API.

    Mirrors the official sample client: every API method returns the
    platform's (negative) error code on failure, and -9001 when the HTTP
    call itself yields no usable response.
    """

    apiurl = 'http://api.yundama.com/api.php'
    username = ''
    password = ''
    appid = ''
    appkey = ''

    def __init__(self, username, password, appid, appkey):
        self.username = username
        self.password = password
        # The API expects the numeric software id as a string.
        self.appid = str(appid)
        self.appkey = appkey

    def request(self, fields, files=None):
        """POST *fields* (plus optional *files*, a {name: path} mapping)
        to the API and return the decoded JSON response as a dict.

        BUG FIX: the original used a mutable default argument (files=[]).
        """
        response = self.post_url(self.apiurl, fields, files)
        response = json.loads(response)
        return response

    def balance(self):
        """Return the account balance, or a negative error code."""
        data = {'method': 'balance', 'username': self.username,
                'password': self.password, 'appid': self.appid,
                'appkey': self.appkey}
        response = self.request(data)
        if response:
            if response['ret'] and response['ret'] < 0:
                return response['ret']
            return response['balance']
        return -9001

    def login(self):
        """Authenticate and return the user id, or a negative error code."""
        data = {'method': 'login', 'username': self.username,
                'password': self.password, 'appid': self.appid,
                'appkey': self.appkey}
        response = self.request(data)
        if response:
            if response['ret'] and response['ret'] < 0:
                return response['ret']
            return response['uid']
        return -9001

    def upload(self, filename, codetype, timeout):
        """Upload a captcha image; return the captcha id, or an error code."""
        data = {'method': 'upload', 'username': self.username,
                'password': self.password, 'appid': self.appid,
                'appkey': self.appkey, 'codetype': str(codetype),
                'timeout': str(timeout)}
        file = {'file': filename}
        response = self.request(data, file)
        if response:
            if response['ret'] and response['ret'] < 0:
                return response['ret']
            return response['cid']
        return -9001

    def result(self, cid):
        """Return the recognized text for *cid*, or '' if not ready."""
        data = {'method': 'result', 'username': self.username,
                'password': self.password, 'appid': self.appid,
                'appkey': self.appkey, 'cid': str(cid)}
        response = self.request(data)
        return response and response['text'] or ''

    def decode(self, filename, codetype, timeout):
        """Upload *filename* and poll once per second, up to *timeout*
        seconds, for the recognition result.

        Returns (cid, text); (-3003, '') on poll timeout, or (cid, '')
        when the upload itself failed.
        """
        cid = self.upload(filename, codetype, timeout)
        if cid > 0:
            for _ in range(timeout):
                result = self.result(cid)
                if result != '':
                    return cid, result
                time.sleep(1)
            return -3003, ''
        return cid, ''

    def report(self, cid):
        """Report a wrong recognition for *cid*; returns the API status."""
        data = {'method': 'report', 'username': self.username,
                'password': self.password, 'appid': self.appid,
                'appkey': self.appkey, 'cid': str(cid), 'flag': '0'}
        response = self.request(data)
        if response:
            return response['ret']
        return -9001

    def post_url(self, url, fields, files=None):
        """POST *fields* with optional *files* and return the body text.

        BUG FIX: the original opened every file and never closed it (and
        mutated the caller's dict in place); handles are now closed after
        the request in a `finally` block.
        """
        files = files or {}
        handles = {key: open(files[key], 'rb') for key in files}
        try:
            res = requests.post(url, files=handles, data=fields)
        finally:
            for fh in handles.values():
                fh.close()
        return res.text
使用爬蟲爬取頁面的圖片並保存
# Scrape the image URLs off qiushibaike's picture section with a regex
# and download every image into ./imgs/.
import os, re, requests

list_url = 'https://www.qiushibaike.com/pic/'
ua = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36',
}
listing = requests.get(url=list_url, headers=ua)
img_list = re.findall('<div class="thumb">.*?<img src="(.*?)".*?>.*?</div>', listing.text, re.S)
if not os.path.exists('./imgs'):
    os.mkdir('imgs')
print(img_list)
for src in img_list:
    # The scraped src is protocol-relative ("//pic..."), so prepend a scheme.
    img_url = 'https:' + src
    img_data = requests.get(url=img_url, headers=ua).content
    imgName = src.split('/')[-1]
    imgPath = './imgs/' + imgName
    with open(imgPath, 'wb') as fh:
        fh.write(img_data)
    print(imgName + '寫入成功')
使用bs4實踐
# Scrape the full text of the novel from shicimingju with BeautifulSoup:
# the table of contents links to one page per chapter; each chapter is
# fetched and appended to ./sanguo3.txt.
import requests
from bs4 import BeautifulSoup

url = 'http://www.shicimingju.com/book/sanguoyanyi.html'
headers = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/69.0.3497.100 Safari/537.36',
}


def get_content(url):
    """Fetch one chapter page and return the chapter body text."""
    content_page = requests.get(url=url, headers=headers).text
    soup = BeautifulSoup(content_page, 'lxml')
    div = soup.find('div', class_='chapter_content')
    return div.text


page_text = requests.get(url=url, headers=headers).text
soup = BeautifulSoup(page_text, 'lxml')
# One <a> per chapter in the table of contents.
a_list = soup.select('.book-mulu > ul > li > a')
# BUG FIX: the original opened the output file and never closed it; a
# `with` block guarantees the handle is flushed and closed even on error.
with open('./sanguo3.txt', 'w', encoding='utf-8') as fp:
    for a in a_list:
        title = a.text
        content_url = 'http://www.shicimingju.com' + a['href']
        content = get_content(content_url)
        fp.write(title + ":" + content + '\n\n\n\n')
        print('章節寫入成功')