Web scraping: proxies, garbled text, and captchas
Proxies
Why crawlers need proxies
Some websites take anti-scraping measures. For example, many sites count how often a single IP visits within a given time window; if the request rate is too high to look like a normal visitor, the site may block that IP.
So we set up a number of proxy IPs and switch to a different one every so often; even if one IP gets banned, we can switch to another and keep crawling.
Types of proxies
Forward proxy: a proxy that fetches data on behalf of the client. A forward proxy shields the client from being traced or held accountable.
Reverse proxy: a proxy that serves data on behalf of the server. A reverse proxy protects the server or handles load balancing.
Sites offering free proxy IPs
http://www.goubanjia.com/, 西祠代理 (Xici Proxy), 快代理 (Kuaidaili)
A simple usage example
import requests

# proxy IP site: http://www.goubanjia.com/
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'
}
url = 'https://www.baidu.com/s?wd=ip'
page_text = requests.get(url=url, headers=headers, proxies={'https': '212.119.229.18:33852'}).text
# print(page_text)
with open('./ip.html', 'w', encoding='utf-8') as f:
    f.write(page_text)
# proxy = [{}, {}, {}]  # a proxy pool would go here
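The commented-out proxy = [{}, {}, {}] line only hints at how a proxy pool is used. Below is a minimal sketch of rotating through such a pool with random.choice; the proxy addresses are placeholders, not live proxies.

import random
import requests

# placeholder proxy pool -- replace with live proxies from a provider such as goubanjia
proxy_pool = [
    {'https': '212.119.229.18:33852'},
    {'https': '119.84.84.185:12345'},
    {'https': '46.17.40.132:8080'},
]

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'
}
url = 'https://www.baidu.com/s?wd=ip'

# pick a random proxy for each request so a single banned IP does not stop the crawl
for _ in range(3):
    proxy = random.choice(proxy_pool)
    try:
        page_text = requests.get(url=url, headers=headers, proxies=proxy, timeout=5).text
        print('ok via', proxy)
        break
    except requests.exceptions.RequestException:
        print('failed via', proxy, '- trying another one')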
Untrusted sites
For example, some sites use certificates that are not signed by a trusted CA.
Just add verify=False:
response = requests.get(url='http://www.123.com', verify=False)
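Note that with verify=False requests still emits an InsecureRequestWarning on every call. Silencing it is not covered in the original post, but the standard urllib3 call for doing so looks like this:

import requests
import urllib3

# suppress the InsecureRequestWarning raised when certificate verification is disabled
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)

response = requests.get(url='http://www.123.com', verify=False)
print(response.status_code)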
Garbled text
1.
import requests

url = 'http://www.baidu.com'
response = requests.get(url)
# print(type(response.text))        # <class 'str'>
# print(response.text)              # text content (encoding guessed by requests, may come out garbled)
# print(type(response.content))     # <class 'bytes'>
# print(response.content)           # raw byte stream
# print(response.content.decode())  # decode the bytes as utf-8
# response.encoding = 'utf-8'       # set the response encoding to utf-8
# print(response.text)              # no longer garbled
2.
import requests
from urllib import request
from lxml import etree

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36',
    'Connection': 'close'
}
url = "http://pic.netbian.com/4kmeinv/"
response = requests.get(url, headers=headers)
response.encoding = 'gbk'
# print(response.encoding)
page_text = response.text
# print(page_text)
tree = etree.HTML(page_text)
li_list = tree.xpath('//div[@class="slist"]/ul/li')
for li in li_list:
    img_src = 'http://pic.netbian.com' + li.xpath('./a/img/@src')[0]
    img_name = li.xpath('./a/img/@alt')[0]
    # img_name = img_name.encode('gbk').decode('utf-8')
    # img_name = img_name.encode('ISO-8859-1').decode('gbk')
    # ISO-8859-1 covers a wider range than gbk, which covers a wider range than utf-8
    print(img_src, img_name)
Other encodings are covered here:
https://www.cnblogs.com/clbao/articles/11697228.html
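Not in the original post, but worth knowing: requests can also guess the encoding from the response body itself via response.apparent_encoding, which often fixes garbled text without hard-coding 'gbk'. A small sketch:

import requests

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'
}
url = 'http://pic.netbian.com/4kmeinv/'
response = requests.get(url, headers=headers)
# encoding comes from the response headers; apparent_encoding is detected from the body
print(response.encoding, '->', response.apparent_encoding)
response.encoding = response.apparent_encoding  # usually repairs the garbled text
print(response.text[:200])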
Captchas
1. The Yundama (云打码) platform
The captcha image is sent to the Yundama platform to be solved online.
1. The SDK downloaded from the official site; no changes needed, just load it as-is:
import http.client, mimetypes, urllib, json, time, requests

######################################################################

class YDMHttp:
    apiurl = 'http://api.yundama.com/api.php'
    username = ''
    password = ''
    appid = ''
    appkey = ''

    def __init__(self, username, password, appid, appkey):
        self.username = username
        self.password = password
        self.appid = str(appid)
        self.appkey = appkey

    def request(self, fields, files=[]):
        response = self.post_url(self.apiurl, fields, files)
        response = json.loads(response)
        return response

    def balance(self):
        data = {'method': 'balance', 'username': self.username, 'password': self.password,
                'appid': self.appid, 'appkey': self.appkey}
        response = self.request(data)
        if (response):
            if (response['ret'] and response['ret'] < 0):
                return response['ret']
            else:
                return response['balance']
        else:
            return -9001

    def login(self):
        data = {'method': 'login', 'username': self.username, 'password': self.password,
                'appid': self.appid, 'appkey': self.appkey}
        response = self.request(data)
        if (response):
            if (response['ret'] and response['ret'] < 0):
                return response['ret']
            else:
                return response['uid']
        else:
            return -9001

    def upload(self, filename, codetype, timeout):
        data = {'method': 'upload', 'username': self.username, 'password': self.password,
                'appid': self.appid, 'appkey': self.appkey,
                'codetype': str(codetype), 'timeout': str(timeout)}
        file = {'file': filename}
        response = self.request(data, file)
        if (response):
            if (response['ret'] and response['ret'] < 0):
                return response['ret']
            else:
                return response['cid']
        else:
            return -9001

    def result(self, cid):
        data = {'method': 'result', 'username': self.username, 'password': self.password,
                'appid': self.appid, 'appkey': self.appkey, 'cid': str(cid)}
        response = self.request(data)
        return response and response['text'] or ''

    def decode(self, filename, codetype, timeout):
        cid = self.upload(filename, codetype, timeout)
        if (cid > 0):
            for i in range(0, timeout):
                result = self.result(cid)
                if (result != ''):
                    return cid, result
                else:
                    time.sleep(1)
            return -3003, ''
        else:
            return cid, ''

    def report(self, cid):
        data = {'method': 'report', 'username': self.username, 'password': self.password,
                'appid': self.appid, 'appkey': self.appkey, 'cid': str(cid), 'flag': '0'}
        response = self.request(data)
        if (response):
            return response['ret']
        else:
            return -9001

    def post_url(self, url, fields, files=[]):
        for key in files:
            files[key] = open(files[key], 'rb')
        res = requests.post(url, files=files, data=fields)
        return res.text
2. The wrapper called with an ordinary (non-developer) account:
def get_code_text(code_type, img_path):
    # username of an ordinary Yundama user account
    username = 'your ordinary-user username'
    # password
    password = 'your ordinary-user password'
    # software ID, required for the developer revenue share; found under "My Software" in the developer console
    appid = 6578
    # software key, required for the developer revenue share; found under "My Software" in the developer console
    appkey = '2b3ef98633145e0b478800905af4e10b'
    # image file
    filename = img_path
    # captcha type, e.g. 1004 means 4 alphanumeric characters; different types are billed differently.
    # Fill this in accurately or recognition accuracy suffers. All types: http://www.yundama.com/price.html
    codetype = code_type  # 1004
    # timeout in seconds
    timeout = 25
    # sanity check
    if (username == 'username'):
        print('please fill in the parameters above before testing')
    else:
        # initialise the SDK
        yundama = YDMHttp(username, password, appid, appkey)
        # log in to Yundama
        uid = yundama.login()
        print('uid: %s' % uid)
        # check the account balance
        balance = yundama.balance()
        print('balance: %s' % balance)
        # start recognition: image path, captcha type ID, timeout (seconds) -> recognition result
        cid, result = yundama.decode(filename, codetype, timeout)
        print('cid: %s, result: %s' % (cid, result))
        return result
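A quick usage sketch of the wrapper above; the image path is hypothetical and assumes a captcha image has already been saved locally:

# assumes ./code.jpg already contains a captcha image and the account details above are filled in
result = get_code_text(1004, './code.jpg')  # 1004 = 4 alphanumeric characters
print('recognised captcha:', result)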
Simulating a login and then scraping the personal profile page
import requests
from lxml import etree
from urllib import request

# get a session object
session = requests.Session()
# A Session object works almost exactly like the requests module: it sends requests the same way,
# but if a request produces cookies, they are stored on the session object automatically.

# 1. fetch the captcha image
url = 'http://www.renren.com/'
headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'
}
page_text = session.get(url=url, headers=headers).text
tree = etree.HTML(page_text)
code_img_src = tree.xpath('//*[@id="verifyPic_login"]/@src')[0]
request.urlretrieve(url=code_img_src, filename='./code.jpg')  # save the captcha locally
code = get_code_text('2004', './code.jpg')  # get the captcha text

# simulated login
login_url = 'http://www.renren.com/ajaxLogin/login?1=1&uniqueTimestamp=2019031945879'
# the fields in data have to be captured with a packet-capture tool
data = {
    "email": 'your Renren account',
    "icode": code if code else '',
    "origURL": "http://www.renren.com/home",
    "domain": "renren.com",
    "key_id": "1",
    "captcha_type": "web_login",
    "password": 'the encrypted password for the account, captured with a packet-capture tool',
    "rkey": "d4287c72b3f7ddf41b62170adec10265",
    "f": "http%3A%2F%2Fwww.renren.com%2F969397225",
}
print(session)
# log in; once the login succeeds the cookies are held by the session
response = session.post(url=login_url, headers=headers, data=data)

# request the current user's profile page after the successful login
detail_url = "http://www.renren.com/969397225/profile"  # personal profile URL
page_text = session.get(url=detail_url, headers=headers).text
with open('./renren.html', 'w', encoding='utf-8') as fp:
    fp.write(page_text)
print('over')
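The script above never checks whether the login actually worked. A rough check that could follow the code above; the markers it looks for (HTTP 200 and the profile ID appearing in the page) are assumptions about how Renren responds, not something the original post confirms:

# rough success check, appended after the requests above
print('login status code:', response.status_code)
if '969397225' in page_text:
    print('profile page fetched - the cookies in the session are valid')
else:
    print('login may have failed - re-check the captcha result and the encrypted password')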
Gushiwen (古诗文网)
Step 1: call Yundama exactly as in the Renren example.
Main code
import requests
from lxml import etree
from urllib import request

# get a session object
session = requests.Session()
# A Session object works almost exactly like the requests module: it sends requests the same way,
# but if a request produces cookies, they are stored on the session object automatically.

headers = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36'
}
url = 'https://so.gushiwen.org/user/login.aspx?from=http://so.gushiwen.org/user/collect.aspx'
page_text = requests.get(url=url, headers=headers).text
# print(page_text)
tree = etree.HTML(page_text)
code_img_src = 'https://so.gushiwen.org' + tree.xpath('//*[@id="imgCode"]/@src')[0]  # URL that generates the captcha
img_data = session.get(url=code_img_src, headers=headers).content
# print(img_data)
with open('./gushi.jpg', "wb") as fp:
    fp.write(img_data)
code_text = get_code_text('1004', './gushi.jpg')
print(code_text)

# these two fields change on every visit, so fetch them fresh each time
__VIEWSTATE = tree.xpath('//*[@id="__VIEWSTATE"]/@value')[0]
__VIEWSTATEGENERATOR = tree.xpath('//*[@id="__VIEWSTATEGENERATOR"]/@value')[0]

login_url = "https://so.gushiwen.org/user/login.aspx?from=http%3a%2f%2fso.gushiwen.org%2fuser%2fcollect.aspx"
data = {
    "__VIEWSTATE": __VIEWSTATE,                    # fetched dynamically
    "__VIEWSTATEGENERATOR": __VIEWSTATEGENERATOR,  # fetched dynamically
    "from": "http://so.gushiwen.org/user/collect.aspx",
    "email": "your gushiwen account",
    "pwd": "your gushiwen password",
    "code": code_text,
    "denglu": "登录",  # literal value of the login button, as captured from the form
}
page_text = session.post(url=login_url, headers=headers, data=data).text
with open('./gushiwen.html', 'w', encoding='utf-8') as fp:
    fp.write(page_text)
print('ok')
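As with Renren, it helps to confirm the gushiwen login before scraping further. A minimal sketch; the '退出登录' (log out) marker is an assumption about what the logged-in collection page contains, not something verified in the original post:

# appended after the code above: if the login worked, the collection page should show a log-out link
if '退出登录' in page_text:
    print('logged in successfully')
else:
    print('login failed - the captcha was probably recognised incorrectly; fetch a new one and retry')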