Overview
Most websites you crawl have some anti-scraping measures in place. Some limit how fast or how many times a single IP may visit; exceed the limit and your IP gets banned. The speed limit is easy to respect: just pause between requests so you never hammer the site. The request-count limit is where proxy IPs come in: rotating through several proxies when visiting the target site works around the per-IP cap.
There are plenty of proxy-service websites online; they also hand out some free proxies, but those tend to be unreliable. If your needs are serious, paid proxies are far more dependable.
So we can build a proxy pool of our own: harvest proxy IPs from the various proxy sites, verify that each one works (test against a stable URL, ideally the very site you intend to crawl), store the survivors, and pull one out whenever it is needed.
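In the requests library, routing traffic through a proxy is just a matter of passing a proxies mapping, so rotation boils down to picking a different entry per request. A minimal sketch of the idea (the proxy addresses below are placeholders, not real proxies):

import random
import requests

proxy_pool = ["http://203.0.113.5:8080", "http://198.51.100.7:3128"]  # placeholders
proxy = random.choice(proxy_pool)  # pick a different proxy per request
resp = requests.get("http://httpbin.org/ip",
                    proxies={"http": proxy, "https": proxy},
                    timeout=5)
print(resp.json())  # should echo the proxy's IP, not yours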
Websites offering free proxies
This walkthrough uses 小幻代理 (ip.ihuan.me) as its source.
Code
Imports
import random
import time

import loguru
import requests
from lxml import etree
from concurrent.futures import ThreadPoolExecutor
Page URLs of the site
小幻代理's page URLs follow no predictable pattern, so they have to be discovered one at a time: every fetched page exposes further page links in its pagination bar, and the loop below always requests the most recently discovered URL.
def get_url():
    """Collect page URLs by walking the pagination links one by one."""
    print("Building the IP pool, this may take a moment...")
    for i in range(random.randint(10, 20)):
        time.sleep(1)  # throttle so our own IP does not get banned
        # start from the home page, then always follow the newest discovered page
        url = "https://ip.ihuan.me/" if i == 0 else url_list[-1]
        try:
            resp = requests.get(url=url, headers=headers_test, timeout=10)
        except Exception as e:
            print(e)
            break
        html = etree.HTML(resp.text)
        ul = html.xpath('//ul[@class="pagination"]')
        ul_num = html.xpath('//ul[@class="pagination"]/li')
        for j in range(len(ul_num)):
            # skip the first and last <li> (the "prev"/"next" buttons)
            if j != 0 and j != len(ul_num) - 1:
                a = ul[0].xpath(f"./li[{j + 1}]/a/@href")[0]
                page = "https://ip.ihuan.me/" + a
                if page not in url_list:  # avoid re-crawling the same page
                    url_list.append(page)
        loguru.logger.info(f"done, {url}")
IP addresses
def get_ip():
    """Scrape IP, port and protocol from every collected page."""
    for page in url_list:
        time.sleep(1)  # throttle between pages
        resp = requests.get(url=page, headers=headers)
        html = etree.HTML(resp.text)
        rows = html.xpath("//tbody/tr")
        for tr in rows:
            ip = tr.xpath("./td[1]//text()")[0]  # IP address
            pt = tr.xpath("./td[2]//text()")[0]  # port
            # column 5 states HTTPS support; "不支持" means "not supported"
            tp = "http" if tr.xpath("./td[5]//text()")[0] == "不支持" else "https"
            ip_list.append({"type": tp, "proxy": f"{ip}:{pt}"})
    loguru.logger.info("finished collecting proxy addresses")
Testing
# (defined inside set_ip, so it can see url and url_test)
def test_ip(ip):
    proxy_test = {
        "http": ip,
        "https": ip,
    }
    try:
        resp = requests.get(url=url_test, headers=headers, proxies=proxy_test, timeout=6)
        # httpbin echoes the requesting IP; a match means the proxy works
        if resp.json()["origin"] == ip.split(":")[0]:
            temp_ip.append({"type": url.split(":")[0], "proxy": ip})
    except Exception:
        pass  # dead or unreachable proxy, just drop it
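The origin check above works because httpbin.org/ip simply echoes back the IP a request came from as a small JSON document; if it reports the proxy's address instead of ours, the proxy really forwarded the traffic. Hitting the endpoint without a proxy shows the shape of the response:

import requests

resp = requests.get("http://httpbin.org/ip", timeout=6)
print(resp.json())  # e.g. {'origin': '203.0.113.9'}, the caller's public IP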
Putting it together
def set_ip(url) -> dict:
    """Dynamically build the proxy pool and return one working proxy."""
    # reuse proxies cached on disk from a previous run (the ./app directory must exist)
    try:
        with open('./app/ip.txt', 'r') as f:
            for j in eval(f.read()):  # the file holds the repr of a list of dicts
                temp_ip.append(j)
    except Exception:
        print("No cached proxies; building the pool, please wait")
    if not temp_ip:
        print("No proxy addresses yet, fetching them now")
        get_url()
    else:
        # promote the cached entries, then refresh the pool on top of them
        for i in temp_ip:
            ip_list.append(i)
        temp_ip.clear()
    get_ip()
    with open('./app/ip.txt', 'w') as file:
        file.write(str(ip_list))  # write() needs a string, not a list
    # keep only proxies whose protocol matches the target URL's scheme
    ip_able = list(set(j["proxy"] for j in ip_list if j["type"] == url.split(":")[0]))
    url_test = "http://httpbin.org/ip" if url.split(":")[0] == "http" else "https://httpbin.org/ip"

    def test_ip(ip):
        proxy_test = {
            "http": ip,
            "https": ip,
        }
        try:
            resp = requests.get(url=url_test, headers=headers, proxies=proxy_test, timeout=6)
            # httpbin echoes the requesting IP; a match means the proxy works
            if resp.json()["origin"] == ip.split(":")[0]:
                temp_ip.append({"type": url.split(":")[0], "proxy": ip})
        except Exception:
            pass  # dead or unreachable proxy, just drop it

    with ThreadPoolExecutor(50) as pool:
        pool.map(test_ip, ip_able)
    # the with block waits for all workers; ThreadPoolExecutor has no join()
    print("Testing finished")
    if temp_ip:
        i = random.choice(temp_ip)
        return {
            "http": i['proxy'],
            "https": i['proxy'],
        }
    return set_ip(url=url)  # nothing survived: rebuild the pool and retry
Required parameters
headers = {
    'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.93 Safari/537.36",
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9"
}
headers_test = {
    'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.93 Safari/537.36",
    "accept-encoding": "gzip, deflate, br",
    "cookie": "Hm_lvt_8ccd0ef22095c2eebfe4cd6187dea829=1642389014,1642412091",
    "Referer": "https://ip.ihuan.me/"
}
# seed page, harvested proxies, verified proxies
url_list, ip_list, temp_ip = ["https://ip.ihuan.me/"], [], []
Complete code
import random
import time

import loguru
import requests
from lxml import etree
from concurrent.futures import ThreadPoolExecutor


def get_url():
    """Collect page URLs by walking the pagination links one by one."""
    print("Building the IP pool, this may take a moment...")
    for i in range(random.randint(10, 20)):
        time.sleep(1)  # throttle so our own IP does not get banned
        # start from the home page, then always follow the newest discovered page
        url = "https://ip.ihuan.me/" if i == 0 else url_list[-1]
        try:
            resp = requests.get(url=url, headers=headers_test, timeout=10)
        except Exception as e:
            print(e)
            break
        html = etree.HTML(resp.text)
        ul = html.xpath('//ul[@class="pagination"]')
        ul_num = html.xpath('//ul[@class="pagination"]/li')
        for j in range(len(ul_num)):
            # skip the first and last <li> (the "prev"/"next" buttons)
            if j != 0 and j != len(ul_num) - 1:
                a = ul[0].xpath(f"./li[{j + 1}]/a/@href")[0]
                page = "https://ip.ihuan.me/" + a
                if page not in url_list:  # avoid re-crawling the same page
                    url_list.append(page)
        loguru.logger.info(f"done, {url}")


def get_ip():
    """Scrape IP, port and protocol from every collected page."""
    for page in url_list:
        time.sleep(1)  # throttle between pages
        resp = requests.get(url=page, headers=headers)
        html = etree.HTML(resp.text)
        rows = html.xpath("//tbody/tr")
        for tr in rows:
            ip = tr.xpath("./td[1]//text()")[0]  # IP address
            pt = tr.xpath("./td[2]//text()")[0]  # port
            # column 5 states HTTPS support; "不支持" means "not supported"
            tp = "http" if tr.xpath("./td[5]//text()")[0] == "不支持" else "https"
            ip_list.append({"type": tp, "proxy": f"{ip}:{pt}"})
    loguru.logger.info("finished collecting proxy addresses")


def set_ip(url) -> dict:
    """Dynamically build the proxy pool and return one working proxy."""
    # reuse proxies cached on disk from a previous run (the ./app directory must exist)
    try:
        with open('./app/ip.txt', 'r') as f:
            for j in eval(f.read()):  # the file holds the repr of a list of dicts
                temp_ip.append(j)
    except Exception:
        print("No cached proxies; building the pool, please wait")
    if not temp_ip:
        print("No proxy addresses yet, fetching them now")
        get_url()
    else:
        # promote the cached entries, then refresh the pool on top of them
        for i in temp_ip:
            ip_list.append(i)
        temp_ip.clear()
    get_ip()
    with open('./app/ip.txt', 'w') as file:
        file.write(str(ip_list))  # write() needs a string, not a list
    # keep only proxies whose protocol matches the target URL's scheme
    ip_able = list(set(j["proxy"] for j in ip_list if j["type"] == url.split(":")[0]))
    url_test = "http://httpbin.org/ip" if url.split(":")[0] == "http" else "https://httpbin.org/ip"

    def test_ip(ip):
        proxy_test = {
            "http": ip,
            "https": ip,
        }
        try:
            resp = requests.get(url=url_test, headers=headers, proxies=proxy_test, timeout=6)
            # httpbin echoes the requesting IP; a match means the proxy works
            if resp.json()["origin"] == ip.split(":")[0]:
                temp_ip.append({"type": url.split(":")[0], "proxy": ip})
        except Exception:
            pass  # dead or unreachable proxy, just drop it

    with ThreadPoolExecutor(50) as pool:
        pool.map(test_ip, ip_able)
    # the with block waits for all workers; ThreadPoolExecutor has no join()
    print("Testing finished")
    if temp_ip:
        i = random.choice(temp_ip)
        return {
            "http": i['proxy'],
            "https": i['proxy'],
        }
    return set_ip(url=url)  # nothing survived: rebuild the pool and retry


headers = {
    'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.93 Safari/537.36",
    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9"
}
headers_test = {
    'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.93 Safari/537.36",
    "accept-encoding": "gzip, deflate, br",
    "cookie": "Hm_lvt_8ccd0ef22095c2eebfe4cd6187dea829=1642389014,1642412091",
    "Referer": "https://ip.ihuan.me/"
}
# seed page, harvested proxies, verified proxies
url_list, ip_list, temp_ip = ["https://ip.ihuan.me/"], [], []

if __name__ == '__main__':
    proxy = set_ip(url="https://www.baidu.com")
    print(proxy)
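The dict that set_ip returns plugs straight into the proxies parameter of requests, so an actual crawl through the pool looks something like this sketch (the target URL is only an example):

proxy = set_ip(url="https://www.baidu.com")
resp = requests.get("https://www.baidu.com", headers=headers,
                    proxies=proxy, timeout=10)
print(resp.status_code)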
Summary
If a database is available, the harvested IPs can be stored there instead; the code above keeps them in a local file. Either way, pace your requests so that your own IP does not end up banned.
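For the database variant mentioned above, a minimal sketch with the standard-library sqlite3 module could replace the ip.txt read/write (the file name and table layout here are made up for illustration):

import sqlite3

conn = sqlite3.connect("./app/ip.db")  # hypothetical database file
conn.execute("CREATE TABLE IF NOT EXISTS proxies (type TEXT, proxy TEXT UNIQUE)")
# persist the harvested pool instead of writing ip.txt
conn.executemany("INSERT OR IGNORE INTO proxies VALUES (:type, :proxy)", ip_list)
conn.commit()
# later, pull back only the proxies matching the target scheme
rows = conn.execute("SELECT proxy FROM proxies WHERE type = ?", ("https",)).fetchall()
conn.close()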