Scanning proxies with Python and building a list of usable proxy IPs
Code that works on macOS or Linux:
```python
# coding=utf-8
import requests
import re
from bs4 import BeautifulSoup as bs
import Queue
import threading
import random

# pools of Referer and User-Agent values to rotate through on each request
headers_useragents = []
headers_referers = []
headers_referers.append('http://www.google.com/?q=')
headers_referers.append('http://www.usatoday.com/search/results?q=')
headers_referers.append('http://engadget.search.aol.com/search?q=')
headers_useragents.append('Mozilla/5.0 (X11; U; Linux x86_64; en-US; rv:1.9.1.3) Gecko/20090913 Firefox/3.5.3')
headers_useragents.append('Mozilla/5.0 (Windows; U; Windows NT 6.1; en; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)')
headers_useragents.append('Mozilla/5.0 (Windows; U; Windows NT 5.2; en-US; rv:1.9.1.3) Gecko/20090824 Firefox/3.5.3 (.NET CLR 3.5.30729)')
headers_useragents.append('Mozilla/5.0 (Windows; U; Windows NT 6.1; en-US; rv:1.9.1.1) Gecko/20090718 Firefox/3.5.1')
headers_useragents.append('Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US) AppleWebKit/532.1 (KHTML, like Gecko) Chrome/4.0.219.6 Safari/532.1')
headers_useragents.append('Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.1; WOW64; Trident/4.0; SLCC2; .NET CLR 2.0.50727; InfoPath.2)')
headers_useragents.append('Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0; SLCC1; .NET CLR 2.0.50727; .NET CLR 1.1.4322; .NET CLR 3.5.30729; .NET CLR 3.0.30729)')
headers_useragents.append('Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.2; Win64; x64; Trident/4.0)')
headers_useragents.append('Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 5.1; Trident/4.0; SV1; .NET CLR 2.0.50727; InfoPath.2)')
headers_useragents.append('Mozilla/5.0 (Windows; U; MSIE 7.0; Windows NT 6.0; en-US)')
headers_useragents.append('Mozilla/4.0 (compatible; MSIE 6.1; Windows XP)')
headers_useragents.append('Opera/9.80 (Windows NT 5.2; U; ru) Presto/2.5.22 Version/10.51')


class proxyPick(threading.Thread):
    def __init__(self, queue):
        threading.Thread.__init__(self)
        self._queue = queue

    def run(self):
        while not self._queue.empty():
            url = self._queue.get()
            proxy_spider(url)


def proxy_spider(url):
    # build a randomized, browser-like header so the listing site is less likely to block us
    headers = {}
    headers['User-Agent'] = random.choice(headers_useragents)
    headers['Cache-Control'] = 'no-cache'
    headers['Accept-Charset'] = 'ISO-8859-1,utf-8;q=0.7,*;q=0.7'
    headers['Referer'] = random.choice(headers_referers) + str(random.randint(5, 10))
    headers['Keep-Alive'] = str(random.randint(110, 120))
    headers['Connection'] = 'keep-alive'

    r = requests.get(url=url, headers=headers)
    soup = bs(r.content, "html.parser")
    data = soup.find_all(name='tr', attrs={'class': re.compile('|[^odd]')})
    for i in data:
        soup = bs(str(i), 'html.parser')
        data2 = soup.find_all(name='td')
        ip = str(data2[1].string)
        port = str(data2[2].string)
        types = str(data2[5].string).lower()
        proxy = {}
        proxy[types] = '%s:%s' % (ip, port)
        print proxy, " check proxy"
        try:
            proxy_check(proxy, ip)
        except Exception, e:
            print e
            pass


def proxy_check(proxy, ip):
    # url = 'http://1212.ip138.com/ic.asp'
    # url = 'https://www.ipip.net/ip.html'
    # url = 'http://www.baid.com'
    # url = 'http://ip138.com/'
    url = 'http://2018.ip138.com/ic.asp'
    r = requests.get(url=url, proxies=proxy, timeout=6)
    # r.encoding = 'gb2312'   # needed for url = 'http://ip138.com/'
    reip = r'\[(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\]'
    # print r.text
    f = open('ip_proxy.txt', 'a+')
    found = re.search(reip, r.text, re.M | re.I)
    if found:
        ip2 = found.group(1)
        print "ip==> : ", ip2
        if ip2 == ip:
            print "*" * 30
            print "ip is wanted:", ip
            f.write('%s' % proxy + '\n')
            print "*" * 30
            # import sys
            # sys.exit(0)
    f.close()


# proxy_spider()

def main():
    queue = Queue.Queue()
    for i in range(1, 2288):
        queue.put('http://www.xicidaili.com/nn/' + str(i))
    threads = []
    thread_count = 10
    for i in range(thread_count):
        spider = proxyPick(queue)
        threads.append(spider)
    for i in threads:
        i.start()
    for i in threads:
        i.join()
    print "It's down,sir!"


if __name__ == '__main__':
    main()
```
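Note that the listing above (and the second one later in the post) is Python 2 code: print statements, the Queue module, and the old except syntax. It will not run as-is under Python 3. If you want to try it on Python 3, the mechanical changes are roughly the ones in this sketch, not a full port:

```python
# Python 3 equivalents of the Python 2 constructs used above (sketch only)
import queue                # Python 2's "Queue" module is named "queue" in Python 3

q = queue.Queue()           # the Queue API itself (put/get/empty) is unchanged

try:
    raise ValueError('demo')
except Exception as e:      # "except Exception, e" is a syntax error in Python 3
    print(e)                # print is a function in Python 3: print(...)
```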
In the end only a handful of usable proxies came out of it, which is a bit frustrating:
```
{'http': '118.190.95.43:9001'}
{'http': '118.190.95.35:9001'}
{'http': '101.236.35.98:8866'}
{'http': '101.236.23.202:8866'}
{'http': '175.11.213.25:808'}
{'http': '106.56.102.141:8070'}
{'http': '121.231.155.161:6666'}
{'http': '120.25.81.117:80'}
{'http': '117.63.78.100:6666'}
{'http': '111.231.115.150:8888'}
{'http': '222.185.23.251:6666'}
{'http': '106.56.102.107:8070'}
{'http': '118.114.77.47:8080'}
{'http': '115.28.90.79:9001'}
{'http': '58.57.75.142:63000'}
```
An example of scanning proxies with Python and collecting usable proxy IPs
Today we'll write a fairly practical tool: a scanner that collects usable proxies.
First, I found a site via Baidu to use as the example: http://www.xicidaili.com
The site publishes the IPs and ports of a large number of usable proxies, both domestic and foreign.
As usual, we start by analyzing the site; let's scan all the domestic proxies first.
Opening the domestic section and inspecting it, we find that the domestic proxy pages live at URLs of the form:
http://www.xicidaili.com/nn/x
x runs to over two thousand pages, so it looks like we'll need threads again...
As usual, we first try whether the simplest possible requests.get() can fetch the page.
It returns 503, so we add a simple headers dict.
Now it returns 200, and we're in.
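For illustration, here is a minimal sketch of that probe; the User-Agent string is just an example, and the exact status codes depend on how the site happens to behave:

```python
import requests

url = 'http://www.xicidaili.com/nn/1'

# bare request: the site answers with 503
r = requests.get(url)
print(r.status_code)

# the same request with a simple browser-style User-Agent gets a 200
headers = {'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:52.0) Gecko/20100101 Firefox/52.0'}
r = requests.get(url, headers=headers)
print(r.status_code)
```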
Good, now let's analyze the page and pull out the content we want.
We find that the IP information sits inside <tr> tags, so BeautifulSoup can extract it for us easily.
We then notice that the IP, port, and protocol live in the 2nd, 3rd, and 6th <td> tags of each extracted <tr>.
```python
r = requests.get(url=url, headers=headers)
soup = bs(r.content, "html.parser")
data = soup.find_all(name='tr', attrs={'class': re.compile('|[^odd]')})

for i in data:
    soup = bs(str(i), 'html.parser')
    data2 = soup.find_all(name='td')
    ip = str(data2[1].string)
    port = str(data2[2].string)
    types = str(data2[5].string).lower()
    proxy = {}
    proxy[types] = '%s:%s' % (ip, port)
```
This way, each loop iteration builds the corresponding proxy dict, which we will use next to verify whether the IP actually works.
One thing to note about this dict: we lowercase types because the protocol names passed in the proxies argument of requests.get() must be lowercase, while the page lists them in uppercase, hence the conversion.
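As a small illustration of that lowercase point (the address below is made up):

```python
ip, port, types = '1.2.3.4', '8080', 'HTTP'   # hypothetical values scraped from the page

# requests expects lowercase scheme keys such as 'http' / 'https' in the proxies dict
proxy = {types.lower(): '%s:%s' % (ip, port)}
print(proxy)    # {'http': '1.2.3.4:8080'}

# this dict is exactly what gets passed to requests.get(..., proxies=proxy) later
```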
So what is the idea for verifying that an IP works?
Very simple: we issue a GET through our proxy against this site:
http://1212.ip138.com/ic.asp
It is a handy little site that tells you what your external IP is.
```python
url = 'http://1212.ip138.com/ic.asp'
r = requests.get(url=url, proxies=proxy, timeout=6)
```
Here we need a timeout to weed out proxies that take too long to respond; I set it to 6 seconds.
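Since a lot of these proxies simply hang or refuse connections, it can also help to catch the timeout explicitly instead of relying on a bare except; a sketch, with a made-up proxy address:

```python
import requests

proxy = {'http': '1.2.3.4:8080'}        # hypothetical proxy
url = 'http://1212.ip138.com/ic.asp'

try:
    r = requests.get(url, proxies=proxy, timeout=6)
    print(r.status_code)
except requests.exceptions.Timeout:
    print('proxy too slow, dropped')    # took longer than 6 seconds
except requests.exceptions.RequestException as e:
    print(e)                            # refused connection, bad proxy, etc.
```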
We try it with one IP and inspect the page that comes back.
The response looks like this:
```html
<html>
<head>
<meta xxxxxxxxxxxxxxxxxx>
<title>您的IP地址</title>
</head>
<body style="margin:0px"><center>您的IP是:[xxx.xxx.xxx.xxx] 来自:xxxxxxxx</center></body>
</html>
```
So all we need to do is extract the content inside the [] on that page ("您的IP是" means "your IP is").
If our proxy works, the page returns the proxy's IP.
(Sometimes the returned address is still our own external IP. I am not entirely sure why, but I treat that case as a failure too; most likely the proxy simply is not working.)
So we can add a check: if the returned IP equals the IP in the proxy dict, we consider that proxy usable and write it to a file.
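A small sketch of that extraction and comparison, using a made-up response body in place of the real ic.asp page:

```python
import re

ip = '1.2.3.4'                                       # IP of the proxy the request went through (hypothetical)
text = '<center>Your IP: [1.2.3.4] from: somewhere</center>'   # stand-in for the ic.asp response body

m = re.search(r'\[(\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})\]', text)
if m and m.group(1) == ip:
    print('usable proxy: ' + ip)                     # echoed IP matches the proxy, so keep it
```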
That is the whole idea; all that is left is to wire it up with Queue and threading.
Here is the code:
```python
# coding=utf-8
import requests
import re
from bs4 import BeautifulSoup as bs
import Queue
import threading


class proxyPick(threading.Thread):
    def __init__(self, queue):
        threading.Thread.__init__(self)
        self._queue = queue

    def run(self):
        while not self._queue.empty():
            url = self._queue.get()
            proxy_spider(url)


def proxy_spider(url):
    headers = {
        # .......
    }
    r = requests.get(url=url, headers=headers)
    soup = bs(r.content, "html.parser")
    data = soup.find_all(name='tr', attrs={'class': re.compile('|[^odd]')})
    for i in data:
        soup = bs(str(i), 'html.parser')
        data2 = soup.find_all(name='td')
        ip = str(data2[1].string)
        port = str(data2[2].string)
        types = str(data2[5].string).lower()
        proxy = {}
        proxy[types] = '%s:%s' % (ip, port)
        try:
            proxy_check(proxy, ip)
        except Exception, e:
            print e
            pass


def proxy_check(proxy, ip):
    url = 'http://1212.ip138.com/ic.asp'
    r = requests.get(url=url, proxies=proxy, timeout=6)
    f = open('E:/url/ip_proxy.txt', 'a+')
    soup = bs(r.text, 'html.parser')
    data = soup.find_all(name='center')
    for i in data:
        a = re.findall(r'\[(.*?)\]', i.string)
        if a[0] == ip:
            # print proxy
            f.write('%s' % proxy + '\n')
            print 'write down'
    f.close()


# proxy_spider()

def main():
    queue = Queue.Queue()
    for i in range(1, 2288):
        queue.put('http://www.xicidaili.com/nn/' + str(i))
    threads = []
    thread_count = 10
    for i in range(thread_count):
        spider = proxyPick(queue)
        threads.append(spider)
    for i in threads:
        i.start()
    for i in threads:
        i.join()
    print "It's down,sir!"


if __name__ == '__main__':
    main()
```
With that, all the usable proxy IPs offered by the site get written to the file ip_proxy.txt.
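One caveat on the output format: the script writes each proxy dict with str(), so ip_proxy.txt ends up with lines like {'http': '118.190.95.43:9001'}. If you want to reuse them later, one way to read them back (a sketch, assuming that exact file format) is:

```python
import ast
import requests

# each line in ip_proxy.txt is the repr of a dict, e.g. {'http': '118.190.95.43:9001'}
with open('ip_proxy.txt') as f:
    proxies_list = [ast.literal_eval(line.strip()) for line in f if line.strip()]

for proxy in proxies_list:
    try:
        r = requests.get('http://2018.ip138.com/ic.asp', proxies=proxy, timeout=6)
        print(r.status_code)
    except requests.exceptions.RequestException:
        print('proxy no longer works: %s' % proxy)
```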
That is the whole example of scanning proxies with Python and collecting usable proxy IPs; I hope it serves as a useful reference.