# 爬虫使用代理+头信息 (Crawler using a proxy + request headers)

#coding:utf-8
import urllib2

def url_user_agent(url):
    """Fetch *url* through a local HTTP proxy, sending browser-like headers.

    Installs a process-global urllib2 opener that routes HTTP traffic
    through the proxy at 127.0.0.1:8080, then requests the page with a
    spoofed User-Agent and X-Forwarded-For header.

    :param url: the page to download.
    :return: the page body as a string, or None when the server
             redirected us to a different URL.
    """
    proxy = {'http': '127.0.0.1:8080'}
    proxy_support = urllib2.ProxyHandler(proxy)
    # Swap in the commented build_opener call to dump the raw HTTP
    # request/response on stdout for debugging.
    # opener = urllib2.build_opener(proxy_support, urllib2.HTTPHandler(debuglevel=1))
    opener = urllib2.build_opener(proxy_support)
    urllib2.install_opener(opener)

    # Fix: the de-facto standard header is 'X-Forwarded-For';
    # the original 'X-Forward-For' is not recognized by servers.
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1)',
               'X-Forwarded-For': '127.0.0.1'}
    req = urllib2.Request(url, headers=headers)
    html = urllib2.urlopen(req)
    try:
        # geturl() differs from the requested url after a redirect;
        # in that case we deliberately return None (original behavior).
        if url == html.geturl():
            return html.read()
        return None
    finally:
        # Fix: the response object was previously never closed (leak).
        html.close()

# Demo driver: fetch the Baidu front page through the local proxy
# configured inside url_user_agent and print the HTML (or None if the
# request was redirected).
url = 'http://www.baidu.com'
doc = url_user_agent(url)
print doc

  

# posted @ 2016-12-15 11:05  tzuxung  阅读(301)  评论(0编辑  收藏  举报