python中运用urllib.request简单抓取网页数据

urllib.request

1、抓取百度首页

 1 from urllib import request
 2 
 3 #url
 4 #根据url获取数据,下载数据到本地
 5 #正则orXpath处理数据
 6 #数据转储
 7 
 8 url = 'http://www.baidu.com/'
 9 #获取数据
10 response = request.urlopen(url)
11 #读取数据
12 html_bytes = response.read()
13 #数据写入文件
14 with open('html_bytes','wb') as fb:
15     fb.write(html_bytes)
16 # print(response.read().decode('utf-8'))

2、抓取西刺代理首页

from urllib import request
from urllib.error import HTTPError, URLError
# url = 'http://www.xicidaili.com/'
# #若出现urllib.error.HTTPError: HTTP Error 503: Service Temporarily Unavailable
# #用以下方式处理,参数中添加form
# user_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
# headers = {
#     'User-Agent': user_agent
# }
# #定义Request
# req = request.Request(url,headers=headers)
# response = request.urlopen(req)
# print(response.read().decode('utf-8'))
#封装成函数
def urlrequest(url, headers=None):
    """Fetch *url* and return the response body as bytes.

    headers: optional dict of HTTP headers.  When omitted, a desktop
    Chrome User-Agent is supplied, because some sites (e.g. xicidaili)
    answer urllib's default UA with "503 Service Temporarily Unavailable".

    On HTTPError/URLError the exception object itself is returned, so
    callers must check the result type before calling .decode() on it.
    """
    if headers is None:
        # Only build the default header when the caller gave none.
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
        }
    try:
        req = request.Request(url, headers=headers)
        response = request.urlopen(req)
        return response.read()
    except URLError as e:  # HTTPError is a subclass of URLError; both arms returned e
        return e
    
if __name__ == '__main__':
    # Smoke test: fetch two pages and print the decoded HTML.
    # (Original had both print calls fused on one unindented line — SyntaxError.)
    print(urlrequest('http://www.baidu.com/').decode('utf-8'))
    print(urlrequest('http://www.xicidaili.com/').decode('utf-8'))

3、GET、POST请求

from urllib import request, parse
from urllib.error import HTTPError, URLError
import json

#get请求
def get(url, headers=None):
    """GET helper: delegate to urlrequest with no form data."""
    return urlrequest(url, headers=headers)

#post请求
def post(url, form, headers=None):
    """POST helper: send *form* (a dict) and parse the JSON response."""
    body = urlrequest(url, form, headers=headers)
    return json.loads(body)


def urlrequest(url, form=None, headers=None):
    """Core request helper: GET *url*, or POST when *form* is given.

    form: optional dict of POST fields; it is urlencoded and sent as
          the request body (its presence is what makes urllib POST).
    headers: optional dict of HTTP headers; defaults to a desktop
          Chrome User-Agent, since some sites 503 the urllib default.

    Returns the response body as bytes, or b'' when the request fails
    (the HTTPError/URLError is printed, not raised).
    """
    html_bytes = b''
    if headers is None:
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
        }
    try:
        if form:
            # dict -> urlencoded str -> utf-8 bytes for the POST body.
            form_bytes = parse.urlencode(form).encode('utf-8')
            req = request.Request(url, data=form_bytes, headers=headers)
        else:
            req = request.Request(url, headers=headers)
        response = request.urlopen(req)
        html_bytes = response.read()
    except URLError as e:  # HTTPError is a subclass of URLError; both arms printed e
        print(e)
    return html_bytes

if __name__ == '__main__':
    # POST test: Baidu Translate's suggestion endpoint returns JSON,
    # so post() can json.loads() it into a dict.
    url = 'http://fanyi.baidu.com/sug/'
    form = {
        'kw':'呵呵'
    }
    html_dict = post(url,form=form)
    print(html_dict)
    # First suggestion's translation text.
    print(html_dict['data'][0]['v'])
    
    # GET test: plain page fetch, decoded as UTF-8.
    url = 'http://www.python.org'
    html_bytes = get(url)
    print(html_bytes.decode('utf-8'))

 4、有道翻译加盐破解

import time, random, json
from day01.tuozhan_all import post
#有道翻译加盐破解

def md5_my(need_str):
    """Return the hex MD5 digest of *need_str* (a str)."""
    import hashlib
    # MD5 operates on bytes, so encode before hashing.
    digest = hashlib.md5(need_str.encode('utf-8'))
    return digest.hexdigest()

def trans(kw):
    """Translate *kw* through the Youdao web API, reproducing its
    salted signature so the endpoint accepts the request.

    sign = md5("fanyideskweb" + kw + salt + secret), mirroring the
    site's JavaScript; salt is the JS expression
    (new Date).getTime() + parseInt(10 * Math.random(), 10).
    Returns the translated text (str).
    """
    url = 'http://fanyi.youdao.com/translate_o?smartresult=dict&smartresult=rule'
    # Browser-identical headers (Cookie/Referer included) so the
    # request looks like it came from the translation page itself.
    headers = {
            'Accept': 'application/json, text/javascript, */*; q=0.01',
            #'Accept-Encoding': 'gzip, deflate',
            'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.9',
            'Connection': 'keep-alive',
            #'Content-Length': '241',
            'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',
            'Cookie': 'OUTFOX_SEARCH_USER_ID=1371246944@10.169.0.83; OUTFOX_SEARCH_USER_ID_NCOO=175172091.56957078; JSESSIONID=aaa6Z2lldaepL-EYrr4uw; ___rl__test__cookies=1534249690001',
            'Host': 'fanyi.youdao.com',
            'Origin': 'http://fanyi.youdao.com',
            'Referer': 'http://fanyi.youdao.com/',
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36',
            'X-Requested-With': 'XMLHttpRequest'
    }
    # salt: millisecond timestamp plus a small random offset.
    salt_str = str(int(time.time() * 1000 + random.randint(0, 10)))
    prefix = "fanyideskweb"
    secret = "ebSeFb%=XZ%T[KZ)c(sy!"
    sign_md5_str = md5_my(prefix + kw + salt_str + secret)
    form = {
        'i': kw,
        'from': 'AUTO',
        'to': 'AUTO',
        'smartresult': 'dict',
        'client': 'fanyideskweb',
        'salt': salt_str,
        'sign': sign_md5_str,
        'doctype': 'json',
        'version': '2.1',
        'keyfrom': 'fanyi.web',
        'action': 'FY_BY_REALTIME',
        'typoResult': 'false',
    }
    html_bytes = post(url, form, headers=headers)
    # Decode the JSON reply into a dict.
    res_dict = json.loads(html_bytes.decode('utf-8'))
    print(res_dict)
    return res_dict['translateResult'][0][0]['tgt']

if __name__ == '__main__':
    # NOTE(review): live network smoke test against the Youdao endpoint.
    res = trans('青青河边草')
    print(res)

5、人人网首页数据抓取

from urllib import request, parse
from urllib.error import HTTPError, URLError
from http import cookiejar
import json

class session(object):
    """Minimal requests.Session-style wrapper: a cookie-aware opener
    that remembers Set-Cookie across calls, so cookies received from a
    login response are replayed on subsequent requests."""

    def __init__(self):
        # CookieJar stores cookies; HTTPCookieProcessor wires it into
        # an opener so every response's cookies are captured and every
        # later request carries them back.
        jar = cookiejar.CookieJar()
        cookie_handler = request.HTTPCookieProcessor(jar)
        self.opener = request.build_opener(cookie_handler)

    def get(self, url, headers=None):
        """GET through the cookie-carrying opener."""
        return get(url, headers, self.opener)

    def post(self, url, form, headers=None):
        """POST *form* through the cookie-carrying opener."""
        return post(url, form, headers, self.opener)
    
def get(url, headers=None, opener=None):
    """GET helper: delegate to urlrequests with no form body."""
    return urlrequests(url, form=None, headers=headers, opener=opener)
def post(url, form, headers=None, opener=None):
    """POST helper: delegate to urlrequests with *form* as the body."""
    return urlrequests(url, form=form, headers=headers, opener=opener)
def urlrequests(url, form=None, headers=None, opener=None):
    """Core request helper: GET *url*, or POST when *form* is given.

    form:    optional dict of POST fields; urlencoded and sent as the
             request body (its presence makes urllib issue a POST).
    headers: optional dict of HTTP headers; when omitted, a desktop
             Chrome User-Agent is used (some sites 503 urllib's default).
    opener:  optional OpenerDirector (e.g. the cookie-aware one built
             by `session`); falls back to request.urlopen when None.

    Returns the response body as bytes, or b'' when the request fails
    (the HTTPError/URLError is printed, not raised).
    """
    if headers is None:
        # Caller supplied no headers: provide the browser-like default.
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/67.0.3396.99 Safari/537.36'
        }
    html_bytes = b''
    try:
        if form:
            # dict -> urlencoded str -> utf-8 bytes for the POST body.
            form_bytes = parse.urlencode(form).encode('utf-8')
            req = request.Request(url, data=form_bytes, headers=headers)
        else:
            req = request.Request(url, headers=headers)
        # Route through the cookie-carrying opener when one was given.
        if opener:
            response = opener.open(req)
        else:
            response = request.urlopen(req)
        html_bytes = response.read()
    except URLError as e:  # HTTPError is a subclass of URLError; both arms printed e
        print(e)

    return html_bytes

if __name__ == '__main__':
    # Cookie-carrying login test against renren.com: log in via POST,
    # then reuse the session cookies to fetch the logged-in home page.
    url = 'http://www.renren.com/ajaxLogin/login?1=1&uniqueTimestamp=201872221746'
    form = {
        'email': '18573151260',
        'icode': '',
        'origURL': 'http: // www.renren.com / home',
        'domain': 'renren.com',
        'key_id': '1',
        'captcha_type': 'web_login',
        'password': '60a075bc14562c91baf36094ffe93d8fa9c1809a8501a43a7da131e1f119ae91',
        'rkey': 'f9cd77eb1280ab74763c1f1552dc11c7',
        'f': 'http%3A%2F%2Fwww.renren.com%2F967453528'
    }
    # The session object keeps the login cookies in its opener.
    s = session()
    res = s.post(url, form)
    # The login response is JSON; parse it to get the redirect target.
    res_dict = json.loads(res)
    home_url = res_dict['homeUrl']
    # Open the home page through the same opener so the stored cookies
    # are sent along.  (Original line was missing its closing paren.)
    response = s.opener.open(home_url)
    print(response.read().decode('utf-8'))

    # url = 'http://www.baidu.com/'
    # html_byte = get(url)
    # print(html_byte.decode('utf-8'))

 

posted @ 2018-08-13 22:11  哈哈毛毛怪  阅读(4050)  评论(0编辑  收藏  举报