Scraping the Maoyan Top 100

import urllib.request
import random
import re
import json

'''
Get around HTTP 403 responses by mimicking a real browser: rotate the
User-Agent header across requests (a sketch with explicit 403 handling
follows the list below).
'''
my_headers = [
    "Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/39.0.2171.95 Safari/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/35.0.1916.153 Safari/537.36",
    "Mozilla/5.0 (Windows NT 6.1; WOW64; rv:30.0) Gecko/20100101 Firefox/30.0",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_9_2) AppleWebKit/537.75.14 (KHTML, like Gecko) Version/7.0.3 Safari/537.75.14",
    "Mozilla/5.0 (compatible; MSIE 10.0; Windows NT 6.2; Win64; x64; Trident/6.0)"
]
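
main() below picks one of these User-Agent strings at random for each request. As a minimal sketch of the same idea with explicit 403 handling added (the fetch name is mine, not part of the original script):

import urllib.error

def fetch(url, headers):
    req = urllib.request.Request(url)
    req.add_header("User-Agent", random.choice(headers))
    try:
        with urllib.request.urlopen(req) as response:
            return response.read().decode('utf-8')
    except urllib.error.HTTPError as e:
        print('request failed with HTTP', e.code)  # e.g. 403 when the UA is rejected
        return None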

base_url = "http://maoyan.com/board/4?"

def parse_one_page(html):
    # One regex over the whole page; re.S lets '.' match across newlines.
    pattern = re.compile(r'<dd>.*?board-index.*?>(\d+)</i>.*?data-src="(.*?)".*?name"><a'
                         r'.*?>(.*?)</a>.*?star">(.*?)</p>.*?releasetime">(.*?)</p>'
                         r'.*?integer">(.*?)</i>.*?fraction">(.*?)</i>.*?</dd>', re.S)
    items = re.findall(pattern, html)
    for item in items:
        yield {
            'index': item[0],
            'image': item[1],
            'title': item[2],
            'actor': item[3].strip()[3:],  # drop the "主演:" prefix
            'time': item[4].strip()[5:],   # drop the "上映时间:" prefix
            'score': item[5] + item[6]     # integer part + fractional part of the rating
        }
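
To sanity-check the regex without hitting the site, feed parse_one_page a hand-written fragment. The snippet below is hypothetical, shaped like the board markup the pattern expects rather than copied from Maoyan:

sample = '''
<dd>
  <i class="board-index">1</i>
  <img data-src="http://example.com/poster.jpg">
  <p class="name"><a href="#">霸王别姬</a></p>
  <p class="star">主演:张国荣</p>
  <p class="releasetime">上映时间:1993-01-01</p>
  <i class="integer">9</i><i class="fraction">.5</i>
</dd>
'''
for movie in parse_one_page(sample):
    print(movie)  # expect index '1', actor '张国荣', time '1993-01-01', score '9.5'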

def write_to_file(content):
    # Append one JSON object per line; ensure_ascii=False keeps Chinese text readable.
    # The with block closes the file, so no explicit close() is needed.
    with open('result5.txt', 'a', encoding='utf-8') as f:
        f.write(json.dumps(content, ensure_ascii=False) + '\n')
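
Since each call appends exactly one JSON object per line, result5.txt reads back naturally line by line. A small helper sketch (read_results is my own name, assuming the file layout written above):

def read_results(path='result5.txt'):
    # Each line holds one JSON object; skip any blank lines.
    with open(path, encoding='utf-8') as f:
        return [json.loads(line) for line in f if line.strip()]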

'''
Loop over all 10 pages of the board (offset = 0, 10, ..., 90).
'''
def main():
    for i in range(10):
        offset = i * 10
        url = base_url + 'offset=' + str(offset)
        random_header = random.choice(my_headers)
        req = urllib.request.Request(url)
        req.add_header("User-Agent", random_header)
        response = urllib.request.urlopen(req)
        html = response.read().decode('utf-8')
        for item in parse_one_page(html):
            print(item)
            write_to_file(item)

if __name__ == '__main__':
    main()
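
The loop above requests http://maoyan.com/board/4?offset=0 through offset=90 in steps of 10. If the site starts throttling rapid requests, a hypothetical variant of main() (my suggestion, not in the original post) simply pauses between pages:

import time

def main_with_delay(delay_seconds=1):
    # Same crawl as main(), pausing between pages to stay polite.
    for i in range(10):
        url = base_url + 'offset=' + str(i * 10)
        req = urllib.request.Request(url)
        req.add_header("User-Agent", random.choice(my_headers))
        html = urllib.request.urlopen(req).read().decode('utf-8')
        for item in parse_one_page(html):
            write_to_file(item)
        time.sleep(delay_seconds)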

 

posted @ 2018-11-08 16:48  徐李帅