Web-crawler example (爬虫实例): download pages concurrently with a
multiprocessing pool and parse Maoyan movie-board entries with a regex.

code
import re
from urllib.request import urlopen
from multiprocessing import Pool
 
def get_page(url, pattern):
    """Download *url* and return ``(pattern, decoded_html)``.

    The pattern is passed through untouched so the async callback
    (``parse_page``) receives both the regex and the page it applies to.
    """
    html = urlopen(url).read().decode('utf-8')
    return pattern, html
 
def parse_page(info):
    """Callback: unpack ``(pattern, html)`` and print one dict per movie match.

    Each regex match is expected to be a 4-tuple:
    (board index, title, star line, release time); every field is
    whitespace-stripped before printing.
    """
    pattern, page_content = info
    fields = ('index', 'title', 'actor', 'time')
    for groups in re.findall(pattern, page_content):
        record = dict(zip(fields, (g.strip() for g in groups)))
        print(record)
if __name__ == '__main__':
    # Regex capturing (board index, title, star line, release time) from
    # each <dd> entry of the Maoyan board page. re.S lets '.' span newlines.
    regex = r'<dd>.*?<.*?class="board-index.*?>(\d+)</i>.*?title="(.*?)".*?class="movie-item-info".*?<p class="star">(.*?)</p>.*?<p class="releasetime">(.*?)</p>'
    pattern1 = re.compile(regex, re.S)

    # One compiled pattern per URL; extend this dict to crawl more boards.
    url_dic = {
        'http://maoyan.com/board/7': pattern1,
    }

    p = Pool()
    res_l = []
    for url, pattern in url_dic.items():
        # Workers download; parse_page runs in the parent as each finishes.
        res = p.apply_async(get_page, args=(url, pattern), callback=parse_page)
        res_l.append(res)

    p.close()  # no further tasks will be submitted
    for r in res_l:
        # Wait for completion and re-raise any exception from the worker.
        r.get()
    p.join()   # BUGFIX: reap worker processes instead of leaking them on exit
 

 

 
 
 
 
 
 
 
 
 

posted @ 2020-12-26 17:24  anobscureretreat  阅读(77)  评论(0编辑  收藏  举报