Python Web Crawler Development, Part 4: Serial Crawler Code Example

Posted on 2018-10-25 16:40
Features implemented: proxy support, download throttling, crawl-depth limiting, and basic anti-crawling measures (a custom User-Agent and robots.txt checking).
import re
from collections import deque
import urllib.parse
import urllib.robotparser
import time
from urllib import request
from datetime import datetime
 
def download(url, headers=None, proxy=None, num_retries=2, user_agent='wsap'):
    """Download the page at url, optionally through a proxy, retrying on 5xx errors."""
    print('Downloading: ' + url)
    headers = dict(headers or {})
    headers.setdefault('User-agent', user_agent)
    req = request.Request(url, headers=headers)
    opener = request.build_opener()
    if proxy:
        # Route the request through the proxy for this URL's scheme.
        opener.add_handler(request.ProxyHandler({urllib.parse.urlparse(url).scheme: proxy}))
    try:
        html = opener.open(req).read()
    except Exception as e:
        print('Download error:', getattr(e, 'reason', e))
        html = None
        if num_retries > 0 and hasattr(e, 'code') and 500 <= e.code < 600:
            # Retry only on server errors (HTTP 5xx).
            return download(url, headers, proxy, num_retries - 1)
    return html
 
def link_crawler(seed_url, link_regex=None, delay=5, max_depth=-1, max_urls=-1, headers=None, user_agent='BadCrawler', proxy=None, num_retries=1):
    """Crawl from seed_url, following links that match link_regex."""
    crawl_queue = deque([seed_url])  # URLs still waiting to be downloaded
    seen = {seed_url: 0}             # URL -> depth at which it was first seen
    num_urls = 0                     # number of URLs downloaded so far
    rp = get_robots(seed_url)        # robots.txt rules for the seed domain
    throttle = Throttle(delay)       # enforces the per-domain download delay
    headers = headers or {}
    if user_agent:
        headers['User-agent'] = user_agent
    while crawl_queue:
        url = crawl_queue.pop()
        if rp.can_fetch(user_agent, url):
            throttle.wait(url)
            html = download(url, headers=headers, proxy=proxy, num_retries=num_retries)
            links = []
            depth = seen[url]
            if depth != max_depth:
                # Skip link extraction when the download failed (html is None).
                if html and link_regex:
                    links.extend(link for link in get_links(html) if re.match(link_regex, link))
                for link in links:
                    # Resolve relative links and strip URL fragments.
                    link = normalize(seed_url, link)
                    if link not in seen:
                        seen[link] = depth + 1
                        # Only queue links that stay on the seed URL's domain.
                        if same_domain(seed_url, link):
                            crawl_queue.append(link)
            num_urls += 1
            if num_urls == max_urls:
                break
        else:
            print('Blocked by robots.txt:'+url)
 
class Throttle:
    """Add a minimum delay between downloads to the same domain."""
    def __init__(self, delay):
        self.delay = delay  # minimum seconds between requests to one domain
        self.domains = {}   # domain -> timestamp of the last request
 
    def wait(self, url):
        domain = urllib.parse.urlparse(url).netloc
        last_accessed = self.domains.get(domain)
        if self.delay > 0 and last_accessed is not None:
            sleep_secs = self.delay - (datetime.now() - last_accessed).seconds
            if sleep_secs > 0:
                time.sleep(sleep_secs)
        self.domains[domain] = datetime.now()
 
def normalize(seed_url, link):
    """Strip the fragment and resolve the link against the seed URL."""
    link, _ = urllib.parse.urldefrag(link)
    return urllib.parse.urljoin(seed_url, link)
 
def same_domain(url1, url2):
    """Return True if both URLs belong to the same domain."""
    return urllib.parse.urlparse(url1).netloc == urllib.parse.urlparse(url2).netloc
 
def get_robots(url):
    """Fetch and parse the site's robots.txt for the given URL."""
    rp = urllib.robotparser.RobotFileParser()
    rp.set_url(urllib.parse.urljoin(url, '/robots.txt'))
    rp.read()
    return rp
 
def get_links(html):
    """Return all href values found in the page's <a> tags."""
    webpage_regex = re.compile('<a[^>]+href=["\'](.*?)["\']', re.IGNORECASE)
    # Ignore undecodable bytes so a non-UTF-8 page does not abort the crawl.
    html = html.decode('utf-8', errors='ignore')
    return webpage_regex.findall(html)
 
if __name__ == '__main__':
    link_crawler('http://example.webscraping.com', '/places/default/view/', delay=0, num_retries=1, max_depth=1, user_agent='GoodCrawler')
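
The example call above disables throttling (delay=0) and leaves the proxy option at its default, so those two features from the list at the top are not exercised. Below is a minimal sketch of a throttled, proxied crawl; it is not from the original post, and the proxy address is only a placeholder:

# Hypothetical invocation: 'http://127.0.0.1:8080' is a placeholder local proxy;
# the site and link filter are reused from the example above.
link_crawler('http://example.webscraping.com', '/places/default/view/',
             delay=3, max_depth=2, max_urls=10,
             proxy='http://127.0.0.1:8080',
             user_agent='GoodCrawler')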