Crawler Examples

1. Lagou (拉勾网) job-board spider

【lg.py】

import scrapy
import test1.items


class LgSpider(scrapy.Spider):
    name = 'lg'
    # Domains the spider may crawl
    allowed_domains = ['lagou.com']
    # Entry URL: Chengdu, full-time, 15k-25k, bachelor's degree, tag 200001
    start_urls = [
        'https://www.lagou.com/wn/jobs?tagCodeList=200001&gx=%E5%85%A8%E8%81%8C&yx=15k-25k&xl=%E6%9C%AC%E7%A7%91&city=%E6%88%90%E9%83%BD&pn=1']
    # Current page number and upper page limit
    page_num = 1
    max_page_num = 10

    def next_url(self):
        """
        Build the URL of the next listing page.
        :return: the page URL, or None once max_page_num is exceeded
        """
        if LgSpider.page_num > LgSpider.max_page_num:
            return None
        url = LgSpider.start_urls[0].split("pn=")[0] + "pn=" + str(LgSpider.page_num)
        LgSpider.page_num += 1
        return url

    def parse(self, response):
        url = self.next_url()
        # Request every listing page in turn; dont_filter re-allows page 1,
        # which otherwise equals the already-seen start URL
        while url is not None:
            yield scrapy.Request(url=url, callback=self.parse_item, dont_filter=True)
            url = self.next_url()

    def parse_item(self, response):
        """
        Parse the job cards on a listing page.
        :param response:
        :return:
        """
        size = len(response.xpath('//*[@id="jobList"]/div[1]/div/div[1]/div[1]/div[1]/a'))
        for i in range(size):
            item = test1.items.LgItem()
            item['url'] = response.url
            item['title'] = response.xpath('//*[@id="jobList"]/div[1]/div/div[1]/div[1]/div[1]/a')[i].re(
                "<a>(.*)<!-- -->\\[(.*)\\]</a>")
            item['money'] = response.xpath('//*[@id="jobList"]/div[1]/div/div[1]/div[1]/div[2]/span/text()')[i].get()
            item['time'] = response.xpath('//*[@id="jobList"]/div[1]/div/div[1]/div[1]/div[1]/span/text()')[i].get()
            item['company'] = response.xpath('//*[@id="jobList"]/div[1]/div/div[1]/div[2]/div[1]/a/text()')[i].get()
            item['experience'] = response.xpath('//*[@id="jobList"]/div[1]/div/div[1]/div[1]/div[2]/text()')[i].get()
            item['require'] = response.xpath('//*[@id="jobList"]/div[1]/div/div[2]/div[1]')[i].re("<span>(.*?)</span>")
            positionId = self.get_detail_url(response, i)
            detail_url = "https://www.lagou.com/wn/jobs/{}.html".format(positionId)
            item['detail_url'] = detail_url
            req = scrapy.Request(url=detail_url, callback=self.parse_detail_item)
            # Carry the list-page item over to the detail-page response
            req.meta['data'] = item
            yield req

    def parse_detail_item(self, response):
        """Parse the job detail page."""
        # Item carried over from the list page
        item = response.meta['data']
        # Attach the detail-page fields: fl (benefits) and describe (job description)
        item['fl'] = response.xpath('//*[@id="job_detail"]/dd[1]/p/text()').get()
        item['describe'] = ''.join(response.xpath('//*[@id="job_detail"]/dd[2]/div/text()').getall())
        yield item

    def get_detail_url(self, response, num):
        """
        Extract the positionId of the num-th job from the page's __NEXT_DATA__ JSON.
        :param response:
        :param num:
        :return: positionId used to build the detail-page URL
        """
        positionIds = response.xpath('//*[@id="__NEXT_DATA__"]').re('{"positionId":(\\d+),"positionName":')
        return positionIds[num]
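
The spider can be started with scrapy crawl lg from the project root. To drive it in-process instead, a small runner script works too; this is a minimal sketch assuming the standard project layout (run it from the directory containing scrapy.cfg; the file name run.py is illustrative):

# run.py -- minimal in-process runner sketch
from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

if __name__ == '__main__':
    process = CrawlerProcess(get_project_settings())  # loads settings.py
    process.crawl('lg')  # spider is looked up by its name attribute
    process.start()      # blocks until the crawl finishes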

【items.py】

# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html

import scrapy

class LgItem(scrapy.Item):
    title = scrapy.Field()
    money = scrapy.Field()
    time = scrapy.Field()
    company = scrapy.Field()
    experience = scrapy.Field()
    require = scrapy.Field()
    url = scrapy.Field()
    detail_url = scrapy.Field()
    fl = scrapy.Field()
    describe = scrapy.Field()
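
For reference, scrapy.Item behaves like a dict restricted to its declared fields, which is why the pipeline later in this post can call dict(item) directly. A quick illustrative snippet (values are made up):

from test1.items import LgItem

item = LgItem(title='示例职位', money='15k-25k')
print(dict(item))         # {'title': '示例职位', 'money': '15k-25k'}
# item['salary'] = '20k'  # KeyError: only declared Fields may be set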

【settings.py】

# Scrapy settings for test1 project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://docs.scrapy.org/en/latest/topics/settings.html
#     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://docs.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'test1'

SPIDER_MODULES = ['test1.spiders']
NEWSPIDER_MODULE = 'test1.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36'

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
# CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://docs.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
CONCURRENT_REQUESTS_PER_DOMAIN = 16
# CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
TELNETCONSOLE_ENABLED = False

# Override the default request headers:
DEFAULT_REQUEST_HEADERS = {
    'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
    'Accept-Encoding': 'gzip, deflate, br',
    'Accept-Language': 'zh-CN,zh;q=0.9,en;q=0.8',
    'Cache-Control': 'max-age=0',
    'Connection': 'keep-alive',
    'Host': 'www.lagou.com',
    'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="98", "Google Chrome";v="98"',
    'sec-ch-ua-mobile': '?0',
    'sec-ch-ua-platform': '"Windows"',
    'Sec-Fetch-Dest': 'document',
    'Sec-Fetch-Mode': 'navigate',
    'Sec-Fetch-Site': 'none',
    'Sec-Fetch-User': '?1',
    'Upgrade-Insecure-Requests': '1',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36',
    'Cookie': 'LGUID=20200729122826-f23d2554-37c9-4eea-a0f1-76305157298e; _ga=GA1.2.724351898.1595996909; JSESSIONID=ABAAABAABEIABCI7CA522728C67E4E0F7C750CD9762065C; WEBTJ-ID=20221003124510-1839c29875366d-0970d6698ff7f2-a3e3164-921600-1839c29875488c; RECOMMEND_TIP=true; user_trace_token=20221003124510-b8f80e95-b5ed-4555-af88-571ceaa841e4; _gid=GA1.2.224750483.1664772313; Hm_lvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1664772313; privacyPolicyPopup=false; sensorsdata2015session=%7B%7D; index_location_city=%E5%85%A8%E5%9B%BD; X_MIDDLE_TOKEN=24b0e5dcd6b0d6d17b2b43a84356bd47; __lg_stoken__=2937b6ec1f6071d030195f3bbbf9d8d5f08275b1dc96dcb085d84e846d47cc8777eac632a56ad7baa1d35de9ec2dd5989fac1d31ec7adfd20d8cf8714e1759daf038302bec59; TG-TRACK-CODE=index_navigation; gate_login_token=b53a8d18c988653d08776aa19aeca43d7750120c85aac2f4b4e8e61ffb89ca45; _putrc=3219144C4F893ADF123F89F2B170EADC; login=true; unick=%E8%B4%BE%E7%BF%B0%E6%9E%97; showExpriedIndex=1; showExpriedCompanyHome=1; showExpriedMyPublish=1; hasDeliver=7; __SAFETY_CLOSE_TIME__17642295=1; Hm_lpvt_4233e74dff0ae5bd0a3d81c6ccf756e6=1664779467; LGRID=20221003144425-b0a48fc2-bdee-4e10-9448-588be0be6020; sensorsdata2015jssdkcross=%7B%22distinct_id%22%3A%2217642295%22%2C%22%24device_id%22%3A%2217398d42f6951f-0fe90ec8ff83a2-f7d123e-2073600-17398d42f6a71c%22%2C%22props%22%3A%7B%22%24latest_traffic_source_type%22%3A%22%E7%9B%B4%E6%8E%A5%E6%B5%81%E9%87%8F%22%2C%22%24latest_referrer%22%3A%22%22%2C%22%24latest_search_keyword%22%3A%22%E6%9C%AA%E5%8F%96%E5%88%B0%E5%80%BC_%E7%9B%B4%E6%8E%A5%E6%89%93%E5%BC%80%22%2C%22%24os%22%3A%22Windows%22%2C%22%24browser%22%3A%22Chrome%22%2C%22%24browser_version%22%3A%2298.0.4758.102%22%7D%2C%22first_id%22%3A%2217398d42f6951f-0fe90ec8ff83a2-f7d123e-2073600-17398d42f6a71c%22%7D; X_HTTP_TOKEN=8d8fc241936f5e254893874661b866ea1ca75031b2'
}

# Enable or disable spider middlewares
# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
SPIDER_MIDDLEWARES = {
    'test1.middlewares.Test1SpiderMiddleware': 543,
}

# Enable or disable downloader middlewares
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
# DOWNLOADER_MIDDLEWARES = {
#    'test1.middlewares.Test1DownloaderMiddleware': 543,
# }

# Enable or disable extensions
# See https://docs.scrapy.org/en/latest/topics/extensions.html
# EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
# }

# Configure item pipelines
# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'test1.pipelines.Test1Pipeline': 300,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
# AUTOTHROTTLE_ENABLED = True
# The initial download delay
# AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
# AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
# AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
# AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# HTTPCACHE_ENABLED = True
# HTTPCACHE_EXPIRATION_SECS = 0
# HTTPCACHE_DIR = 'httpcache'
# HTTPCACHE_IGNORE_HTTP_CODES = []
# HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
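
Because COOKIES_ENABLED is False, Scrapy's cookie middleware is off and the hard-coded Cookie header above is sent verbatim with every request. To sanity-check which settings are actually in effect, they can be loaded from a Python shell run in the project root (so scrapy.cfg is found); a small sketch:

from scrapy.utils.project import get_project_settings

s = get_project_settings()
print(s.getint('DOWNLOAD_DELAY'))    # 3
print(s.getbool('COOKIES_ENABLED'))  # False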

2. Official site of an academy in Anhui Province

【AhSpider.py】

import scrapy
import test1.items


class AhSpider(scrapy.Spider):
    name = 'ah'
    allowed_domains = ['aepu.com.cn']
    start_urls = ['http://www.aepu.com.cn/html/929/']

    def parse(self, response):
        """
        Parse the top-level category menu.
        :param response:
        :return:
        """
        for i in range(len(self._parse_data(response, '//*[@id="accordion"]/li/div/a/text()'))):
            big_title = self._parse_data(response, '//*[@id="accordion"]/li[' + str(i + 1) + ']/div/a/text()')[
                0].get()
            big_title_url = self._parse_data(response, '//*[@id="accordion"]/li[' + str(i + 1) + ']/div/a/@href')[
                0].get()
            # Make relative links absolute
            if not big_title_url.startswith('http'):
                big_title_url = 'http://www.aepu.com.cn' + big_title_url
            req = scrapy.Request(url=big_title_url, callback=self.parse_list)
            item = test1.items.AhItem()
            item["big_title"] = big_title
            item["big_title_url"] = big_title_url
            req.meta['data'] = item
            yield req

    def parse_list(self, response):
        """
        Parse a paginated article list and request every page.
        :param response:
        :return:
        """
        page = self._get_page_num(response)
        total = int(page[1])
        for i in range(total):
            if i == 0:
                # Page 1 is the list URL itself
                url = response.url
            else:
                # Later pages follow <list-url>list-<n>.<ext>; reuse the file
                # extension of an existing pager link (e.g. "html")
                ext = response.xpath('//*[@id="page"]/a/@href')[1].get().split('/')[-1].split('.')[-1]
                url = '{}list-{}.{}'.format(response.url, i + 1, ext)
            req = scrapy.Request(url=url, callback=self.parse_list_data)
            # Carry the category item from parse() over to each list page
            item = response.meta['data']
            req.meta['data'] = item
            yield req

    def parse_list_data(self, response):
        """Parse the rows (title, link, date) on one list page."""
        item = response.meta['data']
        for i in range(
                len(self._parse_data(response, '//*[@id="content-full"]/div[2]/div/div/div[2]/div[2]/ul/li/a'))):
            title = self._parse_data(response, '//*[@id="content-full"]/div[2]/div/div/div[2]/div[2]/ul/li[' + str(
                i + 1) + ']/a/text()')
            if len(title) == 0:
                continue
            else:
                title = title[0].get()
            href = self._parse_data(response, '//*[@id="content-full"]/div[2]/div/div/div[2]/div[2]/ul/li[' + str(
                i + 1) + ']/a/@href')
            if len(href) == 0:
                continue
            else:
                href = href[0].get()
            time = self._parse_data(response, '//*[@id="content-full"]/div[2]/div/div/div[2]/div[2]/ul/li[' + str(
                i + 1) + ']/span/text()')
            if len(time) == 0:
                continue
            else:
                time = time[0].get()
            item['title'] = title
            item['href'] = href
            item['time'] = time
            yield item

    def _parse_data(self, response, xpath):
        return response.xpath(xpath)

    def _get_page_num(self, response):
        """
        Read the pager text, which renders as e.g. "1/12页".
        :param response:
        :return: (current page, total pages) as strings
        """
        return response.xpath('//*[@id="page"]').re('(\\d+)/(\\d+)页')

    def _get_demo_page_url(self, response):
        """Demo helper: extract the page number from the last pager link."""
        return response.xpath('//*[@id="page"]/a[13]/@href').re('list-(\\d+)')[0]
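
The pager parsing in _get_page_num relies on the site rendering text like "1/12页" inside the #page element. A standalone sketch of the same regex against a made-up HTML fragment:

import re

# Hypothetical pager markup, mimicking what #page is assumed to contain
html = '<div id="page">1/12页 <a href="/html/929/list-2.html">下一页</a></div>'
current, total = re.search(r'(\d+)/(\d+)页', html).groups()
print(current, total)  # 1 12 -> parse_list will request list-2 ... list-12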

【items.py】

import scrapy


class AhItem(scrapy.Item):
    big_title = scrapy.Field()
    big_title_url = scrapy.Field()
    title = scrapy.Field()
    href = scrapy.Field()
    time = scrapy.Field()

【pipelines.py】

import json


class Test1Pipeline:
    # A minimal pipeline would just print and return each item; this one
    # writes every item to teacher.json as one JSON object per line.

    def __init__(self):
        self.file = open('teacher.json', 'wb')

    def process_item(self, item, spider):
        content = json.dumps(dict(item), ensure_ascii=False) + "\n"
        self.file.write(content.encode('utf-8'))
        return item

    def close_spider(self, spider):
        self.file.close()
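
For completeness: recent Scrapy versions can produce the same JSON-lines file without a custom pipeline, using the built-in feed exports. A settings sketch (the FEEDS setting exists since Scrapy 2.1; the file name mirrors the pipeline above):

# settings.py sketch -- replaces Test1Pipeline with Scrapy's feed exports
FEEDS = {
    'teacher.json': {
        'format': 'jsonlines',  # one JSON object per line
        'encoding': 'utf8',
    },
}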