Scraping a group's job postings and posting details with Python and Scrapy

1. Define the fields to scrape in items.py

# -*- coding: utf-8 -*-

# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html

import scrapy
class GosuncnItem(scrapy.Item):
    """
    Item fields for the spider.
    """
    # define the fields for your item here like:
    # name = scrapy.Field()
    platform = scrapy.Field()
    position = scrapy.Field()
    num = scrapy.Field()
    time = scrapy.Field()
    url = scrapy.Field()
    content = scrapy.Field()
    responsible = scrapy.Field()
    page = scrapy.Field()

2. Configure the project in settings.py

# -*- coding: utf-8 -*-

# Scrapy settings for gosuncn project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://doc.scrapy.org/en/latest/topics/settings.html
#     https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://doc.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'gosuncn'

SPIDER_MODULES = ['gosuncn.spiders']
NEWSPIDER_MODULE = 'gosuncn.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'gosuncn (+http://www.yourdomain.com)'

# Obey robots.txt rules
ROBOTSTXT_OBEY = True

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'gosuncn.middlewares.GosuncnSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#DOWNLOADER_MIDDLEWARES = {
#    'gosuncn.middlewares.GosuncnDownloaderMiddleware': 543,
#}

# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
   'gosuncn.pipelines.GosuncnPipeline': 300,
}
LOG_LEVEL = "WARNING"   # only log warnings and above
LOG_FILE = "./log.log"  # write log output to a file instead of stdout
# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'

3. Scrape the group's job postings. Note: the field names assigned in the spider must match the fields defined in items.py, otherwise a KeyError is raised.

# -*- coding: utf-8 -*-
import scrapy
import logging
from gosuncn.items import GosuncnItem
logger = logging.getLogger(__name__)
# module-level logger for this spider
class GaoxinxingSpider(scrapy.Spider):
    name = 'gaoxinxing'
    allowed_domains = ['gosuncn.zhiye.com']
    start_urls = ['http://gosuncn.zhiye.com/Social']
    next_page_num = 1
    def parse(self, response):
        tr_list = response.xpath("//table[@class='jobsTable']/tr")[1:]
        #print(tr_list)
        for tr in tr_list:
            item = GosuncnItem()
            item["position"] = tr.xpath(".//td[1]/a/text()").extract_first()
            item["url"] = "http://gosuncn.zhiye.com" + tr.xpath(".//td[1]/a/@href").extract_first()
            item["platform"] = tr.xpath(".//td[3]/text()").extract_first()
            item["num"] = tr.xpath(".//td[4]/text()").extract_first()
            item["time"] = tr.xpath(".//td[6]/text()").extract_first()
            item["page"] = self.next_page_num
            #print(item)
            #logger.warning(item)  # log the item
            #yield item
            ################ scrape the detail page ######################
            yield scrapy.Request(
                item["url"],
                callback=self.url_parse,  # no parentheses: pass the method itself, don't call it
                meta={"item": item}  # hand the partially filled item over to url_parse()
            )

        ############## follow the next pages ###############################
        next_page_url = response.xpath("//div[@class='pager2']//a[@class='next']/@href").extract_first()
        print(next_page_url)
        self.next_page_num = self.next_page_num + 1
        if self.next_page_num < 5:
            next_url = "http://gosuncn.zhiye.com/social/?PageIndex=" + str(self.next_page_num)
            #print(next_url)
            yield scrapy.Request(
                next_url,
                callback=self.parse  # no parentheses: pass the method itself
            )

    def url_parse(self, response):
        """
        Parse the job detail page.
        :param response:
        :return:
        """
        item = response.meta["item"]
        item["content"] = response.xpath("//div[@class='xiangqingcontain']/ul[@class='xiangqinglist clearfix']/li[6]/text()").extract()
        #item["responsible"] = response.xpath("//div[@class='xiangqingcontain']/div[@class='xiangqingtext']/p[2]/text()").extract()
        logger.warning(item)  # log the completed item
        #print(item)
        yield item

4. Process the handed-over data in pipelines.py

# -*- coding: utf-8 -*-

# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
import re
from gosuncn.items import GosuncnItem
class GosuncnPipeline(object):
    def process_item(self, item, spider):
        if isinstance(item, GosuncnItem):
            item["content"] = self.process_content(item["content"])
            print(item)
        return item

    def process_content(self, content):
        # strip CRLF sequences and spaces, then drop the now-empty strings
        content = [re.sub(r"\r\n| ", "", i) for i in content]
        content = [i for i in content if len(i) > 0]
        return content
# class GosuncnPipeline1(object):
#     def process_item(self, item, spider):
#         if isinstance(item,GosuncnItem):
#             print(item)
#         return item

 
