spider爬取拉勾网

兴奋地开发出了爬取拉勾网的爬虫,可是当调试都成功了的那一刻,我被拉勾封IP了。

下面是spider的主要内容

import re
import scrapy
from bs4 import BeautifulSoup
from scrapy.http import Request
from lagoupy.items import LagoupyItem

class Myspider(scrapy.Spider):
    """Crawl Python job listings on lagou.com and extract each job's requirements text."""

    name = 'lagoupy'
    allowed_domains = ['www.lagou.com']
    bash_url = 'https://www.lagou.com/zhaopin/Python/'  # listing-page URL prefix
    bashurl = '/?filterOption=3'                        # listing-page URL suffix (sort filter)

    def start_requests(self):
        """Yield requests for listing pages 1 through 30."""
        for page in range(1, 31):
            yield Request(self.bash_url + str(page) + self.bashurl, self.parse)

    def parse(self, response):
        """Extract job-detail links from a listing page and follow each one.

        The job id is carried to the detail callback via request meta.
        """
        soup = BeautifulSoup(response.text, 'lxml')
        for top in soup.find_all('div', class_='p_top'):
            link = top.find('a')
            # Guard against malformed entries instead of raising AttributeError.
            if link is None or not link.get('href'):
                continue
            url_path = link['href'].replace('//', '')
            job_id = url_path.replace('www.lagou.com/jobs/', '').replace('.html', '')
            yield Request('https://' + url_path,
                          callback=self.get_shuju,
                          meta={'jobid': job_id})

    def get_shuju(self, response):
        """Parse one job-detail page into a LagoupyItem.

        Returns an item with 'jobid' (from request meta) and 'yaoqiu'
        (the job-requirement text from the dd.job_bt node).
        """
        item = LagoupyItem()
        item['jobid'] = response.meta['jobid']
        # The node is missing on expired listings or anti-bot pages;
        # fall back to an empty string rather than crashing the spider.
        node = BeautifulSoup(response.text, 'lxml').find('dd', class_='job_bt')
        item['yaoqiu'] = node.get_text() if node is not None else ''
        return item

posted on 2017-03-06 11:15  耀扬  阅读(164)  评论(0编辑  收藏  举报

导航