Fork me on GitHub

scrapy+selenium+chromedriver解析动态渲染页面

背景:动态页面是通过js代码渲染出来的,无法直接使用scrapy爬虫抓取,这时就需要先把js代码渲染为静态的html,再用scrapy爬虫就可以解决

解决办法:增加SeleniumMiddleware下载中间件

代码: 

class SeleniumMiddleware(object):
    """Scrapy downloader middleware that renders JS-driven pages with Chrome.

    Intercepts each request, loads the URL in a real Chrome browser via
    Selenium, waits for the result list to appear, and hands the fully
    rendered HTML back to Scrapy as an HtmlResponse.
    """

    def __init__(self, timeout=25):
        """Start a shared Chrome instance.

        :param timeout: page-load and explicit-wait timeout in seconds
        """
        chrome_options = Options()
        prefs = {
            'profile.default_content_setting_values': {
                'images': 2,      # disable image loading to speed up rendering
                'javascript': 2   # disable JS; NOTE: may break JS-loaded interaction counts
            }
        }
        chrome_options.add_experimental_option("prefs", prefs)
        # Raw string: a plain literal here ("\P", "\G", ...) relies on
        # invalid escape sequences and raises a DeprecationWarning.
        self.browser = webdriver.Chrome(
            executable_path=r"C:\Program Files (x86)\Google\Chrome\Application\chromedriver",
            chrome_options=chrome_options)
        self.timeout = timeout
        self.browser.maximize_window()
        self.browser.set_page_load_timeout(self.timeout)
        self.wait = WebDriverWait(self.browser, self.timeout)

    def __del__(self):
        # quit() (not close()) shuts down the whole chromedriver process;
        # close() only closes the current window and leaks the driver.
        # Guard against __init__ having failed before self.browser existed.
        browser = getattr(self, 'browser', None)
        if browser is not None:
            try:
                browser.quit()
            except Exception:
                pass  # best-effort cleanup during interpreter shutdown

    def process_request(self, request, spider):
        """Fetch the page with ChromeDriver and return the rendered HTML.

        :param request: Request object
        :param spider: Spider object
        :return: HtmlResponse with the rendered page (status 200), or a
                 status-500 HtmlResponse if the page load times out
        """
        logging.info('******ChromeDriver is Starting******')
        try:
            self.browser.get(request.url)
            # Wait until the search-result container has been rendered.
            self.wait.until(EC.presence_of_element_located(
                (By.XPATH, '//div[@class="s-result-list sg-row"]')))
            time.sleep(2)  # small grace period for late-arriving content
            return HtmlResponse(url=request.url, body=self.browser.page_source,
                                request=request, encoding='utf-8', status=200)
        except TimeoutException:
            return HtmlResponse(url=request.url, status=500, request=request)

在setting文件中增加如下配置:
# Register the middleware in settings.py; the value (541) is the priority —
# lower numbers run closer to the engine on the way out.
DOWNLOADER_MIDDLEWARES = {
    # 'amazon.middlewares.AmazonDownloaderMiddleware': 543,
    'amazon.custom_rewrite.SeleniumMiddlewares.SeleniumMiddleware': 541,  # custom selenium middleware
}
posted @ 2019-03-01 14:46  猿起缘灭  阅读(3009)  评论(0编辑  收藏  举报