Crawling NetEase News
Project structure
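After the setup commands below, the layout is the standard skeleton that scrapy startproject generates, plus a hand-written start.py launcher at the project root (a sketch; the exact files can vary slightly between Scrapy versions):

wangyiPro/
├── scrapy.cfg
├── start.py              # added manually, see below
└── wangyiPro/
    ├── __init__.py
    ├── items.py
    ├── middlewares.py
    ├── pipelines.py
    ├── settings.py
    └── spiders/
        ├── __init__.py
        └── wangyi.py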
Create the project
scrapy startproject wangyiPro
Enter the project directory
cd wangyiPro
Generate the spider file
scrapy genspider wangyi www.xxx.com
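The domain passed to genspider here is a throwaway placeholder; the generated allowed_domains line is commented out in the spider below so that article links pointing at other 163.com subdomains are not filtered out.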
Create a launcher script
start.py
from scrapy import cmdline

cmdline.execute("scrapy crawl wangyi".split())
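With start.py sitting next to scrapy.cfg, the crawl can be launched from an IDE with python start.py, which is equivalent to running scrapy crawl wangyi in the project root.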
Configuration file
settings.py

# -*- coding: utf-8 -*-

# Scrapy settings for wangyiPro project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://doc.scrapy.org/en/latest/topics/settings.html
#     https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://doc.scrapy.org/en/latest/topics/spider-middleware.html

BOT_NAME = 'wangyiPro'

SPIDER_MODULES = ['wangyiPro.spiders']
NEWSPIDER_MODULE = 'wangyiPro.spiders'

# Crawl responsibly by identifying yourself (and your website) on the user-agent
USER_AGENT = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36"

# Only log errors to keep the console output readable
LOG_LEVEL = "ERROR"

# Obey robots.txt rules
ROBOTSTXT_OBEY = False

# Configure maximum concurrent requests performed by Scrapy (default: 16)
#CONCURRENT_REQUESTS = 32

# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
#DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
#CONCURRENT_REQUESTS_PER_DOMAIN = 16
#CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
#COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
#TELNETCONSOLE_ENABLED = False

# Override the default request headers:
#DEFAULT_REQUEST_HEADERS = {
#   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#   'Accept-Language': 'en',
#}

# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
#SPIDER_MIDDLEWARES = {
#    'wangyiPro.middlewares.WangyiproSpiderMiddleware': 543,
#}

# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
DOWNLOADER_MIDDLEWARES = {
    'wangyiPro.middlewares.WangyiproDownloaderMiddleware': 543,
}

# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
#EXTENSIONS = {
#    'scrapy.extensions.telnet.TelnetConsole': None,
#}

# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html
ITEM_PIPELINES = {
    'wangyiPro.pipelines.WangyiproPipeline': 300,
    'wangyiPro.pipelines.MysqlPipeline': 301,
}

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
#AUTOTHROTTLE_ENABLED = True
# The initial download delay
#AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
#AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
# Enable showing throttling stats for every response received:
#AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
#HTTPCACHE_ENABLED = True
#HTTPCACHE_EXPIRATION_SECS = 0
#HTTPCACHE_DIR = 'httpcache'
#HTTPCACHE_IGNORE_HTTP_CODES = []
#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'
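Everything commented out above is stock Scrapy boilerplate. The settings that actually matter for this project are USER_AGENT (a real browser UA string), ROBOTSTXT_OBEY = False, LOG_LEVEL = "ERROR", the downloader middleware that swaps in Selenium-rendered pages, and the two item pipelines registered with priorities 300 and 301 (lower values run first).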
wangyi.py
# -*- coding: utf-8 -*-
import scrapy
from selenium import webdriver

from wangyiPro.items import WangyiproItem


class WangyiSpider(scrapy.Spider):
    name = 'wangyi'
    # allowed_domains = ['www.xx.com']
    start_urls = ['https://news.163.com/']

    # One shared browser instance; the downloader middleware uses it to
    # render the dynamically loaded section pages.
    bro = webdriver.Chrome(executable_path='chromedriver.exe')
    # Section URLs that need Selenium rendering; checked in the middleware.
    urls = []

    def parse(self, response):
        # The navigation bar on the home page; indexes 3, 4, 6, 7 and 8
        # are the news sections we want to crawl.
        li_list = response.xpath('//*[@id="index2016_wrap"]/div[1]/div[2]/div[2]/div[2]/div[2]/div/ul/li')
        for index in [3, 4, 6, 7, 8]:
            li = li_list[index]
            new_url = li.xpath('./a/@href').extract_first()
            self.urls.append(new_url)
            yield scrapy.Request(url=new_url, callback=self.parse_news)

    def parse_news(self, response):
        # Runs on the Selenium-rendered section pages.
        div_list = response.xpath('//div[@class="ndi_main"]/div')
        for div in div_list:
            title = div.xpath('./div/div[1]/h3/a/text()').extract_first()
            news_detail_url = div.xpath('./div/div[1]/h3/a/@href').extract_first()
            item = WangyiproItem()
            item['title'] = title
            # Pass the half-filled item on to the detail callback via meta.
            yield scrapy.Request(url=news_detail_url, callback=self.parse_detail,
                                 meta={'item': item})

    def parse_detail(self, response):
        item = response.meta['item']
        content = response.xpath('//div[@id="endText"]//text()').extract()
        item['content'] = "".join(content)
        yield item

    def closed(self, reason):
        # Called once when the spider closes; shut the shared browser down.
        print("Crawl finished.")
        self.bro.quit()
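The hard-coded indexes [3, 4, 6, 7, 8] and the long navigation XPath are tied to the 163.com markup at the time of writing and will break when the page layout changes. A quick way to re-verify them is the Scrapy shell (a sketch; the printed titles tell you which indexes you actually want):

scrapy shell https://news.163.com/
>>> li_list = response.xpath('//*[@id="index2016_wrap"]/div[1]/div[2]/div[2]/div[2]/div[2]/div/ul/li')
>>> # list each section name with its index to pick the right ones
>>> [(i, li.xpath('./a/text()').extract_first()) for i, li in enumerate(li_list)]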
items.py
import scrapy


class WangyiproItem(scrapy.Item):
    title = scrapy.Field()
    content = scrapy.Field()
middlewares.py
from time import sleep

from scrapy.http import HtmlResponse


class WangyiproDownloaderMiddleware(object):

    def process_request(self, request, spider):
        return None

    def process_response(self, request, response, spider):
        # Only the five section pages need JS rendering; everything else
        # (home page, article detail pages) passes through untouched.
        if request.url in spider.urls:
            bro = spider.bro
            bro.get(request.url)
            try:
                # Scroll to the bottom a few times so the lazily loaded
                # article list is fully rendered.
                for _ in range(4):
                    bro.execute_script('window.scrollTo(0, document.body.scrollHeight)')
                    sleep(1)
            except Exception:
                pass
            page_text = bro.page_source
            # Replace the original response with the rendered page source.
            return HtmlResponse(url=request.url, body=page_text,
                                encoding='utf-8', request=request)
        return response

    def process_exception(self, request, exception, spider):
        pass
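The four fixed one-second sleeps work, but they waste time when the page loads quickly and may be too short when it does not. A common refinement (a sketch, assuming the loading articles keep growing the page height) is to scroll until document.body.scrollHeight stops changing, with a bounded wait per round:

from selenium.common.exceptions import TimeoutException
from selenium.webdriver.support.ui import WebDriverWait


def scroll_to_bottom(bro, max_rounds=6, timeout=5):
    # Keep scrolling until the page height stops growing or we give up.
    last_height = bro.execute_script('return document.body.scrollHeight')
    for _ in range(max_rounds):
        bro.execute_script('window.scrollTo(0, document.body.scrollHeight)')
        try:
            WebDriverWait(bro, timeout).until(
                lambda d: d.execute_script('return document.body.scrollHeight') > last_height)
        except TimeoutException:
            break  # height stable: assume everything has loaded
        last_height = bro.execute_script('return document.body.scrollHeight')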
pipelines.py
import pymysql


class WangyiproPipeline(object):
    def process_item(self, item, spider):
        return item


class MysqlPipeline(object):
    conn = None
    cursor = None

    def open_spider(self, spider):
        self.conn = pymysql.Connect(host='127.0.0.1', port=3306, user='root',
                                    password='', db='db', charset='utf8')
        self.cursor = self.conn.cursor()

    def process_item(self, item, spider):
        try:
            # Parameterized query: let the driver handle quoting and escaping
            # instead of interpolating values into the SQL string, which
            # breaks on quotes in titles and invites SQL injection.
            self.cursor.execute('insert into wy(title, content) values (%s, %s)',
                                (item['title'], item['content']))
            self.conn.commit()
        except Exception:
            self.conn.rollback()
        return item

    def close_spider(self, spider):
        self.cursor.close()
        self.conn.close()
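The pipeline assumes the database db and the table wy already exist. A one-off setup script might look like this (a sketch; host, credentials, and column sizes mirror the pipeline above and should be adapted to your environment):

import pymysql

# One-off setup for the table MysqlPipeline writes into.
conn = pymysql.Connect(host='127.0.0.1', port=3306, user='root',
                       password='', db='db', charset='utf8')
try:
    with conn.cursor() as cursor:
        cursor.execute(
            'CREATE TABLE IF NOT EXISTS wy ('
            '  id INT AUTO_INCREMENT PRIMARY KEY,'
            '  title VARCHAR(255),'
            '  content TEXT)')
    conn.commit()
finally:
    conn.close()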