Day 96: Scrapy

1. Installing Scrapy (on Ubuntu)

1. Install pip first:

            sudo apt install python3-pip

2. Install the Python development headers:

            sudo apt install python-dev

3. Install the non-Python dependencies:

            sudo apt-get install python-dev python-pip libxml2-dev libxslt1-dev zlib1g-dev libffi-dev libssl-dev

4. Install Scrapy with pip (a quick check follows below):

            sudo pip install scrapy
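
To confirm the installation worked, Scrapy's own command line can print the installed version:

            scrapy version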

2. Create a crawler project

scrapy startproject mySpider
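
startproject generates a project skeleton roughly like the following (the exact files vary a little between Scrapy versions):

mySpider/
    scrapy.cfg            # deployment configuration
    mySpider/             # the project's Python package
        __init__.py
        items.py          # item definitions (step 4 below)
        middlewares.py    # spider/downloader middlewares
        pipelines.py      # item pipelines
        settings.py       # project settings
        spiders/          # the spiders live here
            __init__.py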

3. Run the command below in the project directory; it creates a spider named itcast under mySpider/spiders and restricts the crawl to the given domain:

scrapy genspider itcast 'itcast.cn'
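
The generated mySpider/spiders/itcast.py starts out as a minimal skeleton, roughly like this (the exact template depends on the Scrapy version):

# -*- coding: utf-8 -*-
import scrapy


class ItcastSpider(scrapy.Spider):
    name = 'itcast'
    allowed_domains = ['itcast.cn']
    start_urls = ['http://itcast.cn/']

    def parse(self, response):
        pass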

4. Define the item class in items.py; it is used to hold the scraped data, a bit like an ORM model:

import scrapy


class MyspiderItem(scrapy.Item):
    # define the fields for your item here like:
    # name = scrapy.Field()

    name = scrapy.Field()
    title = scrapy.Field()
    info = scrapy.Field()
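
Items behave much like dictionaries, but only declared fields can be set. A quick sketch (the values are made up):

item = MyspiderItem()
item['name'] = 'teacher A'   # assign a declared field
print(item['name'])          # read it back like a dict
print(dict(item))            # convert to a plain dict, e.g. for json.dumps
# item['age'] = 30           # would raise KeyError: 'age' is not a declared field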

5. The spider we just created, in the spiders directory:

# -*- coding: utf-8 -*-
import scrapy
from mySpider.items import MyspiderItem


class ChuanzhiSpider(scrapy.Spider):
    name = 'chuanzhi'
    allowed_domains = ['itcast.cn']
    start_urls = ['http://www.itcast.cn/channel/teacher.shtml#apython']

    def parse(self, response):
        # To dump the raw page for inspection:
        # with open("teacher.html", "wb") as f:
        #     f.write(response.body)

        # Use Scrapy's built-in XPath support to get the root node of each teacher entry
        teacher_list = response.xpath('//div[@class="li_txt"]')

        # Collected items for all teachers
        teacherItem = []
        # Walk the matched nodes
        for each in teacher_list:
            # The Item object holds the extracted data
            item = MyspiderItem()
            # extract() converts the matched result into a list of strings;
            # without it you get selector objects instead.
            # extract_first() returns the first match directly, no indexing needed.
            name = each.xpath('./h3/text()').extract_first()
            title = each.xpath('./h4/text()').extract()
            info = each.xpath('./p/text()').extract()

            item['name'] = name
            item['title'] = title[0]
            item['info'] = info[0]

            # yield item  # alternatively, yield each item instead of collecting a list

            teacherItem.append(item)

        return teacherItem

Note: run the spider with scrapy crawl <spider name>, e.g. scrapy crawl itcast --nolog (the --nolog flag hides log output and can be omitted). To export the results: scrapy crawl itcast -o test.json or scrapy crawl itcast -o test.csv.
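
Besides the command line, a spider can also be started from a plain Python script with CrawlerProcess. A minimal sketch, assuming the spider class above lives in mySpider/spiders/itcast.py and the script is run from the project root:

from scrapy.crawler import CrawlerProcess
from scrapy.utils.project import get_project_settings

from mySpider.spiders.itcast import ChuanzhiSpider  # module path is an assumption

process = CrawlerProcess(get_project_settings())  # load the project's settings.py
process.crawl(ChuanzhiSpider)
process.start()  # blocks until the crawl finishes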

Example:

  The items file:

import scrapy


class cqitItme(scrapy.Item):
    pageText = scrapy.Field()
    hrefText = scrapy.Field()

The spider file:

import scrapy
from scrapy.selector import Selector
from scrapy.http import Request
from ..items import cqitItme


class CqitSpider(scrapy.Spider):
    name = 'cqit'
    # note the domain scope: do not write it as 'www.cqsq.com' or 'cqsq.com/'
    allowed_domains = ['cqsq.com']
    start_urls = ['https://www.cqsq.com/list/7']
    set_href = []

    def parse(self, response):
        a = Selector(response=response).xpath("//div[@class= 'pages']/a")
        # instantiate the class defined in items.py so values can be assigned
        # and handed to the pipelines file for processing
        itme = cqitItme()
        for i in a:
            i1 = i.xpath('.//@href').extract_first()
            itext = i.xpath('.//text()').extract_first()

            if i1 in self.set_href or i1 == "//www.cqsq.com/list/7/50":
                pass
            else:
                self.set_href.append(i1)
                url1 = 'https:' + i1
                itme['pageText'] = itext
                itme['hrefText'] = url1

                print(url1)
                # yield the item so it is sent to the pipelines for processing
                yield itme
                # send the new URL back to the scheduler to be crawled next.
                # dont_filter=True disables duplicate filtering (the default is False)
                yield Request(url=url1, callback=self.parse, dont_filter=True)
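
The 'https:' + i1 concatenation works because these hrefs happen to be protocol-relative; a more general option is response.urljoin, which resolves both protocol-relative and path-relative hrefs against the response URL. A small sketch of the same lines with that change:

# inside parse(), instead of url1 = 'https:' + i1
url1 = response.urljoin(i1)  # e.g. '//www.cqsq.com/list/7/2' -> 'https://www.cqsq.com/list/7/2'
yield Request(url=url1, callback=self.parse, dont_filter=True)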

 

 

The pipelines file:

import json


class dyspiderPipeline(object):  # remember to register this class with a priority in the settings file

    def __init__(self):
        self.f = open('test1', 'w')

    def process_item(self, item, spider):
        # item here is the data yielded back from the spider
        content = json.dumps(dict(item), ensure_ascii=False) + '\n'
        self.f.write(content)
        return item
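
Opening the file in __init__ means it is never explicitly closed. Scrapy pipelines also offer open_spider/close_spider hooks, so a slightly tidier variant of the same pipeline (same output, just different lifecycle hooks) could look like this:

import json


class dyspiderPipeline(object):

    def open_spider(self, spider):
        # called once when the spider starts
        self.f = open('test1', 'w')

    def close_spider(self, spider):
        # called once when the spider closes, so the file is flushed and closed cleanly
        self.f.close()

    def process_item(self, item, spider):
        self.f.write(json.dumps(dict(item), ensure_ascii=False) + '\n')
        return item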

The settings file:

 

ITEM_PIPELINES = {
   'mySpider.pipelines.MyspiderPipeline': 300,
   'mySpider.pipelines.dyspiderPipeline': 300,   # pipelines are registered here; the number is the priority
}

DEPTH_LIMIT = 2  # crawl depth of 2; 0 means no depth limit
DUPEFILTER_CLASS = "mySpider.spiders.repeatfilter.Repeat"  # path to the custom dedupe filter
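
The numbers are priorities in the 0-1000 range: the pipeline with the lower value runs first, and whatever its process_item returns is handed to the next one. Giving both pipelines the same value (300) leaves their relative order unspecified, so distinct values are clearer, for example:

ITEM_PIPELINES = {
   'mySpider.pipelines.MyspiderPipeline': 300,   # runs first
   'mySpider.pipelines.dyspiderPipeline': 400,   # receives whatever the first one returned
}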

Custom deduplication: in yield Request(url=url, callback=self.parse, dont_filter=False), dont_filter=False (the default) means duplicate requests are filtered out; the filter used by default is scrapy.dupefilters.RFPDupeFilter.

Refactored code: create a new Python file called repeatfilter; see the DUPEFILTER_CLASS entry in the settings above for how it is wired in.

class Repeat(object):
    def __init__(self):
        '''Step 2: the constructor'''
        self.visited_set = []

    @classmethod
    def from_settings(cls, settings):
        '''Step 1: runs first, equivalent to obj = Repeat.from_settings(settings)'''
        return cls()

    def request_seen(self, request):
        '''Step 4: the actual filtering logic'''
        if request.url in self.visited_set or request.url == "https://www.cqsq.com/list/7/50":
            return True
        else:
            self.visited_set.append(request.url)
            return False

    def open(self):  # can return a deferred
        # Step 3
        pass

    def close(self, reason):  # can return a deferred
        # Step 5
        pass

    def log(self, request, spider):  # log that a request has been filtered
        # runs last
        pass
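
A list makes every request_seen lookup a linear scan; a Python set keeps the same logic with constant-time membership tests. A sketch of the same filter using a set (the hard-coded exclusion URL is kept from the example above):

class Repeat(object):
    def __init__(self):
        self.visited_set = set()

    @classmethod
    def from_settings(cls, settings):
        return cls()

    def request_seen(self, request):
        # returning True tells Scrapy the request is a duplicate and should be dropped
        if request.url in self.visited_set or request.url == "https://www.cqsq.com/list/7/50":
            return True
        self.visited_set.add(request.url)
        return False

    def open(self):
        pass

    def close(self, reason):
        pass

    def log(self, request, spider):
        pass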

More on pipelines:

from scrapy.exceptions import DropItem


class Day96Pipeline(object):

    def __init__(self, conn_str):
        '''Step 2'''
        self.conn_str = conn_str

    @classmethod
    def from_crawler(cls, crawler):
        """
        Step 1: called at initialization to create the pipeline object.
        :param crawler:
        :return:
        """
        conn_str = crawler.settings.get('DB')
        return cls(conn_str)

    def open_spider(self, spider):
        """
        Step 3: called when the spider starts.
        :param spider:
        :return:
        """
        self.conn = open(self.conn_str, 'a')

    def close_spider(self, spider):
        """
        Step 5: called when the spider closes.
        :param spider:
        :return:
        """
        self.conn.close()

    def process_item(self, item, spider):
        """
        Step 4: called every time an item needs to be persisted.
        :param item:
        :param spider:
        :return:
        """
        # if spider.name == 'chouti'
        tpl = "%s\n%s\n\n" % (item['title'], item['href'])
        self.conn.write(tpl)
        # hand the item to the next pipeline
        return item
        # to drop the item so it never reaches the next pipeline:
        # raise DropItem()


class Day97Pipeline(object):

    def __init__(self, conn_str):
        self.conn_str = conn_str

    @classmethod
    def from_crawler(cls, crawler):
        """
        Called at initialization to create the pipeline object.
        :param crawler:
        :return:
        """
        conn_str = crawler.settings.get('DB')
        return cls(conn_str)

    def open_spider(self, spider):
        """
        Called when the spider starts.
        :param spider:
        :return:
        """
        self.conn = open(self.conn_str, 'a')

    def close_spider(self, spider):
        """
        Called when the spider closes.
        :param spider:
        :return:
        """
        self.conn.close()

    def process_item(self, item, spider):
        """
        Called every time an item needs to be persisted.
        :param item:
        :param spider:
        :return:
        """
        # if spider.name == 'chouti'
        tpl = "%s\n%s\n\n" % (item['title'], item['href'])
        self.conn.write(tpl)

These methods follow the same execution order as the custom dedupe filter above. Note that returning item from process_item hands the item on to the pipeline with the next priority, while raising DropItem() drops it so that nothing further is done with it.
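
from_crawler reads a key named DB from the crawler settings, so these pipelines only work if that key exists in settings.py. A sketch of the matching settings; the file name is just a placeholder and the module path assumes the classes live in mySpider/pipelines.py:

# settings.py
DB = 'news.txt'   # hypothetical file path; passed to the pipelines as conn_str

ITEM_PIPELINES = {
   'mySpider.pipelines.Day96Pipeline': 300,
   'mySpider.pipelines.Day97Pipeline': 400,
}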
 

 
