Crawler Notes (Part 2)

I. The BeautifulSoup Module

  The BeautifulSoup module takes an HTML or XML string, parses it into a structured tree, and provides methods for quickly locating specific elements, which makes searching HTML or XML documents simple.

  Installation:

pip3 install beautifulsoup4
pip3 install lxml

  Usage example:

from bs4 import BeautifulSoup

html_doc = """
<html><head><title>The Dormouse's story</title></head>
<body>
asdf
    <div class="title">
        <b>The Dormouse's story总共</b>
        <h1>f</h1>
    </div>
<div class="story">Once upon a time there were three little sisters; and their names were
    <a  class="sister0" id="link1">Els<span>f</span>ie</a>,
    <a href="http://example.com/lacie" class="sister" id="link2">Lacie</a> and
    <a href="http://example.com/tillie" class="sister" id="link3">Tillie</a>;
and they lived at the bottom of a well.</div>
ad<br/>sf
<p class="story">...</p>
</body>
</html>
"""
# soup = BeautifulSoup(html_doc,'html.parser')  # the built-in parser
soup = BeautifulSoup(html_doc, features="lxml")

# find the first <a> tag
tag1 = soup.find(name='a')

# find all <a> tags
tag2 = soup.find_all(name='a')

# find the tag with id=link2, using a CSS-selector-style API
tag3 = soup.select('#link2')

  1. name — the tag's name

soup = BeautifulSoup(html_doc, features="lxml")
tag = soup.find('a')
name = tag.name # get the tag's name
print(name)

tag.name = 'span' # rename the first <a> tag in the soup to <span>
print(soup)

  2. attrs — tag attributes

soup = BeautifulSoup(html_doc, features="lxml")
tag = soup.find('a')
attrs = tag.attrs    # get the attributes of the first <a> tag
print(attrs)

tag.attrs = {'ik':123} # replace all attributes of the first <a> tag
tag.attrs['id'] = 'iiiii' # set a single attribute on the first <a> tag
print(soup)

  3. children — all direct child nodes

soup = BeautifulSoup(html_doc, features="lxml")
body = soup.find('body')
v = body.children  # an iterator over the direct children (tags and text nodes alike)
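
  children yields text nodes (whitespace strings) as well as tags. A minimal sketch, reusing the html_doc above, that prints only the tag children:

from bs4.element import Tag

soup = BeautifulSoup(html_doc, features="lxml")
body = soup.find('body')
for child in body.children:        # direct children only: tags and text nodes alike
    if isinstance(child, Tag):     # skip the NavigableString nodes
        print(child.name)          # div, div, br, p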

  4. descendants — all descendant nodes, recursively

soup = BeautifulSoup(html_doc, features="lxml")
body = soup.find('body')   # find the <body> tag first
v = body.descendants       # every descendant of <body>, recursively

# v is a generator; loop over it to consume the results
for i in v:
    print(i, end=' ')

  5. clear — remove everything inside the tag (the tag itself is kept)

soup = BeautifulSoup(html_doc, features="lxml")
tag = soup.find('body')
tag.clear()
print(soup)

  6. decompose — recursively destroy the tag and everything inside it (the tag itself included)

soup = BeautifulSoup(html_doc, features="lxml")
body = soup.find('body')
body.decompose()
print(soup)

  7. extract — remove the tag from the tree and return the removed tag

soup = BeautifulSoup(html_doc, features="lxml")
body = soup.find('body')
v = body.extract()
print(soup)

  8. decode — serialize to a string, including the current tag; decode_contents — the same, excluding the current tag

soup = BeautifulSoup(html_doc, features="lxml")
body = soup.find('body')
# v = body.decode()
v = body.decode_contents()
print(v,type(v))

  9. encode — serialize to bytes, including the current tag; encode_contents — the same, excluding the current tag

soup = BeautifulSoup(html_doc, features="lxml")
body = soup.find('body')
# v = body.encode()
v = body.encode_contents()
print(v,type(v))

  10. find — get the first matching tag

soup = BeautifulSoup(html_doc, features="lxml")
# find the first <a> tag
tag = soup.find('a')
print(tag)

# find the first <a> tag with class="sister" and text "Lacie"
tag = soup.find(name='a', attrs={'class': 'sister'}, recursive=True, text='Lacie')
# tag = soup.find(name='a', class_='sister', recursive=True, text='Lacie')
print(tag)

  11. find_all — get all matching tags

soup = BeautifulSoup(html_doc, features="lxml")
# find all <a> tags
tags = soup.find_all('a')
print(tags)

# limit=1: stop after the first match (still returns a list)
tags = soup.find_all('a',limit=1)
print(tags)

# find every <a> tag with class="sister" and text "Lacie"
tags = soup.find_all(name='a', attrs={'class': 'sister'}, recursive=True, text='Lacie')
# tags = soup.find_all(name='a', class_='sister', recursive=True, text='Lacie')
print(tags)


# ####### lists #######
v = soup.find_all(name=['a','div'])
print(v)

v = soup.find_all(class_=['sister0', 'sister'])
print(v)

v = soup.find_all(text=['Tillie'])
print(v, type(v[0]))


v = soup.find_all(id=['link1','link2'])
print(v)

v = soup.find_all(href=['link1','link2'])
print(v)

# ####### regular expressions #######
import re
# rep = re.compile('p')
rep = re.compile('^p')
v = soup.find_all(name=rep)
print(v)

rep = re.compile('sister.*')
v = soup.find_all(class_=rep)
print(v)

rep = re.compile('http://www.oldboy.com/static/.*')
v = soup.find_all(href=rep)
print(v)

# ####### filter with a function #######
def func(tag):
    return tag.has_attr('class') and tag.has_attr('id')
v = soup.find_all(name=func)
print(v)


## get — fetch a single tag attribute
tag = soup.find('a')
v = tag.get('id')
print(v)

  12. has_attr — check whether the tag has a given attribute

soup = BeautifulSoup(html_doc, features="lxml")
tag = soup.find('a')
v = tag.has_attr('id')
print(v)

  13. get_text — get the text inside the tag

soup = BeautifulSoup(html_doc, features="lxml")
tag = soup.find('a')
v = tag.get_text()
print(v)

  14. index — get a child tag's index within its parent

soup = BeautifulSoup(html_doc, features="lxml")
tag = soup.find('body')
v = tag.index(tag.find('div'))
print(v)

tag = soup.find('body')
for i,v in enumerate(tag):
    print(i,v)

  15. is_empty_element — whether the tag is an empty (void / self-closing) element, i.e. one of: 'br', 'hr', 'input', 'img', 'meta', 'spacer', 'link', 'frame', 'base'

soup = BeautifulSoup(html_doc, features="lxml")
tag = soup.find('br')
v = tag.is_empty_element
print(v)

  16. Related tags of the current tag

soup = BeautifulSoup(html_doc, features="lxml")
tag = soup.find('a')

# tag.next               # next parsed node (alias of next_element)
# tag.next_element       # next node in document order
# tag.next_elements      # generator over all following nodes
# tag.next_sibling       # next sibling node
# tag.next_siblings      # generator over all following siblings

# tag.previous
# tag.previous_element
# tag.previous_elements
# tag.previous_sibling
# tag.previous_siblings

# tag.parent             # direct parent
# tag.parents            # generator over all ancestors
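
  A minimal runnable sketch of a few of these navigation attributes (note that siblings include whitespace text nodes, not just tags):

soup = BeautifulSoup(html_doc, features="lxml")
tag = soup.find('a')
print(tag.parent.name)                # direct parent: div
print([p.name for p in tag.parents])  # ancestors: div, body, html, [document]
print(repr(tag.next_element))         # next node in document order: 'Els'
print(repr(tag.next_sibling))         # next sibling: a text node, not a tag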

  17. Searching for a tag's related tags

# tag.find_next(...)
# tag.find_all_next(...)
# tag.find_next_sibling(...)
# tag.find_next_siblings(...)
 
# tag.find_previous(...)
# tag.find_all_previous(...)
# tag.find_previous_sibling(...)
# tag.find_previous_siblings(...)
 
# tag.find_parent(...)
# tag.find_parents(...)
 
# all of these take the same arguments as find_all
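
  A minimal sketch: unlike the plain attributes in section 16, these methods run a fresh search starting from the tag's position:

soup = BeautifulSoup(html_doc, features="lxml")
tag = soup.find('a')
print(tag.find_next('a'))           # first <a> after this one in document order
print(tag.find_next_siblings('a'))  # every following sibling <a>
print(tag.find_parent('div'))       # nearest enclosing <div>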

  18. select, select_one — CSS selectors

soup.select("title")
 
soup.select("p nth-of-type(3)")
 
soup.select("body a")
 
soup.select("html head title")
 
tag = soup.select("span,a")
 
soup.select("head > title")
 
soup.select("p > a")
 
soup.select("p > a:nth-of-type(2)")
 
soup.select("p > #link1")
 
soup.select("body > a")
 
soup.select("#link1 ~ .sister")
 
soup.select("#link1 + .sister")
 
soup.select(".sister")
 
soup.select("[class~=sister]")
 
soup.select("#link1")
 
soup.select("a#link2")
 
soup.select('a[href]')
 
soup.select('a[href="http://example.com/elsie"]')
 
soup.select('a[href^="http://example.com/"]')
 
soup.select('a[href$="tillie"]')
 
soup.select('a[href*=".com/el"]')
 
 
from bs4.element import Tag
 
# yield only Tag descendants that carry an href attribute
def default_candidate_generator(tag):
    for child in tag.descendants:
        if not isinstance(child, Tag):
            continue
        if not child.has_attr('href'):
            continue
        yield child
 
tags = soup.find('body').select("a", _candidate_generator=default_candidate_generator)
print(type(tags), tags)
 
# the same generator as above, but stop after the first match
tags = soup.find('body').select("a", _candidate_generator=default_candidate_generator, limit=1)
print(type(tags), tags)

  19. Tag contents

# tag = soup.find('span')
# print(tag.string)          # get the content
# tag.string = 'new content' # set the content
# print(soup)

# tag = soup.find('body')
# print(tag.string)          # None: <body> has more than one child node
# tag.string = 'xxx'         # replaces everything inside <body>
# print(soup)

# tag = soup.find('body')
# v = tag.stripped_strings   # recursively yield the stripped text of all inner tags
# print(v)
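
  A runnable version of the above; tag.string returns None whenever a tag has more than one child node, and stripped_strings is a generator:

soup = BeautifulSoup(html_doc, features="lxml")
tag = soup.find('span')
print(tag.string)                   # 'f'
tag.string = 'new content'          # replaces the tag's contents

body = soup.find('body')
print(body.string)                  # None: <body> has several children
print(list(body.stripped_strings))  # all inner text, whitespace stripped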

  20. append — append a tag inside the current tag

soup = BeautifulSoup(html_doc, features="lxml")
tag = soup.find('body')
tag.append(soup.find('a'))  # moves the existing first <a> to the end of <body>
print(soup)

from bs4.element import Tag
obj = Tag(name='i', attrs={'id': 'it'})  # soup.new_tag('i', id='it') is equivalent
obj.string = 'I am new here'
tag = soup.find('body')
tag.append(obj)
print(soup)

  21. insert — insert a tag at a given index inside the current tag

# from bs4.element import Tag
# obj = Tag(name='i', attrs={'id': 'it'})
# obj.string = 'I am new here'
# tag = soup.find('body')
# tag.insert(2, obj)
# print(soup)

  22. insert_after, insert_before — insert after or before the current tag

# from bs4.element import Tag
# obj = Tag(name='i', attrs={'id': 'it'})
# obj.string = 'I am new here'
# tag = soup.find('body')
# # tag.insert_before(obj)
# tag.insert_after(obj)
# print(soup)

  23. replace_with — replace the current tag with the given tag

# from bs4.element import Tag
# obj = Tag(name='i', attrs={'id': 'it'})
# obj.string = 'I am new here'
# tag = soup.find('div')
# tag.replace_with(obj)
# print(soup)

  24. Create relationships between tags

# tag = soup.find('div')
# a = soup.find('a')
# tag.setup(previous_sibling=a)
# print(tag.previous_sibling)
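
  Note: setup is an internal PageElement helper; it only rewires the navigation pointers on the Python objects, without moving anything in the parsed document. A minimal runnable sketch of the commented example above:

soup = BeautifulSoup(html_doc, features="lxml")
tag = soup.find('div')
a = soup.find('a')
tag.setup(previous_sibling=a)    # rewires navigation pointers only; pointers not
                                 # passed in (parent, next_element, ...) are reset
print(tag.previous_sibling)      # now reports the <a> tag, though the HTML is unchanged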

  25. wrap — wrap the current tag inside the given tag

# from bs4.element import Tag
# obj1 = Tag(name='div', attrs={'id': 'it'})
# obj1.string = 'I am new here'
#
# tag = soup.find('a')
# v = tag.wrap(obj1)
# print(soup)
 
# tag = soup.find('a')
# v = tag.wrap(soup.find('p'))
# print(soup)

  26. unwrap — remove the current tag, keeping its contents in place

# tag = soup.find('a')
# v = tag.unwrap()
# print(soup) 

 Reposted from: 武沛齐 (Wu Peiqi)

 II. Crawler Performance

For more detail, see the dedicated notes on crawler performance.

import socket
import select

class Request(object):
    def __init__(self, sock, info):
        self.sock = sock
        self.info = info
    # expose the socket's fileno so select() can poll this object directly
    def fileno(self):
        return self.sock.fileno()

class AsyncRequest(object):
    def __init__(self):
        self.sock_list = []
        self.conns = []

    def add_request(self, req_info):
        """
        Create a request.
         req_info: {'host': 'www.baidu.com', 'port': 80, 'path': '/'},
        :return:
        """
        sock = socket.socket()
        sock.setblocking(False)
        try:
            sock.connect((req_info['host'], req_info['port']))
        except BlockingIOError as e:
            pass

        obj = Request(sock, req_info)
        self.sock_list.append(obj)
        self.conns.append(obj)

    def run(self):
        """
        Start the event loop; watch for: has the connection succeeded? has data come back?
        :return:
        """
        while True:
            # select.select accepts any objects, not just sockets,
            # as long as each object has a fileno() method;
            # select calls obj.fileno() internally, which is why
            # Request objects can be passed in directly
            rlist, wlist, elist = select.select(self.sock_list, self.conns, [], 0.05)

            # wlist: connection established (socket is writable)
            for obj in wlist:
                # obj is a Request wrapping the socket and its request info
                data = "GET %s HTTP/1.1\r\nHost: %s\r\n\r\n" % (obj.info['path'], obj.info['host'])
                obj.sock.send(data.encode('utf-8'))
                self.conns.remove(obj)

            # rlist: response data has arrived and is ready to read
            for obj in rlist:
                response = obj.sock.recv(8096)
                obj.info['callback'](response)
                self.sock_list.remove(obj)

            # when sock_list is empty, every request has been answered
            if not self.sock_list:
                break

# callback customized for the corresponding URL
def done1(response):
    print(response)

# callback customized for the corresponding URL
def done2(response):
    print(response)

url_list = [
    {'host': 'www.baidu.com', 'port': 80, 'path': '/', 'callback': done1},
    {'host': 'www.cnblogs.com', 'port': 80, 'path': '/index.html', 'callback': done2},
    {'host': 'www.bing.com', 'port': 80, 'path': '/', 'callback': done2},
]

asyncRequest = AsyncRequest()
for item in url_list:
    asyncRequest.add_request(item)

asyncRequest.run()
A custom asynchronous, non-blocking request module
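
  Design note: select.select reports a socket as writable once its non-blocking connect has completed, and as readable once response bytes have arrived. That is why each Request above is registered in both conns (waiting to connect) and sock_list (waiting for data), and removed from each list as the corresponding event fires.
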
import socket
import select

############### The essence of an HTTP request: blocking

"""
sk = socket.socket()
# connect
sk.connect(('www.baidu.com',80))    # blocking I/O
print('connected')

# send the request
sk.send(b'GET / HTTP/1.0\r\nHost:www.baidu.com\r\n\r\n')

# wait for the server's response
data = sk.recv(8096)    # blocking I/O
print(data)

# close the connection
sk.close()
"""

############### The essence of an HTTP request: non-blocking


# sk = socket.socket()
# sk.setblocking(False)
#
# try:
#     # connect
#     sk.connect(('www.baidu.com',80))    # returns immediately; raises BlockingIOError
#     print('connected')
# except BlockingIOError as e:
#     print(e)
#
# # send the request
# sk.send(b'GET / HTTP/1.0\r\nHost:www.baidu.com\r\n\r\n')
#
# # wait for the server's response
# data = sk.recv(8096)    # also raises BlockingIOError if no data has arrived yet
# print(data)
#
# # close the connection
# sk.close()

#########################
The essence of an HTTP request: blocking vs. non-blocking
url_list = [
    'http://www.cnblogs.com/wupeiqi/articles/6229292.html',
    'http://www.baidu.com',
    'http://www.xiaohuar.com',
]
import requests
# 1. Serial: one request at a time (about 6s in total)
# for url in url_list:
#     response = requests.get(url)
#     print(response.content)

# 2. Threads / processes: run three at once (about 3s in total)
# from concurrent.futures import ThreadPoolExecutor,ProcessPoolExecutor
#
# def task(url):
#     response = requests.get(url)
#     print(response.content)
#
# pool = ThreadPoolExecutor(10)
# # pool = ProcessPoolExecutor(10)
# for url in url_list:
#     # requests.get(url)
#     pool.submit(task, url)  # grab a thread from the pool and run task(url)
# pool.shutdown(wait=True)

# 3. Asynchronous, non-blocking
#   [asynchronous]: callback-based (send the request and move on to other work;
#                   a callback notifies you once the request has completed)
#   [non-blocking]: never wait on the socket:
#                       blocking:     client = socket(); client.connect((ip, port))
#                       non-blocking: client = socket(); client.setblocking(False); client.connect((ip, port))
#   [coroutines] are not async/non-blocking in themselves; a coroutine just switches
#                execution by hand: run here for a while, then run there for a while
# connect; send data; receive data


from twisted.web.client import getPage, defer
from twisted.internet import reactor

def all_done(arg):
    reactor.stop()

def callback(contents):
    print(contents)

deferred_list = []

url_list = ['http://www.bing.com', 'http://www.baidu.com', ]
for url in url_list:
    deferred = getPage(bytes(url, encoding='utf8'))  # issue the request (the Twisted analogue of requests.get)
    deferred.addCallback(callback)
    deferred_list.append(deferred)

dlist = defer.DeferredList(deferred_list)
dlist.addCallback(all_done)

reactor.run()
Asynchronous non-blocking requests with Twisted
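
  Note: getPage has been deprecated in newer Twisted releases (since 16.7) in favour of twisted.web.client.Agent (or the treq package), so the snippet above targets the Twisted versions current when these notes were written.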

 
