Python Training, Day 8

1. BeautifulSoup, a parsing library
2. MongoDB, a storage backend
3. requests-html, a request library

BeautifulSoup
1. What is bs4, and why use it?
bs4 is a parsing library that sits on top of HTML/XML parsers (such as lxml) and exposes a powerful, convenient search API.
It speeds up both data extraction and overall crawler development.

2. Installation and usage
pip3 install beautifulsoup4 # install bs4
pip3 install lxml # install the lxml parser
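
A quick usage sketch (the HTML snippet below is invented for illustration, it is not real Wandoujia markup):

    from bs4 import BeautifulSoup

    html = '<ul><li class="card"><a href="/app/1">demo</a></li></ul>'  # toy HTML

    soup = BeautifulSoup(html, 'lxml')                   # parse with the lxml parser
    li = soup.find(name='li', attrs={"class": "card"})   # first matching tag
    print(li.find(name='a').attrs['href'])               # -> /app/1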

MongoDB, a non-relational (NoSQL) database
I. Installation and usage
1. Download and install
https://www.mongodb.com/download-center/community

2. Create a data/db folder on the C drive
- this is where MongoDB stores its data files

3. Start the service with mongod
Open a terminal and run mongod to start the MongoDB service.

4. Enter the MongoDB client with mongo
Open a new terminal and run mongo to enter the client shell.
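
If mongod does not pick up the data directory on its own, it can be passed explicitly; a sketch of the two terminals (the Windows path follows step 2 above):

    mongod --dbpath C:\data\db    # terminal 1: start the service
    mongo                         # terminal 2: open the client shell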

II. Database operations

Switching databases:
SQL (MySQL):
use admin; switches if the database exists, errors if it does not.

MongoDB:
use tank; switches if the database exists, otherwise creates it and switches into it.

Listing databases:
SQL:
show databases;

MongoDB:
show dbs;
A database that holds no data is not listed.

Dropping a database:
SQL:
drop database db_name;

MongoDB:
db.dropDatabase() drops the currently selected database (the one chosen with use).


Collection operations (a collection is what MySQL calls a table):
Creating a collection:
SQL:
create table t1 (f1, f2, ...);

MongoDB:
# in the current database, collections are addressed with a dot;
# db.student is created lazily when the first document is inserted
db.student

Inserting data:
# insert multiple documents
db.student.insert([{"name1": "tank1"}, {"name2": "tank2"}])

# insert one document
db.student.insert({"name": "tank"})
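
Newer mongo shells also offer explicit single/bulk variants, which make the intent clearer:

db.student.insertOne({"name": "tank"})
db.student.insertMany([{"name1": "tank1"}, {"name2": "tank2"}])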


Querying data:
# find all documents in the student collection
db.student.find({})

# find one: documents whose name is "tank"
db.student.find({"name": "tank"})

III. Connecting to MongoDB from Python
1. Install the third-party module pymongo
pip3 install pymongo

2. Connect to the MongoDB server
from pymongo import MongoClient
client = MongoClient('localhost', 27017)
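
As a quick sanity check, a minimal pymongo round trip (the test_db/student names are placeholders, not part of the project):

    from pymongo import MongoClient

    client = MongoClient('localhost', 27017)
    col = client['test_db']['student']      # database and collection are created lazily
    col.insert_one({"name": "tank"})        # insert one document
    print(col.find_one({"name": "tank"}))   # read it back
    client.close()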

Scraping the Wandoujia index page:

'''
Index page:
icon URL, download count, size, detail-page URL

Detail page:
app name, icon name, rating, comment count, editor's review, description, user comments, 1-5 screenshot URLs, download URL
https://www.wandoujia.com/wdjweb/api/category/more?catId=6001&subCatId=0&page=1&ctoken=FRsWKgWBqMBZLdxLaK4iem9B

https://www.wandoujia.com/wdjweb/api/category/more?catId=6001&subCatId=0&page=2&ctoken=FRsWKgWBqMBZLdxLaK4iem9B

https://www.wandoujia.com/wdjweb/api/category/more?catId=6001&subCatId=0&page=3&ctoken=FRsWKgWBqMBZLdxLaK4iem9B

32 pages in total
'''
import re

import requests
from bs4 import BeautifulSoup


# 1. Send the request
def get_page(url):
    response = requests.get(url)
    return response


# 2. Parse
# Parse the index page
def parse_index(data):
    soup = BeautifulSoup(data, 'lxml')

    # get every app's li tag
    app_list = soup.find_all(name='li', attrs={"class": "card"})
    for app in app_list:
        # icon URL
        img = app.find(name='img').attrs['data-original']
        print(img)

        # download count
        down_num = app.find(name='span', attrs={"class": "install-count"}).text
        print(down_num)

        # size: the span in this card whose text matches digits + "MB" (\d+ means digits)
        size = app.find(name='span', text=re.compile(r"\d+MB")).text
        print(size)

        # detail-page URL
        detail_url = app.find(name='a', attrs={"class": "detail-check-btn"}).attrs['href']
        print(detail_url)


def main():
    for line in range(1, 33):
        url = f"https://www.wandoujia.com/wdjweb/api/category/more?catId=6001&subCatId=0&page={line}&ctoken=FRsWKgWBqMBZLdxLaK4iem9B"

        # 1. request the app-list API
        response = get_page(url)
        print('*' * 1000)
        # deserialize the JSON body into a dict
        data = response.json()
        # the HTML fragment holding the app tags
        app_li = data['data']['content']
        # 2. parse it
        parse_index(app_li)


if __name__ == '__main__':
    main()

Scraping Wandoujia detail pages:
'''
Index page:
icon URL, download count, size, detail-page URL

Detail page:
app name, rating, comment count, editor's review, download URL, description, user comments, 1-5 screenshot URLs
https://www.wandoujia.com/wdjweb/api/category/more?catId=6001&subCatId=0&page=1&ctoken=FRsWKgWBqMBZLdxLaK4iem9B

https://www.wandoujia.com/wdjweb/api/category/more?catId=6001&subCatId=0&page=2&ctoken=FRsWKgWBqMBZLdxLaK4iem9B

https://www.wandoujia.com/wdjweb/api/category/more?catId=6001&subCatId=0&page=3&ctoken=FRsWKgWBqMBZLdxLaK4iem9B

32 pages in total
'''
import re

import requests
from bs4 import BeautifulSoup


# 1. Send the request
def get_page(url):
    response = requests.get(url)
    return response


# 2. Parse
# Parse a detail page
def parse_detail(text):
    soup = BeautifulSoup(text, 'lxml')

    # app name
    name = soup.find(name="span", attrs={"class": "title"}).text

    # rating
    love = soup.find(name='span', attrs={"class": "love"}).text

    # comment count
    commit_num = soup.find(name='a', attrs={"class": "comment-open"}).text

    # editor's review
    commit_content = soup.find(name='div', attrs={"class": "con"}).text

    # app download URL
    download_url = soup.find(name='a', attrs={"class": "normal-dl-btn"}).attrs['href']

    print(
        f'''
        ============= tank ==============
        App name: {name}
        Rating: {love}
        Comments: {commit_num}
        Editor's review: {commit_content}
        Download URL: {download_url}
        ============= end ==============
        '''
    )



# Parse the index page
def parse_index(data):
    soup = BeautifulSoup(data, 'lxml')

    # get every app's li tag
    app_list = soup.find_all(name='li', attrs={"class": "card"})
    for app in app_list:
        # icon URL: the data-original attribute of the first img tag
        img = app.find(name='img').attrs['data-original']
        print(img)

        # download count: text of the span with class install-count
        down_num = app.find(name='span', attrs={"class": "install-count"}).text
        print(down_num)

        # size: the span in this card whose text matches digits + "MB" (\d+ means digits)
        size = app.find(name='span', text=re.compile(r"\d+MB")).text
        print(size)

        # detail-page URL: href of the first a tag in the card
        detail_url = app.find(name='a').attrs['href']
        print(detail_url)

        # 3. request the detail page
        response = get_page(detail_url)

        # 4. parse it
        parse_detail(response.text)


def main():
    for line in range(1, 33):
        url = f"https://www.wandoujia.com/wdjweb/api/category/more?catId=6001&subCatId=0&page={line}&ctoken=FRsWKgWBqMBZLdxLaK4iem9B"

        # 1. request the app-list API
        response = get_page(url)
        print('*' * 1000)
        # deserialize the JSON body into a dict
        data = response.json()

        # the HTML fragment holding the app tags
        app_li = data['data']['content']
        # 2. parse it
        parse_index(app_li)


if __name__ == '__main__':
    main()

'''
Index page:
icon URL, download count, size, detail-page URL

Detail page:
app name, rating, comment count, editor's review, download URL, description, user comments, 1-5 screenshot URLs
https://www.wandoujia.com/wdjweb/api/category/more?catId=6001&subCatId=0&page=1&ctoken=FRsWKgWBqMBZLdxLaK4iem9B

https://www.wandoujia.com/wdjweb/api/category/more?catId=6001&subCatId=0&page=2&ctoken=FRsWKgWBqMBZLdxLaK4iem9B

https://www.wandoujia.com/wdjweb/api/category/more?catId=6001&subCatId=0&page=3&ctoken=FRsWKgWBqMBZLdxLaK4iem9B

32 pages in total
'''
import re

import requests
from bs4 import BeautifulSoup
from pymongo import MongoClient

'''
3. Store the scraped Wandoujia data in MongoDB
- create a wandoujia database
- store the index-page data in a collection named index
- store the detail-page data in a collection named detail
'''
# connect to the MongoDB server
client = MongoClient('localhost', 27017)
# create/select the index collection of the wandoujia database
index_col = client['wandoujia']['index']
# create/select the detail collection of the wandoujia database
detail_col = client['wandoujia']['detail']

# 1. Send the request
def get_page(url):
    response = requests.get(url)
    return response


# 2. Parse
# Parse a detail page
def parse_detail(text):
    soup = BeautifulSoup(text, 'lxml')

    # app name
    try:
        name = soup.find(name="span", attrs={"class": "title"}).text
    except Exception:
        # fall back to None if the tag is missing
        name = None

    # rating
    try:
        love = soup.find(name='span', attrs={"class": "love"}).text
    except Exception:
        love = None

    # comment count
    try:
        commit_num = soup.find(name='a', attrs={"class": "comment-open"}).text
    except Exception:
        commit_num = None

    # editor's review
    try:
        commit_content = soup.find(name='div', attrs={"class": "con"}).text
    except Exception:
        commit_content = None

    # app download URL
    try:
        download_url = soup.find(name='a', attrs={"class": "normal-dl-btn"}).attrs['href']
    except Exception:
        # fall back to None if the tag is missing
        download_url = None

    # always assemble the document, substituting placeholders for missing fields,
    # so detail_data is defined no matter which lookups failed
    detail_data = {
        'name': name,
        'love': love if love else "no likes yet, sad",
        'commit_num': commit_num,
        'commit_content': commit_content,
        'download_url': download_url if download_url else "no installer available"
    }

    # insert the detail-page document
    detail_col.insert_one(detail_data)
    print(f'Detail data for app {name} inserted!')

# Parse the index page
def parse_index(data):
    soup = BeautifulSoup(data, 'lxml')

    # get every app's li tag
    app_list = soup.find_all(name='li', attrs={"class": "card"})
    for app in app_list:
        # icon URL: the data-original attribute of the first img tag
        img = app.find(name='img').attrs['data-original']

        # download count: text of the span with class install-count
        down_num = app.find(name='span', attrs={"class": "install-count"}).text

        # size: the span in this card whose text matches digits + "MB" (\d+ means digits)
        size = app.find(name='span', text=re.compile(r"\d+MB")).text

        # detail-page URL: href of the first a tag in the card
        detail_url = app.find(name='a').attrs['href']

        # assemble the index-page document
        index_data = {
            'img': img,
            'down_num': down_num,
            'size': size,
            'detail_url': detail_url
        }

        # insert it
        index_col.insert_one(index_data)
        print('Index data inserted!')

        # 3. request the detail page
        response = get_page(detail_url)

        # 4. parse it
        parse_detail(response.text)


def main():
    for line in range(1, 33):
        url = f"https://www.wandoujia.com/wdjweb/api/category/more?catId=6001&subCatId=0&page={line}&ctoken=FRsWKgWBqMBZLdxLaK4iem9B"

        # 1. request the app-list API
        response = get_page(url)
        print('*' * 1000)
        # deserialize the JSON body into a dict
        data = response.json()

        # the HTML fragment holding the app tags
        app_li = data['data']['content']

        # 2. parse it
        parse_index(app_li)

    # close the MongoDB client once all pages have been processed
    client.close()


if __name__ == '__main__':
    main()