Ajax Data Scraping Example
General workflow
- Locate the target Ajax request on the page (typically via the browser's developer tools).
- Work out the pattern behind the Ajax request URLs.
- Construct the Ajax URLs and scrape the data they return.
This is not much different from ordinary crawling; the target simply shifts from the rendered page you see to the Ajax URLs behind it. Once you have found the Ajax URLs and their parameter pattern, the data is easy to pull down, as the sketch below shows.
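On the demo site https://spa1.scrape.center, for instance, paging through the movie list with the browser's Network panel open reveals XHR requests like /api/movie/?limit=10&offset=0, with offset advancing by 10 per page. A quick probe confirms the pattern before writing the full scraper (a minimal sketch; apart from results, which the full code below relies on, the exact response keys are an assumption):

```python
import requests

# Probe the Ajax endpoint spotted in the Network panel (XHR type).
# The limit/offset query parameters are the pagination pattern the full script uses.
url = 'https://spa1.scrape.center/api/movie/?limit=10&offset=0'
data = requests.get(url).json()
print(sorted(data.keys()))           # 'results' holds the movie entries; other keys may vary
print(len(data.get('results', [])))  # expect 10 entries, one per movie on the first page
```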
Full code
```python
import logging

import pymongo
import requests

logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s - %(levelname)s: %(message)s')

# The Ajax endpoint (an XHR request) that returns the paginated movie list data
INDEX_URL = 'https://spa1.scrape.center/api/movie/?limit={limit}&offset={offset}'


# Fetch an Ajax URL and return the parsed JSON on success;
# otherwise log the error (and implicitly return None)
def scrape_api(url):
    logging.info('scraping %s...', url)
    try:
        response = requests.get(url)
        if response.status_code == 200:
            return response.json()
        logging.error('get invalid status code %s while scraping %s',
                      response.status_code, url)
    except requests.RequestException:
        logging.error('error occurred while scraping %s', url, exc_info=True)

LIMIT = 10


# Return the list data (JSON) for one page: page 1 -> offset 0, page 2 -> offset 10, ...
def scrape_index(page):
    url = INDEX_URL.format(limit=LIMIT, offset=LIMIT * (page - 1))
    return scrape_api(url)

# The Ajax endpoint (also an XHR request) that returns a single movie's detail data
DETAIL_URL = 'https://spa1.scrape.center/api/movie/{id}'


# Return the detail data (JSON) for the movie with the given id
def scrape_detail(id):
    url = DETAIL_URL.format(id=id)
    return scrape_api(url)

TOTAL_PAGE = 10

MONGO_CONNECTION_STRING = 'mongodb://localhost:27017'
MONGO_DB_NAME = 'movies'
MONGO_COLLECTION_NAME = 'movies'

# Database-related setup; not the focus of this example
client = pymongo.MongoClient(MONGO_CONNECTION_STRING)
db = client[MONGO_DB_NAME]
collection = db[MONGO_COLLECTION_NAME]


# Upsert one movie document, keyed on the movie name so that
# re-running the script updates records instead of duplicating them
def save_data(data):
    collection.update_one({
        'name': data.get('name'),
    }, {
        '$set': data
    }, upsert=True)

def main():
    # Walk through every page of the movie list
    for page in range(1, TOTAL_PAGE + 1):
        # Ajax data for one list page; 'results' holds the movie entries
        index_data = scrape_index(page)
        for item in index_data.get('results'):
            id = item.get('id')
            detail_data = scrape_detail(id)
            save_data(detail_data)
            logging.info('data saved successfully')


if __name__ == '__main__':
    main()
```
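Once main() has finished, a quick query against MongoDB confirms that the upserts landed. A minimal check, assuming the same local MongoDB instance as above:

```python
import pymongo

client = pymongo.MongoClient('mongodb://localhost:27017')
collection = client['movies']['movies']

# Ten pages of ten movies each should yield at most 100 documents
print(collection.count_documents({}))
# Peek at one stored document's name field
print(collection.find_one({}, {'_id': 0, 'name': 1}))
```

Because save_data keys the upsert on the movie name, re-running the script refreshes existing documents rather than inserting duplicates.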