Scraping the first two pages of Beijing listings from Xiaozhu (小猪短租)
Reference URL: http://bj.xiaozhu.com/
Fields to scrape: the listing title, address, price, host name, host gender, and the URL of the host's avatar. The results are saved to TXT, JSON, and CSV files.
import csv
import json
import time

import requests
from bs4 import BeautifulSoup
from requests import RequestException


def get_one_page(url):
    """Fetch a page and return its HTML, or None on failure."""
    try:
        headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) '
                                 'AppleWebKit/537.36 (KHTML, like Gecko) '
                                 'Chrome/73.0.3683.103 Safari/537.36'}
        response = requests.get(url, headers=headers)
        # response.encoding = response.apparent_encoding
        if response.status_code == 200:
            return response.text
        return None
    except RequestException:
        return None


def get_detailurl(text):
    """Collect the detail-page URLs from one listing page."""
    soup = BeautifulSoup(text, 'lxml')
    result_btm_con = soup.find_all(name='div', class_='result_btm_con lodgeunitname')
    return [div['detailurl'] for div in result_btm_con]


def parse_one_page(text):
    """Extract title, address, price, host name, gender and avatar URL from a detail page."""
    soup = BeautifulSoup(text, 'lxml')  # parse with the lxml parser
    title = soup.select('.pho_info > h4 > em')
    address = soup.select('.pho_info > p')
    price = soup.find_all(name='span', class_='detail_avgprice')
    name = soup.find_all(name='a', class_='lorder_name')
    # member_ico marks a male host; female hosts carry member_ico1 instead
    sex = soup.find_all(name='div', class_='member_ico')
    img = soup.select('.member_pic img')
    yield {
        'title': title[0].string,
        'address': address[0]['title'],
        'price': price[0].string,
        'name': name[0]['title'],
        'sex': '男' if sex else '女',  # 男 = male, 女 = female
        'img': img[0]['src'],
    }


def write_to_file(content):
    with open('xiaozhu.txt', 'a', encoding='utf-8') as f:
        # dumps serializes the dict to a JSON string
        f.write(json.dumps(content, ensure_ascii=False) + '\n')


def write_to_json(content):
    with open('xiaozhu.json', 'a', encoding='utf-8') as f:
        f.write(json.dumps(content, ensure_ascii=False) + '\n')


def write_to_csv(content):
    # newline='' prevents blank lines between rows on Windows
    with open('xiaozhu.csv', 'a', encoding='utf-8', newline='') as f:
        fieldnames = ['title', 'address', 'price', 'name', 'sex', 'img']
        writer = csv.DictWriter(f, fieldnames=fieldnames)
        writer.writeheader()
        writer.writerows(content)


if __name__ == '__main__':
    url = 'http://bj.xiaozhu.com/search-duanzufang-p{}-0/'
    urls = [url.format(page) for page in range(1, 3)]  # pages 1 and 2
    content = []
    for url in urls:
        text1 = get_one_page(url)
        if text1 is None:
            continue
        detailurl = get_detailurl(text1)
        for link in detailurl:
            text2 = get_one_page(link)
            if text2 is None:
                continue
            for item in parse_one_page(text2):
                print(item)
                write_to_file(item)
                content.append(item)
            time.sleep(2)  # throttle requests to avoid hammering the site
    write_to_csv(content)
    write_to_json(content)
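A note on the storage formats: write_to_file appends one JSON object per line to xiaozhu.txt, so the file is newline-delimited JSON and has to be read back line by line; a single json.load over the whole file fails as soon as it contains more than one record. A minimal read-back sketch, assuming the xiaozhu.txt produced by the script above:

import json

# xiaozhu.txt holds one JSON object per line (written by write_to_file),
# so parse each line independently instead of json.load-ing the whole file.
records = []
with open('xiaozhu.txt', encoding='utf-8') as f:
    for line in f:
        line = line.strip()
        if line:
            records.append(json.loads(line))
print(len(records), 'records loaded')

write_to_json, by contrast, dumps the entire content list in one call, so each run appends one JSON array to xiaozhu.json; json.loads on one of its lines returns a list of records rather than a single record.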