![复制代码](https://common.cnblogs.com/images/copycode.gif)
import requests
from bs4 import BeautifulSoup
def get_soup(url, timeout=30):
    """Fetch *url* and return the page parsed as a BeautifulSoup tree.

    Args:
        url: Page URL to fetch.
        timeout: Seconds to wait for the server. The original call had no
            timeout, so a stalled server would hang the script forever;
            the default keeps existing single-argument callers working.

    Returns:
        BeautifulSoup document parsed with the stdlib 'html.parser'.
    """
    req = requests.get(url, timeout=timeout)
    # The site serves UTF-8 but requests may mis-detect the charset from
    # the response headers, so force it before decoding.
    req.encoding = 'utf-8'
    return BeautifulSoup(req.text, 'html.parser')
# Fetch the campus-news list page and collect, in lockstep, each article's
# title (into `title`) and its link URL (into `a`).
soup = get_soup('http://news.gzcc.cn/html/xiaoyuanxinwen/')
li_list = soup.select('li')
title = []
a = []
for new in li_list:
    # Hoisted: the original ran the same .select() twice per <li>.
    text_divs = new.select('.news-list-text')
    # Only <li> entries containing a .news-list-text node are real articles.
    if text_divs:
        title.append(text_divs[0].select('.news-list-title')[0].text)
        a.append(new.a.attrs['href'])
# Visit each collected article URL and extract its body text and the
# metadata line ("show-info": publish date, author, etc.).
# NOTE: the original defined delw = ['\r','\n','\u3000','\xa0'] here but
# never used it anywhere in the script, so it has been removed.
info_list = []  # per-article metadata fields, split on the \xa0\xa0 separators
con_list = []   # per-article body text
for curl in a:
    con_soup = get_soup(curl)
    con_list.append(con_soup.select('#content')[0].text)
    info_list.append(con_soup.select('.show-info')[0].text.split("\xa0\xa0"))
# BUG FIX: the original appended ''.join(con_list[0]) on every iteration,
# so every entry of cs was a copy of the FIRST article's body.  Join the
# i-th entry instead.  (''.join on a str is an identity copy; kept only to
# preserve the original's intent of producing plain strings.)
cs = [''.join(body) for body in con_list]
# Print each article: "title url", then the non-empty metadata fields,
# then the body text.  zip replaces the original parallel-index
# range(len(...)) loop; the four lists are built in lockstep above.
for art_title, art_url, info, body in zip(title, a, info_list, cs):
    print(art_title + " " + art_url)
    for field in info:
        # Skip empty fragments and lone-space fragments left by the
        # \xa0\xa0 split (same condition as len(x) > 0 and x != ' ').
        if field and field != ' ':
            print(field)
    print(body)
【推荐】编程新体验,更懂你的AI,立即体验豆包MarsCode编程助手
【推荐】凌霞软件回馈社区,博客园 & 1Panel & Halo 联合会员上线
【推荐】抖音旗下AI助手豆包,你的智能百科全书,全免费不限次数
【推荐】博客园社区专享云产品让利特惠,阿里云新客6.5折上折
【推荐】轻量又高性能的 SSH 工具 IShell:AI 加持,快人一步