One-click download of Xinhua News Agency photo news

The script expects the URL of the first page of a Xinhua News Agency photo-news article, for example:

http://sn.news.cn/2022-02/26/c_1128419282.htm
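
The follow-up pages of the same article keep that URL and insert _2, _3, ... before the .htm suffix; that is the pattern the script below relies on when it walks through the remaining photos. A minimal sketch of how those page URLs are derived:

first_page = 'http://sn.news.cn/2022-02/26/c_1128419282.htm'

# page N (N >= 2) keeps the same path but inserts '_N' before '.htm'
for n in range(2, 4):
    print(first_page[:-4] + f'_{n}.htm')
# -> http://sn.news.cn/2022-02/26/c_1128419282_2.htm
# -> http://sn.news.cn/2022-02/26/c_1128419282_3.htm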

import requests
import bs4
from urllib.parse import urljoin

headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.102 Safari/537.36 Edg/98.0.1108.62'}

def downimg(name, img):
    # download one image and save it as <name>.jpg in the current directory
    h = requests.get(img, headers=headers)
    with open(f'./{name}.jpg', 'wb') as f:
        f.write(h.content)

def main(url):
    h = requests.get(url, headers=headers)
    print(h.status_code, h.url)
    if h.status_code != 200:
        return h.status_code
    h = bs4.BeautifulSoup(h.content.decode(), 'html.parser')
    标题 = h.find(id='title').text.strip()
    # the img src on these pages is relative, so resolve it against the page URL
    图片地址 = urljoin(url, h.find(id="content").find('img').attrs['src'])
    downimg('1', 图片地址)
    txt = []
    for i in h.find(id="content").findAll('p'):
        if i.text == '':
            continue
        txt.append(''.join(i.text.split()))
    # follow-up pages are c_XXXX_2.htm, c_XXXX_3.htm, ...
    for u in range(2, 10):
        ur = url[:-4] + f'_{u}.htm'
        h = requests.get(ur, headers=headers)
        print(h.status_code, h.url)
        if h.status_code != 200:
            # no more pages; stop paginating but still write out the text below
            print('错误代码:', h.status_code)
            break
        h = bs4.BeautifulSoup(h.content.decode(), 'html.parser')
        图片地址 = urljoin(ur, h.find(id="content").find('img').attrs['src'])
        downimg(f'{u}', 图片地址)
        for i in h.find(id="content").findAll('p'):
            if i.text == '':
                continue
            txt.append(''.join(i.text.split()))
    # write the title and the de-duplicated paragraphs to a text file
    with open('./新闻稿.txt', 'w', encoding='utf-8') as f:
        f.write(标题 + '\n')
        f.write('\n'.join(dict.fromkeys(txt)))  # dict.fromkeys keeps paragraph order


if __name__=='__main__':
    # url = input('URL of the first page of the Xinhua photo-news article: ')
    url = 'http://sn.news.cn/2022-02/26/c_1128419282.htm'
    main(url)
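
Running the script as-is saves the photos as 1.jpg, 2.jpg, ... in the working directory and writes the article title followed by the de-duplicated paragraphs to 新闻稿.txt.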


posted @ 2022-02-27 16:48  幽见〆南山