Winter Vacation Life Guide 10

# (1) Build the customized request object
# (2) Fetch the page source
# (3) Download the images


# Goal: download the images from the first ten pages
# Page 1 has no suffix; page n appends _n:
# https://sc.chinaz.com/tupian/qinglvtupian.html
# https://sc.chinaz.com/tupian/qinglvtupian_n.html

import os
import urllib.request
from lxml import etree

def create_request(page):
    if page == 1:
        url = 'https://sc.chinaz.com/tupian/xingganmeinvtupian.html'
    else:
        url = 'https://sc.chinaz.com/tupian/xingganmeinvtupian_' + str(page) + '.html'

    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/92.0.4515.159 Safari/537.36',
    }

    request = urllib.request.Request(url = url, headers = headers)
    return request

def get_content(request):
    response = urllib.request.urlopen(request)
    content = response.read().decode('utf-8')
    return content


def down_load(content):
    # Download the images
    # urllib.request.urlretrieve('image URL', 'file name')
    tree = etree.HTML(content)

    name_list = tree.xpath('//div[@class="item"]//img/@alt')

    # Sites that serve many images usually lazy-load them, so the real
    # URL lives in data-original rather than src
    src_list = tree.xpath('//div[@class="item"]//img/@data-original')

    print(len(name_list))
    os.makedirs('./ll', exist_ok=True)  # make sure the target folder exists
    for i in range(len(name_list)):
        name = name_list[i]
        src = src_list[i]
        url = 'https:' + src
        print(url)
        urllib.request.urlretrieve(url=url, filename='./ll/' + name + '.jpg')




if __name__ == '__main__':
    start_page = int(input('Enter the start page: '))
    end_page = int(input('Enter the end page: '))

    for page in range(start_page, end_page + 1):
        # (1) Build the customized request object
        request = create_request(page)
        # (2) Fetch the page source
        content = get_content(request)
        # print(content)
        # (3) Download the images
        down_load(content)

Today I learned how to download images with a web crawler.
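A note on the lazy-loading comment in down_load: pages like this often fill in src only for images near the top of the page and keep the real URL in data-original for the rest. Below is a minimal sketch of a per-image fallback, assuming the same div[@class="item"] layout; the helper name down_load_with_fallback and the fallback-to-src behavior are my additions, not part of the original script.

import os
import urllib.request
from lxml import etree

def down_load_with_fallback(content):
    # Variant of down_load that also tolerates images that are not
    # lazy-loaded: prefer data-original, fall back to the plain src.
    tree = etree.HTML(content)
    os.makedirs('./ll', exist_ok=True)
    for img in tree.xpath('//div[@class="item"]//img'):
        src = img.get('data-original') or img.get('src')
        name = img.get('alt')
        if src and name:
            urllib.request.urlretrieve('https:' + src, './ll/' + name + '.jpg')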

import requests
import re
import xlwt

headers = {
    "user-agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.90 Safari/537.36"
}
def get_page(url):
    try:
        response = requests.get(url, headers=headers)
        response.encoding = 'utf-8'
        if response.status_code == 200:
            print('Fetched page successfully')
            # print(response.encoding)
            return response.text
        else:
            print('Failed to fetch page')
    except Exception as e:
        print(e)
f = xlwt.Workbook(encoding='utf-8')
sheet01 = f.add_sheet(u'sheet1', cell_overwrite_ok=True)
sheet01.write(0, 0, '热词')      # row 1, col 1: the hot word
sheet01.write(0, 1, '热词解释')  # row 1, col 2: its explanation
sheet01.write(0, 2, '网址')      # row 1, col 3: the URL
with open('res1.txt', 'r', encoding='utf-8') as fopen:
    lines = fopen.readlines()
i = 0
for line in lines:
    word = line.strip()
    url = 'https://baike.baidu.com/item/{}'.format(word)
    print(url)
    page = get_page(url)
    if page is None:  # request failed; skip this word
        continue
    items = re.findall('<meta name="description" content="(.*?)">', page, re.S)
    print(items)
    if len(items) > 0:
        # write the word itself instead of re-reading the file with
        # linecache, so rows stay aligned even when a lookup fails
        sheet01.write(i + 1, 0, word)
        sheet01.write(i + 1, 1, items[0])
        sheet01.write(i + 1, 2, url)
        i += 1
print('Total entries scraped: ' + str(i))
print('Done!')
f.save('hotword_explain.xls')
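One detail that is easy to miss: requests percent-encodes the non-ASCII word in the URL path by itself, while plain urllib.request raises an error on such a URL. If you ever port this script to urllib, quote the term explicitly; a minimal sketch with a hypothetical word:

from urllib.parse import quote

word = '人工智能'  # hypothetical entry from res1.txt
url = 'https://baike.baidu.com/item/' + quote(word)
print(url)  # https://baike.baidu.com/item/%E4%BA%BA%E5%B7%A5%E6%99%BA%E8%83%BD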

Crawler code for scraping term definitions.
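For reference, the script expects res1.txt to hold one hot word per line (UTF-8). The two terms below are hypothetical examples, not from the original post:

人工智能
区块链

Each word whose page has a <meta name="description"> tag becomes one row in hotword_explain.xls: the word, the description text, and the baike.baidu.com URL it came from.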

 
