python 图片爬取

百度:

import requests
from lxml import etree

# Scrape thumbnail images from Baidu image search via its async JSON endpoint
# (image.baidu.com/search/acjson) and save them as ./0.jpg, ./1.jpg, ...
Search_term = input('请输入需要搜索的关键词')
page = input('请输入要爬取多少页(一页30张图片):')  # fixed typo: 章 -> 张
page = int(page) + 1
header = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 11_1_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36'
}
n = 0   # running counter used to name the saved image files
pn = 1  # 1-based offset of the first image on the current page; Baidu serves 30 per request
for m in range(1, page):
    url = 'https://image.baidu.com/search/acjson?'

    param = {
        'tn': 'resultjson_com',
        'logid': '8846269338939606587',
        'ipn': 'rj',
        'ct': '201326592',
        'is': '',
        'fp': 'result',
        'queryWord': Search_term,
        'cl': '2',
        'lm': '-1',
        'ie': 'utf-8',
        'oe': 'utf-8',
        'adpicid': '',
        'st': '-1',
        'z': '',
        'ic': '',
        'hd': '',
        'latest': '',
        'copyright': '',
        'word': Search_term,
        's': '',
        'se': '',
        'tab': '',
        'width': '',
        'height': '',
        'face': '0',
        'istype': '2',
        'qc': '',
        'nc': '1',
        'fr': '',
        'expermode': '',
        'force': '',
        'cg': 'girl',
        'pn': pn,  # index of the first image to fetch on this request
        'rn': '30',
        'gsm': '1e',
    }
    page_text = requests.get(url=url, headers=header, params=param)
    page_text.encoding = 'utf-8'
    page_text = page_text.json()
    # 'data' may be absent or empty on a failed/exhausted query; guard instead of crashing.
    info_list = page_text.get('data', [])
    if info_list:
        # The last element of 'data' is an empty placeholder object — drop it.
        del info_list[-1]
    # Some entries lack 'thumbURL'; skip those rather than raising KeyError.
    img_path_list = [i['thumbURL'] for i in info_list if 'thumbURL' in i]

    for img_url in img_path_list:
        img_data = requests.get(url=img_url, headers=header).content
        save_path = './' + str(n) + '.jpg'
        with open(save_path, 'wb') as fp:
            fp.write(img_data)
        n = n + 1

    # Advance a full page of 30; the original '+= 29' re-downloaded one
    # duplicate image at every page boundary.
    pn += 30


必应:
基于Python爬取Bing图片_钱彬 (Qian Bin)的博客-CSDN博客_爬取bing图片
import requests
from lxml import etree

# Fetch one batch of Bing image-search results (cn.bing.com/images/async)
# and print every 'src' attribute found in the returned HTML fragment.
header = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 11_1_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36'
}
word = '狗'
param = {
    'q': word,
    'first': '',   # result offset to start from
    'count': '35',  # number of results per batch
    'cw': '1177',
    'ch': '500',
    'relp': '35',
    'tsc': 'ImageHoverTitle',
    'datsrc': 'I',
    'layout': 'RowBased_Landscape',
    'mmasync': '1',
    'dgState': 'x*292_y*1367_h*189_c*1_i*36_r*8',
    'IG': 'AE35C967F6624BE1845980CFA2CA81CD',
    'SFX': '2',
    'iid': 'images.5559',
}

url = 'https://cn.bing.com/images/async?'
response = requests.get(url=url, headers=header, params=param)
response.encoding = 'utf-8'
print(response.text)

# Parse the fragment and collect every src attribute value (image URLs).
tree = etree.HTML(response.text)
src_urls = tree.xpath('//@src')
print(src_urls)
print(len(src_urls))


360:
import requests

# Query the 360 image-search JSON API (image.so.com/j) and print the
# direct image URL of every result in the response's 'list' array.
header = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 11_1_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36'
}
word = '猫'
param = {
    'callback': '',
    'q': word,        # search keyword
    'qtag': '',
    'pd': '1',
    'pn': '60',       # number of images to fetch
    'correct': word,  # search keyword (spell-corrected field)
    'adstar': '0',
    'tab': 'all',
    'sid': 'cb203d75cf822734afc85fb1ee85e2a6',
    'ras': '0',
    'cn': '0',
    'gn': '0',
    'kn': '0',
    'crn': '0',
    'bxn': '0',
    'cuben': '0',
    'pornn': '0',
    'manun': '3',
    'ie': 'utf-8',
    'src': 'hao_360so',
    'sn': '60',
    'ps': '63',
    'pc': '63',
    '_': '1658834743371'  # cache-busting timestamp; presumably ignored server-side — TODO confirm
}

url = 'https://image.so.com/j?'
html = requests.get(url=url, headers=header, params=param)
html.encoding = 'utf-8'
page_text = html.json()
print(page_text)

pages = page_text['list']
print(len(pages))
# Restored loop indentation (the pasted original had the body at top level,
# which is a SyntaxError): print each result's image URL, one per blank-separated line.
for i in pages:
    print(i['img'])
    print()

搜狗:
import requests

# Query the Sogou picture-search JSON API (pic.sogou.com/napi/pc/searchList)
# and print the picture URL of every item in data.items.
header = {
    'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 11_1_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36'
}
word = '狗'
param = {
    'mode': '1',
    'start': '0',      # result offset to start from
    'xml_len': '480',  # results per page (API caps at 100 per the original note)
    'query': word,
}

url = 'http://pic.sogou.com/napi/pc/searchList?'
html = requests.get(url=url, headers=header, params=param)
html.encoding = 'utf-8'
# print(html.text)
page_text = html.json()
print(page_text)

pages = page_text['data']
pages = pages['items']
# Restored loop indentation (the pasted original had the body at top level,
# which is a SyntaxError): print each item's URL, then the total count.
for i in pages:
    print(i['picUrl'])
    print()
print(len(pages))


posted @ 2022-07-26 21:54  记录——去繁就简  阅读(84)  评论(0编辑  收藏  举报