requests高级用法
解析json
# Parse a JSON response body: POST the KFC store-list endpoint with form
# data and decode the reply via Response.json().
import requests

payload = {
    'cname': '',
    'pid': '',
    'keyword': '500',
    'pageIndex': 1,
    'pageSize': 10,
}
res = requests.post(
    'http://www.kfc.com.cn/kfccda/ashx/GetStoreList.ashx?op=keyword',
    data=payload,
)
print(type(res.json()))
ssl认证(了解)
http协议:明文传输
https协议:http+ssl/tls
HTTP+SSL/TLS,也就是在http上又加了一层处理加密信息的模块,比http安全,可防止数据在传输过程中被窃取、改变,确保数据的完整性
https://zhuanlan.zhihu.com/p/561907474
遇到证书提示错误问题 ssl xxx
1 不验证证书
# Option 1 — skip TLS certificate verification entirely.
# This still works but emits an InsecureRequestWarning.
import requests

resp = requests.get('https://www.12306.cn', verify=False)
print(resp.status_code)
2 关闭警告
# Option 2 — skip verification AND silence the InsecureRequestWarning.
# NOTE: `from requests.packages import urllib3` is a deprecated compatibility
# alias; import urllib3 directly (it ships as a dependency of requests).
import requests
import urllib3

urllib3.disable_warnings()  # suppress InsecureRequestWarning from verify=False
respone = requests.get('https://www.12306.cn', verify=False)
print(respone.status_code)
3 手动携带证书(了解)
# Option 3 — supply a client certificate/key pair manually (rarely needed).
import requests

resp = requests.get(
    'https://www.12306.cn',
    cert=('/path/server.crt', '/path/key'),
)
print(resp.status_code)
使用代理(重要)
如果爬虫使用自身ip地址访问,很有可能被封ip地址,以后就访问不了了
我们可以使用代理ip
代理:收费和免费(不稳定)
# Route a request through a proxy.
# BUG FIX: the proxies-dict key must match the target URL's scheme — an
# 'http' entry is silently ignored for an https:// URL, so the original
# example never actually used the proxy.
import requests

# General shape: proxies={'http': 'host:port', 'https': 'host:port'}
# (paid proxies are stable; free ones are not)
res = requests.post(
    'https://www.cnblogs.com',
    proxies={'https': '60.167.91.34:33080'},
)
print(res.status_code)
高匿代理和透明代理
高匿:服务端拿不到真实客户端的ip地址
透明:服务端能拿到真实客户端的ip地址
后端如何拿到真实客户端ip地址
http请求头中有个:X-Forwarded-For: client1, proxy1, proxy2, proxy3
X-Forwarded-For
获得HTTP请求端真实的IP
超时设置
# Timeout: abort with a Timeout exception if no response arrives within
# the given number of seconds (deliberately tiny here to force the error).
import requests

resp = requests.get('https://www.baidu.com', timeout=0.0001)
异常处理
# Exception handling: catch the narrow timeout classes first, then the
# requests base exception as a catch-all.
# FIXES: explicit imports instead of a wildcard; a timeout this small fails
# while CONNECTING (ConnectTimeout), which the original ReadTimeout-only
# branch missed, so it always fell through to the generic handler.
import requests
from requests.exceptions import ConnectTimeout, ReadTimeout, RequestException

try:
    r = requests.get('http://www.baidu.com', timeout=0.00001)
except (ReadTimeout, ConnectTimeout):
    print('===:')
except RequestException:
    print('Error')
上传文件
# Upload a file: pass an open binary handle via the files= parameter.
# FIX: the original opened the file and never closed it (resource leak);
# a with-block guarantees the handle is released.
import requests

with open('美女.png', 'rb') as fp:
    files = {'file': fp}
    respone = requests.post('http://httpbin.org/post', files=files)
print(respone.status_code)
代理池搭建
requests 发送请求使用代理
代理从哪来
公司花钱买
搭建免费的代理池:https://github.com/jhao104/proxy_pool
python:爬虫+flask写的
架构:
搭建步骤:
1 git clone https://github.com/jhao104/proxy_pool.git
2 使用pycharm打开
3 安装依赖:pip install -r requirements.txt
4 修改配置文件(redis地址即可 注意)
HOST = '0.0.0.0'
PORT = 5010
DB_CONN = 'redis://127.0.0.1:6379/0'
PROXY_FETCHER
5 启动爬虫程序
python proxyPool.py schedule
6 启动服务端
python proxyPool.py server
7 使用随机一个免费代理 地址是服务启动下的地址
地址栏中输入:http://192.168.1.117:5010/get/
使用随机代理发送请求
# Fetch one random proxy from the local proxy-pool service, then send a
# request through it.
# FIXES: import urllib3 directly (requests.packages is a deprecated alias);
# use res.get('https') so a pool entry without that key doesn't KeyError.
import requests
import urllib3

urllib3.disable_warnings()  # suppress InsecureRequestWarning from verify=False

res = requests.get('http://192.168.1.117:5010/get/').json()
proxies = {}
# Key must match the scheme the proxy supports, or requests ignores it.
if res.get('https'):
    proxies['https'] = res['proxy']
else:
    proxies['http'] = res['proxy']
print(proxies)
res = requests.post('https://www.cnblogs.com', proxies=proxies, verify=False)
print(res)
django后端获取客户端的ip
建立django后端---》index地址 ---》访问就返回访问者的ip
django代码 ---》 不要忘记改配置文件
路由
path('',index),
视图函数
def index(request):
    """Return the requesting client's IP address as the response body.

    REMOTE_ADDR is the address of the directly-connected peer; behind a
    proxy this is the proxy's IP, and the real client would instead be in
    the X-Forwarded-For header.
    """
    client_ip = request.META.get('REMOTE_ADDR')
    print('ip地址是', client_ip)
    return HttpResponse(client_ip)
测试端
# Client side: hammer the endpoint with concurrent requests so the server
# log shows many distinct (or proxied) client IPs.
from threading import Thread

import requests


def task():
    """Hit the test endpoint once and print the body (the reported IP)."""
    res = requests.get('http://101.43.19.239/')
    print(res.text)


# FIX: the original looped range(10000000), creating threads without bound
# until the process died of resource exhaustion. Spawn a bounded batch and
# join them instead.
threads = [Thread(target=task) for _ in range(100)]
for t in threads:
    t.start()
for t in threads:
    t.join()
爬取某视频网站
# Scrape a video site: listing page -> per-video status API -> real mp4 URL.
import os
import re

import requests

res = requests.get('https://www.pearvideo.com/category_loading.jsp?reqType=5&categoryId=1&start=0')
video_list = re.findall('<a href="(.*?)" class="vervideo-lilink actplay">', res.text)
os.makedirs('./video', exist_ok=True)  # FIX: the output dir may not exist yet
for i in video_list:
    video_id = i.split('_')[-1]
    real_url = 'https://www.pearvideo.com/' + i
    # The status API rejects requests without a matching Referer (anti-leech).
    headers = {
        'Referer': 'https://www.pearvideo.com/video_%s' % video_id
    }
    res1 = requests.get('https://www.pearvideo.com/videoStatus.jsp?contId=%s&mrd=0.29636538326105044' % video_id,
                        headers=headers).json()
    mp4_url = res1["videoInfo"]['videos']['srcUrl']
    # The returned URL contains a fake timestamp segment; replacing it with
    # 'cont-<video_id>' yields the real downloadable file URL.
    mp4_url = mp4_url.replace(mp4_url.split('/')[-1].split('-')[0], 'cont-%s' % video_id)
    print(mp4_url)
    # FIX: stream the download; iter_content() with no chunk_size yields one
    # byte per iteration, which is pathologically slow for video files.
    res2 = requests.get(mp4_url, stream=True)
    with open('./video/%s.mp4' % video_id, 'wb') as f:
        for chunk in res2.iter_content(chunk_size=1024 * 64):
            f.write(chunk)
mp4_url = 'https://video.pearvideo.com/mp4/short/20171204/1678938313577-11212458-hd.mp4'
爬取新闻
# Scrape the news listing: each <ul class="article"> holds <li> items, each
# carrying a title (h3), link (a), summary (p) and thumbnail (img).
import requests
from bs4 import BeautifulSoup

res = requests.get('https://www.autohome.com.cn/all/1/#liststart')
soup = BeautifulSoup(res.text, 'html.parser')
for ul in soup.find_all(name='ul', class_='article'):
    for li in ul.find_all(name='li'):
        h3 = li.find(name='h3')
        if not h3:
            continue  # skip spacer <li> elements that carry no article
        title = h3.text
        url = 'https:' + li.find('a').attrs['href']
        desc = li.find('p').text
        img = li.find(name='img').attrs['src']
        print('''
新闻标题:%s
新闻连接:%s
新闻摘要:%s
新闻图片:%s
''' % (title, url, desc, img))
小练习 多线程+代理池 爬取新闻关键字
建立代理池 根据上面的建立代理池步骤建立池子
git clone https://github.com/jhao104/proxy_pool.git
代码
"""
长风破浪会有时 直挂云帆济沧海
@time: 2023/3/17 15:09
"""
import requests
from threading import Thread
from requests.packages import urllib3
from bs4 import BeautifulSoup
urllib3.disable_warnings()
res = requests.get('http://127.0.0.1:5010/get/').json()
proxies = {}
if res['https']:
proxies['https'] = res['proxy']
else:
proxies['http'] = res['proxy']
print(proxies)
def task():
for i in range(2):
res = requests.get('https://www.autohome.com.cn/all/%s/#liststart' % i, proxies=proxies, verify=False)
soup = BeautifulSoup(res.text, 'html.parser')
ul_list = soup.find_all(name='ul', class_='article')
for ul in ul_list:
li_list = ul.find_all(name='li')
for li in li_list:
h3 = li.find(name='h3')
if h3:
title = h3.text
url = 'https:' + li.find('a').attrs['href']
desc = li.find('p').text
img = li.find(name='img').attrs['src']
print('''
新闻标题:%s
新闻连接:%s
新闻摘要:%s
新闻图片:%s
''' % (title, url, desc, img))
for i in range(10):
t = Thread(target=task)
t.start()
正、反向代理
https://www.cnblogs.com/liuqingzheng/p/10521675.html
【推荐】国内首个AI IDE,深度理解中文开发场景,立即下载体验Trae
【推荐】编程新体验,更懂你的AI,立即体验豆包MarsCode编程助手
【推荐】抖音旗下AI助手豆包,你的智能百科全书,全免费不限次数
【推荐】轻量又高性能的 SSH 工具 IShell:AI 加持,快人一步
· 震惊!C++程序真的从main开始吗?99%的程序员都答错了
· 别再用vector<bool>了!Google高级工程师:这可能是STL最大的设计失误
· 单元测试从入门到精通
· 【硬核科普】Trae如何「偷看」你的代码?零基础破解AI编程运行原理
· 上周热点回顾(3.3-3.9)