使用OpenCV实现视频去抖
整体步骤:
- 设置输入输出视频
- 寻找帧之间的移动:使用opencv的特征检测器,检测前一帧的特征,并使用Lucas-Kanade光流算法在下一帧跟踪这些特征,根据两组点,将前一个坐标系映射到当前坐标系完成刚性(欧几里得)变换,最后使用数组记录帧之间的运动。
- 计算帧之间的平滑运动:根据第二步的帧运动,计算轨迹,并使用移动平均滤波器对轨迹进行平滑处理,同时记录平滑轨迹与原始轨迹的差异。
- 将平滑运动应用到帧中,修复边界伪影:将第三步得到的差异加到原始轨迹,并将视频中心缩放4%缓解黑色的边界伪影。
1. 设置视频的输入输出
import numpy as np
import cv2
import os
def set_int_and_out_video(input_path: str, output_path: str, fps: int = None):
    """Open the input video and create the two output writers.

    Parameters
    ----------
    input_path : path to the source video file.
    output_path : directory where the result videos are written.
    fps : frame rate for the outputs; when None, the input's rate is used
        (falling back to 30 if the container reports none).

    Returns
    -------
    (cap, out, out2) where ``cap`` is the cv2.VideoCapture for the input,
    ``out`` writes the side-by-side comparison video (double width) and
    ``out2`` writes the stabilized video alone.
    """
    cap = cv2.VideoCapture(input_path)
    # Frame size of the source; the comparison video is twice as wide.
    w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    # MP4 container with the mp4v codec.
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    if fps is None:
        # cap.get may return 0 for a broken/unknown container; a zero fps
        # VideoWriter is useless, so fall back to a sane default.
        fps = int(cap.get(cv2.CAP_PROP_FPS)) or 30
    # Derive the output file names from the input name.  splitext (not
    # split('.')) keeps dots inside the base name intact, e.g. "my.clip.mp4".
    basename = os.path.splitext(os.path.basename(input_path))[0]
    output_path1 = os.path.join(output_path, basename + '_compete.mp4')
    output_path2 = os.path.join(output_path, basename + '_stabilized.mp4')
    out = cv2.VideoWriter(output_path1, fourcc, fps, (2 * w, h))  # before/after comparison
    out2 = cv2.VideoWriter(output_path2, fourcc, fps, (w, h))     # stabilized result only
    return cap, out, out2
2. 找出帧之间的运动
def find_motion(cap) -> np.ndarray:
    """Estimate the inter-frame motion of a video.

    For every pair of consecutive frames, detects Shi-Tomasi corners in the
    previous frame, tracks them into the current frame with Lucas-Kanade
    optical flow, and fits a rigid (Euclidean) transform mapping the
    previous coordinate frame onto the current one.

    Returns an (n_frames - 1, 3) float32 array whose rows are
    (dx, dy, d_angle).  Rows where estimation fails stay zero (no motion).
    """
    # Rewind to the first frame and grab it as the initial reference.
    cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
    ok, prev = cap.read()
    if not ok:
        return np.zeros((0, 3), np.float32)
    prev_gray = cv2.cvtColor(prev, cv2.COLOR_BGR2GRAY)
    n_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    # One (dx, dy, da) row per frame transition.
    transforms = np.zeros((n_frames - 1, 3), np.float32)
    # There are n_frames - 1 transitions; the original looped n_frames - 2
    # times and always left the last row zero (off-by-one).
    for i in range(n_frames - 1):
        # Detect up to 200 good corners in the previous frame.
        prev_pts = cv2.goodFeaturesToTrack(prev_gray, maxCorners=200,
                                           qualityLevel=0.01,
                                           minDistance=30, blockSize=3)
        success, curr = cap.read()
        if not success:
            break
        curr_gray = cv2.cvtColor(curr, cv2.COLOR_BGR2GRAY)
        if prev_pts is None or len(prev_pts) == 0:
            # No trackable features in this frame; leave the row as zero
            # motion and move on (the original would crash here).
            prev_gray = curr_gray
            continue
        # Track the corners into the current frame with Lucas-Kanade flow.
        curr_pts, status, _err = cv2.calcOpticalFlowPyrLK(prev_gray, curr_gray,
                                                          prev_pts, None)
        # Keep only the points the tracker actually found (status == 1);
        # this drops features lost to occlusion or leaving the frame.
        good = np.where(status == 1)[0]
        prev_pts = prev_pts[good]
        curr_pts = curr_pts[good]
        # Rigid (Euclidean) transform between the two point sets.  It can
        # be None when too few inliers exist; keep the zero row then.
        m, _inliers = cv2.estimateAffinePartial2D(prev_pts, curr_pts)
        if m is not None:
            dx = m[0, 2]
            dy = m[1, 2]
            da = np.arctan2(m[1, 0], m[0, 0])  # rotation angle
            transforms[i] = [dx, dy, da]
        prev_gray = curr_gray  # move to the next frame
        print(f"Frame: {i}/{n_frames} - Tracked points : {len(prev_pts)}")
    return transforms
3. 计算帧之间的平滑动作
# 在上一步中,计算了运动轨迹,共有三条曲线来显示运动(x,y,和角度)如何随时间变化。
# 此处将平滑这三条曲线:定义了一个移动平均滤波器,它接受任何曲线作为输入,并返回曲线的平滑版本。
def movingAverage(curve, radius):
    """Smooth a 1-D curve with a centered box (moving-average) filter.

    Parameters
    ----------
    curve : 1-D array of values to smooth.
    radius : neighbors averaged on each side; window size is 2*radius + 1.

    Returns
    -------
    np.ndarray of the same length as ``curve``.
    """
    if radius <= 0:
        # A window of size 1 changes nothing; also avoids the empty slice
        # [0:-0] the trimming below would produce for radius == 0.
        return np.array(curve, dtype=float, copy=True)
    window_size = 2 * radius + 1
    # Normalized box kernel.
    kernel = np.ones(window_size) / window_size
    # Edge-replicate padding so the curve ends are averaged over a full
    # window instead of being pulled toward zero.
    curve_pad = np.pad(curve, (radius, radius), 'edge')  # np.lib.pad is a deprecated alias
    # mode='same' keeps the padded length; trim the padding afterwards.
    curve_smoothed = np.convolve(curve_pad, kernel, mode='same')
    return curve_smoothed[radius:-radius]
# 对轨迹的三部分即x, y和角度进行平滑处理
def smooth(trajectory, smoothing_radius: int = 50):
    """Smooth every column of a trajectory independently.

    ``trajectory`` is an (n, k) array — here k == 3: (x, y, angle) — and
    each column is run through ``movingAverage`` with the given radius.
    Generalized from a hard-coded ``range(3)`` to any column count.
    """
    smoothed_trajectory = np.copy(trajectory)
    for col in range(trajectory.shape[1]):
        smoothed_trajectory[:, col] = movingAverage(trajectory[:, col],
                                                    radius=smoothing_radius)
    return smoothed_trajectory
def calculate_smooth_motion(transforms: np.ndarray):
    """Return a smoothed copy of the per-frame motion array.

    The raw (dx, dy, da) increments are integrated into an absolute
    trajectory, the trajectory is low-pass filtered with ``smooth``, and
    the (smooth - raw) correction is added back onto the increments.
    """
    # Integrate the per-frame increments into an absolute trajectory.
    raw_trajectory = np.cumsum(transforms, axis=0)
    # The gap between the filtered and raw trajectories is exactly the
    # correction each frame's motion needs.
    correction = smooth(raw_trajectory) - raw_trajectory
    return transforms + correction
4. 将平滑动作应用到帧中
# 修复边界伪影
def fixBorder(frame, scale: float = 1.04):
    """Zoom into the frame center to hide the black border artifacts that
    stabilization warping leaves at the edges.

    ``scale`` is the zoom factor; the default 1.04 (4%) matches the
    original hard-coded value, so existing callers are unaffected.
    """
    h, w = frame.shape[:2]
    # A rotation matrix with zero angle is a pure scaling about the center.
    T = cv2.getRotationMatrix2D((w / 2, h / 2), 0, scale)
    return cv2.warpAffine(frame, T, (w, h))
def apply_smooth_motion(cap, transforms_smooth: np.ndarray, out, out2) -> bool:
    """Warp every frame by its smoothed motion and write the results.

    Writes a side-by-side (original | stabilized) frame to ``out`` and the
    stabilized frame alone to ``out2``.  Returns True on completion.
    """
    # Rewind to the start of the video.
    cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
    n_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    # There is one transform per frame transition, i.e. n_frames - 1 frames
    # can be warped.  The original looped n_frames - 2 times and silently
    # dropped the last frame (off-by-one).  Bound by the array length too,
    # so a short transforms array cannot cause an IndexError.
    n_steps = min(n_frames - 1, len(transforms_smooth))
    for i in range(n_steps):
        success, frame = cap.read()
        if not success:
            break
        # Smoothed motion parameters for this frame.
        dx, dy, da = transforms_smooth[i]
        # Rebuild the 2x3 rigid transform matrix from (dx, dy, da).
        m = np.zeros((2, 3), np.float32)
        m[0, 0] = np.cos(da)
        m[0, 1] = -np.sin(da)
        m[1, 0] = np.sin(da)
        m[1, 1] = np.cos(da)
        m[0, 2] = dx
        m[1, 2] = dy
        # Warp the frame, then zoom slightly to hide the black borders the
        # warp introduces at the edges.
        frame_stabilized = cv2.warpAffine(frame, m, (w, h))
        frame_stabilized = fixBorder(frame_stabilized)
        # Original on the left, stabilized on the right, for comparison.
        frame_out = cv2.hconcat([frame, frame_stabilized])
        # Halve oversized comparison frames.
        # NOTE(review): the resized frame no longer matches the (2*w, h)
        # size ``out`` was opened with — VideoWriter may silently drop such
        # frames; confirm against set_int_and_out_video.
        if frame_out.shape[1] > 1920:
            frame_out = cv2.resize(frame_out,
                                   (frame_out.shape[1] // 2,
                                    frame_out.shape[0] // 2))
        out.write(frame_out)
        # Also keep a standalone copy of the stabilized video.
        out2.write(frame_stabilized)
    return True
最后对视频进行批量处理
# Batch-process every video in the input directory.
input_dir = './Video_Stabilization_data'
output_path = './video_result'
# Create the result directory if needed (idempotent).
os.makedirs(output_path, exist_ok=True)
for video in os.listdir(input_dir):
    input_path = os.path.join(input_dir, video)
    print(f'{input_path} is processing')
    cap, out, out2 = set_int_and_out_video(input_path, output_path, fps=30)
    transforms = find_motion(cap)
    transforms_smooth = calculate_smooth_motion(transforms)
    apply_smooth_motion(cap, transforms_smooth, out, out2)
    # Release every handle.  The original had a truncated `cap.rele`,
    # which raised AttributeError at runtime and leaked the capture.
    cap.release()
    out.release()
    out2.release()
    print(f'{input_path} completed')
# cv2.destroyAllWindows()
可以将视频转为GIF插入文档。
from moviepy.editor import VideoFileClip
def mp4_to_gif2(input_path, output_path):
    """Convert an MP4 file to a GIF at 15 fps using moviepy.

    The clip is explicitly closed afterwards to release the underlying
    ffmpeg reader process (the original leaked it).
    """
    clip = VideoFileClip(input_path)
    try:
        clip.write_gif(output_path, fps=15)
    finally:
        clip.close()
受限于导出的GIF过大,博客不能上传MP4文件,最后的对比就不放了。
运行环境:
moviepy==1.0.3
numpy==1.26.2
opencv_contrib_python==4.6.0.66
opencv_python==4.6.0.66