Notes on the screen-capture flow in libobs
The OBS graphics thread is where screen capture starts. The code is as follows:
// Bilin's condensed annotated version
// Outer framework of the OBS graphics thread
void *obs_graphics_thread(void *param)
{
    // set the thread name
    os_set_thread_name("libobs: graphics thread");

    // set up the graphics-thread context
    // (the code that computes `interval` and `video_thread_name` is elided
    //  in this trimmed excerpt)
    struct obs_graphics_context context;
    context.interval = interval;
    context.frame_time_total_ns = 0;
    context.fps_total_ns = 0;
    context.fps_total_frames = 0;
    context.last_time = 0;
    context.video_thread_name = video_thread_name;

    // graphics-thread loop function
    while (obs_graphics_thread_loop(&context))
        ;

    return NULL;
}
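The trimmed version above references interval and video_thread_name without showing where they come from. Roughly speaking, interval is just the duration of one frame in nanoseconds derived from the configured frame rate, and the thread itself is started with pthread_create. The sketch below is illustrative only; frame_interval_ns and spawn_graphics_thread are hypothetical names, not libobs functions:
#include <pthread.h>
#include <stdint.h>

// Illustrative sketch only: how a per-frame interval could be derived from an
// FPS fraction, and how a graphics thread like the one above gets spawned.
static uint64_t frame_interval_ns(uint32_t fps_num, uint32_t fps_den)
{
    // e.g. 60/1 fps -> 16,666,666 ns per frame
    return 1000000000ULL * fps_den / fps_num;
}

static int spawn_graphics_thread(pthread_t *thread, void *obs_ptr)
{
    // same pattern libobs uses: entry point obs_graphics_thread,
    // parameter is the global obs object
    return pthread_create(thread, NULL, obs_graphics_thread, obs_ptr);
}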
Let's take a look at the main loop:
// Bilin's condensed annotated version
// Graphics-thread main loop
bool obs_graphics_thread_loop(struct obs_graphics_context *context)
{
    uint64_t frame_start = os_gettime_ns();
    uint64_t frame_time_ns;

    update_active_states();

    gs_enter_context(obs->video.graphics);
    gs_begin_frame();
    gs_leave_context();

    context->last_time =
        tick_sources(obs->video.video_time, context->last_time);

    output_frames();

    // render to the UI (commented out here as an experiment, see below)
    //render_displays();

    execute_graphics_tasks();

    // measure how long this frame took
    frame_time_ns = os_gettime_ns() - frame_start;

    // sleep until the next frame interval
    video_sleep(&obs->video, &obs->video.video_time, context->interval);

    // accumulate frame-count and timing statistics in the context
    context->frame_time_total_ns += frame_time_ns;
    context->fps_total_ns += (obs->video.video_time - context->last_time);
    context->fps_total_frames++;

    return !stop_requested();
}
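video_sleep is what paces this loop at the configured frame rate: conceptually it advances video_time by one interval and sleeps until that absolute timestamp. The sketch below is a simplified stand-in for that idea, not the actual video_sleep implementation (which also has to account for missed intervals):
#include <stdint.h>

// Simplified stand-in for frame pacing (NOT the real video_sleep): advance the
// target timestamp by one frame interval and sleep until that absolute time,
// so the loop ticks at the configured frame rate.
static void sleep_until_next_frame(uint64_t *video_time, uint64_t interval_ns)
{
    *video_time += interval_ns;
    os_sleepto_ns(*video_time); // libobs platform helper: sleep until an absolute timestamp
}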
render_displays(): a blind guess says this function renders to the UI. Comment it out and run; see the figure.
The UI disappears, as expected; the blind guess was correct.
The graphics thread does a lot, roughly a few dozen steps. Continuing to prune the leaves and keep the branches, the next key point is here:
// Bilin's condensed annotated version
// OBS renders the screen into a texture
static inline void render_main_texture(struct obs_core_video_mix *video)
{
    // canvas width and height
    uint32_t base_width = video->ovi.base_width;
    uint32_t base_height = video->ovi.base_height;

    struct vec4 clear_color;
    vec4_set(&clear_color, 0.0f, 0.0f, 0.0f, 0.0f);

    // First key point: where does the screen get rendered to? The answer is
    // explicit here: into video->render_texture (a gs_texture_t).
    gs_set_render_target_with_color_space(video->render_texture, NULL,
                                          video->render_space);
    gs_clear(GS_CLEAR_COLOR, &clear_color, 1.0f, 0);

    set_render_size(base_width, base_height);

    pthread_mutex_lock(&obs->data.draw_callbacks_mutex);

    for (size_t i = obs->data.draw_callbacks.num; i > 0; i--) {
        struct draw_callback *callback;
        callback = obs->data.draw_callbacks.array + (i - 1);
        callback->draw(callback->param, base_width, base_height);
    }

    pthread_mutex_unlock(&obs->data.draw_callbacks_mutex);

    obs_view_render(video->view);

    video->texture_rendered = true;

    GS_DEBUG_MARKER_END();
    profile_end(render_main_texture_name);
}
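The draw_callbacks loop above is the hook point behind the public obs_add_main_render_callback / obs_remove_main_render_callback API: anything registered there is called once per frame while render_texture is the active render target. A small usage sketch (the callback body is purely illustrative):
#include <obs.h>

// Illustrative callback: called once per frame with the canvas size; at this
// point the main render texture is the active render target, so gs_* draw
// calls made here end up in video->render_texture.
static void my_main_render_cb(void *param, uint32_t cx, uint32_t cy)
{
    (void)param;
    (void)cx;
    (void)cy;
    // draw overlays with gs_* calls here
}

void hook_main_render(void)
{
    obs_add_main_render_callback(my_main_render_cb, NULL);    // real libobs API
}

void unhook_main_render(void)
{
    obs_remove_main_render_callback(my_main_render_cb, NULL);
}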
After gs_set_render_target_with_color_space is called above, every frame of screen data is drawn onto that render target.
But once the screen has been captured, where does the video output go? The code below gives the answer.
After you click the record button in OBS Studio, execution reaches this function.
// Bilin's condensed annotated version
// The logic that copies data out of the render target lives here
static inline void output_video_data(struct obs_core_video_mix *video,
                                     struct video_data *input_frame, int count)
{
    const struct video_output_info *info;
    struct video_frame output_frame;
    bool locked;

    info = video_output_get_info(video->video);

    locked = video_output_lock_frame(video->video, &output_frame, count,
                                     input_frame->timestamp);
    if (locked) {
        if (video->gpu_conversion) {
            set_gpu_converted_data(&output_frame, input_frame,
                                   info);
        } else {
            copy_rgbx_frame(&output_frame, input_frame, info);
        }

        video_output_unlock_frame(video->video);
    }
}
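When GPU conversion is disabled, copy_rgbx_frame copies the RGBX pixels row by row, taking care that the source and destination line strides (linesize) may differ. A rough sketch of that kind of copy, assuming 4 bytes per pixel (a simplification of mine, not the exact libobs code):
#include <string.h>
#include <stdint.h>
#include <stddef.h>

// Rough sketch of a row-by-row RGBX copy between buffers whose line strides
// may differ; 4 bytes per pixel is assumed. Simplified, not the exact
// copy_rgbx_frame implementation.
static void copy_rgbx_rows(uint8_t *dst, uint32_t dst_linesize,
                           const uint8_t *src, uint32_t src_linesize,
                           uint32_t width, uint32_t height)
{
    for (uint32_t y = 0; y < height; y++) {
        memcpy(dst, src, (size_t)width * 4); // copy one row of visible pixels
        dst += dst_linesize;                 // then jump by each buffer's own stride
        src += src_linesize;
    }
}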
Now let's analyze another thread: the video_thread main loop. As long as video->stop is false, it keeps calling video_output_cur_frame to push out the current frame.
// Bilin's annotated, trimmed version
static void *video_thread(void *param)
{
    struct video_output *video = param;

    // set the thread name
    os_set_thread_name("video-io: video thread");

    // thread main loop
    while (os_sem_wait(video->update_semaphore) == 0) {
        if (video->stop)
            break;

        // keep calling video_output_cur_frame to hand out the current frame
        while (!video->stop && !video_output_cur_frame(video)) {
            os_atomic_inc_long(&video->total_frames);
        }
    }

    return NULL;
}
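The update_semaphore makes this a classic producer/consumer handoff: the producer side (the path shown earlier in output_video_data, which caches a frame) signals the semaphore once a frame is in the cache, and video_thread wakes up to drain it. A generic sketch of the pattern using the libobs threading helpers; everything except the os_sem_* calls is an illustrative name of mine:
#include <util/threading.h> // os_sem_t, os_sem_wait, os_sem_post (libobs helpers)
#include <stdbool.h>

// Generic producer/consumer sketch of the semaphore handoff; not libobs code.
struct frame_handoff {
    os_sem_t *sem;
    volatile bool stop;
};

static void producer_frame_ready(struct frame_handoff *h)
{
    // ... frame has been written into the cache ...
    os_sem_post(h->sem); // one post per cached frame wakes the consumer
}

static void consumer_loop(struct frame_handoff *h)
{
    while (os_sem_wait(h->sem) == 0) {
        if (h->stop)
            break;
        // ... take one frame out of the cache and hand it to the callbacks ...
    }
}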
This uses the core video_output structure, defined as follows:
// Bilin's annotated, trimmed version
struct video_output {
    // video output settings (video_output_info)
    struct video_output_info info;

    // handle of the video output thread
    pthread_t thread;
    // stop flag
    bool stop;

    os_sem_t *update_semaphore;
    uint64_t frame_time;

    // connected video inputs (the consumers that registered callbacks)
    DARRAY(struct video_input) inputs;

    struct cached_frame_info cache[MAX_CACHE_SIZE];

    volatile bool raw_active;
    volatile long gpu_refs;

    // (mutexes, cache indices and other fields are elided in this trimmed version)
};
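The inputs array is filled by consumers that register a raw-video callback through video_output_connect (and removed again with video_output_disconnect); this is how callbacks such as receive_video, mentioned in the next function, end up in the list. A small usage sketch with an illustrative callback:
#include <obs.h>
#include <media-io/video-io.h>

// Illustrative consumer: receives one raw frame per iteration of the
// video_thread loop above. The callback body is a placeholder.
static void my_raw_video_cb(void *param, struct video_data *frame)
{
    (void)param;
    // frame->data[] / frame->linesize[] / frame->timestamp are valid here
    (void)frame;
}

// Connect/disconnect against the video_t handle (real video-io API);
// passing NULL for the conversion means no scaling or format change.
void start_consuming(void)
{
    video_output_connect(obs_get_video(), NULL, my_raw_video_cb, NULL);
}

void stop_consuming(void)
{
    video_output_disconnect(obs_get_video(), my_raw_video_cb, NULL);
}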
The core logic for handing out the current frame:
// output the current cached frame
static inline bool video_output_cur_frame(struct video_output *video)
{
    // frame info
    struct cached_frame_info *frame_info;
    bool complete;
    bool skipped;

    /* -------------------------------- */

    pthread_mutex_lock(&video->data_mutex);

    frame_info = &video->cache[video->first_added];

    pthread_mutex_unlock(&video->data_mutex);

    /* -------------------------------- */

    pthread_mutex_lock(&video->input_mutex);

    for (size_t i = 0; i < video->inputs.num; i++) {
        struct video_input *input = video->inputs.array + i;
        struct video_data frame = frame_info->frame;

        // This is the key part:
        // input->callback(input->param, &frame) invokes the registered
        // callback -- e.g. receive_video() -- for further processing:
        // static void receive_video(void *param, struct video_data *frame)
        if (scale_video_output(input, &frame))
            input->callback(input->param, &frame);
    }

    pthread_mutex_unlock(&video->input_mutex);

    /* -------------------------------- */

    pthread_mutex_lock(&video->data_mutex);

    frame_info->frame.timestamp += video->frame_time;
    complete = --frame_info->count == 0;
    skipped = frame_info->skipped > 0;

    if (complete) {
        if (++video->first_added == video->info.cache_size)
            video->first_added = 0;

        if (++video->available_frames == video->info.cache_size)
            video->last_added = video->first_added;
    } else if (skipped) {
        --frame_info->skipped;
        os_atomic_inc_long(&video->skipped_frames);
    }

    pthread_mutex_unlock(&video->data_mutex);

    /* -------------------------------- */

    return complete;
}
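The bookkeeping at the end treats the cache array as a ring buffer: first_added walks forward and wraps around at cache_size, and a slot is only advanced past once its count drops to zero. A tiny illustrative sketch of the wrap-around step (a hypothetical helper, not libobs code):
#include <stddef.h>

// Hypothetical helper showing the ring-buffer wrap-around used above:
// advance an index modulo the cache size.
static inline size_t ring_advance(size_t index, size_t cache_size)
{
    return (index + 1 == cache_size) ? 0 : index + 1;
}

// e.g. with cache_size == 3: 0 -> 1 -> 2 -> 0 -> ...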
Write every line of code with care.