OpenGL Panoramic Video

    Playing a panoramic video is actually much the same as playing an ordinary one: decoding can still be done with ffmpeg. The difference is that each decoded frame has to be drawn as a texture on a sphere rather than on a flat surface (I use a sphere here; some formats are apparently meant to be mapped onto other shapes such as a tetrahedron, but I haven't looked into that). So texture mapping is the key piece on the rendering side.

1. Computing the sphere's vertex and texture coordinates

    Computing the sphere's vertex and texture coordinates is arguably the key part of the whole thing. The code here follows the article "android opengl播放全景视频".

int cap_H = 1;//must be greater than 0, and cap_H should equal cap_W
int cap_W = 1;//angle step (in degrees) used when tessellating the sphere

float* verticals;
float* UV_TEX_VERTEX;

........................

void getPointMatrix(GLfloat radius)
{
    verticals = new float[(180 / cap_H) * (360 / cap_W) * 6 * 3];
    UV_TEX_VERTEX = new float[(180 / cap_H) * (360 / cap_W) * 6 * 2];

    float x = 0;
    float y = 0;
    float z = 0;

    int index = 0;
    int index1 = 0;
    float r = radius;//sphere radius
    double d = cap_H * PI / 180;//angle step converted to radians
    for (int i = 0; i < 180; i += cap_H) {
        double d1 = i * PI / 180;
        for (int j = 0; j < 360; j += cap_W) {
            //vertex coordinates of a tiny quad patch on the sphere (two triangles, hence six vertices)
            double d2 = j * PI / 180;
            verticals[index++] = (float)(x + r * sin(d1 + d) * cos(d2 + d));
            verticals[index++] = (float)(y + r * cos(d1 + d));
            verticals[index++] = (float)(z + r * sin(d1 + d) * sin(d2 + d));
            //texture coordinates for the same triangles
            UV_TEX_VERTEX[index1++] = (j + cap_W) * 1.0f / 360;
            UV_TEX_VERTEX[index1++] = (i + cap_H) * 1.0f / 180;

            verticals[index++] = (float)(x + r * sin(d1) * cos(d2));
            verticals[index++] = (float)(y + r * cos(d1));
            verticals[index++] = (float)(z + r * sin(d1) * sin(d2));

            UV_TEX_VERTEX[index1++] = j * 1.0f / 360;
            UV_TEX_VERTEX[index1++] = i * 1.0f / 180;

            verticals[index++] = (float)(x + r * sin(d1) * cos(d2 + d));
            verticals[index++] = (float)(y + r * cos(d1));
            verticals[index++] = (float)(z + r * sin(d1) * sin(d2 + d));

            UV_TEX_VERTEX[index1++] = (j + cap_W) * 1.0f / 360;
            UV_TEX_VERTEX[index1++] = i * 1.0f / 180;

            verticals[index++] = (float)(x + r * sin(d1 + d) * cos(d2 + d));
            verticals[index++] = (float)(y + r * cos(d1 + d));
            verticals[index++] = (float)(z + r * sin(d1 + d) * sin(d2 + d));

            UV_TEX_VERTEX[index1++] = (j + cap_W) * 1.0f / 360;
            UV_TEX_VERTEX[index1++] = (i + cap_H) * 1.0f / 180;

            verticals[index++] = (float)(x + r * sin(d1 + d) * cos(d2));
            verticals[index++] = (float)(y + r * cos(d1 + d));
            verticals[index++] = (float)(z + r * sin(d1 + d) * sin(d2));

            UV_TEX_VERTEX[index1++] = j * 1.0f / 360;
            UV_TEX_VERTEX[index1++] = (i + cap_H) * 1.0f / 180;

            verticals[index++] = (float)(x + r * sin(d1) * cos(d2));
            verticals[index++] = (float)(y + r * cos(d1));
            verticals[index++] = (float)(z + r * sin(d1) * sin(d2));

            UV_TEX_VERTEX[index1++] = j * 1.0f / 360;
            UV_TEX_VERTEX[index1++] = i * 1.0f / 180;
        }
    }
}
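
The two arrays above are allocated with new[] and never released in the post; a minimal cleanup helper might look like this (the function name is my own, not part of the original code):

void freePointMatrix()
{
    //release the buffers allocated by getPointMatrix(); call once rendering is finished
    delete[] verticals;
    verticals = NULL;
    delete[] UV_TEX_VERTEX;
    UV_TEX_VERTEX = NULL;
}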

2. Decoding the file

    I use ffmpeg to decode the file, with the simplest possible single-threaded loop and no attempt at anything more sophisticated. Decoded frames are pushed into a circular queue.
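
The decode thread below uses a Vid_Frame type, a global frame_queue and a MAXSIZE constant that the post never shows. Here is a minimal sketch of what they might look like, with the fields inferred from how the code uses them (all names and sizes are assumptions):

#define MAXSIZE 16                      //assumed queue capacity

typedef struct Vid_Frame {
    AVFrame *frame;                     //plane pointers filled by avpicture_fill / sws_scale
    uint8_t *buffer;                    //raw BGR24 pixel buffer handed to OpenGL
    int      width;
    int      height;
    int64_t  pts;
} Vid_Frame;

typedef struct Frame_Queue {
    Vid_Frame        queue[MAXSIZE];
    int              front;             //next frame to render
    int              rear;              //next free slot for the decoder
    int              size;              //number of frames currently queued
    CRITICAL_SECTION cs;                //guards front/rear/size
} Frame_Queue;

Frame_Queue frame_queue;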

DWORD WINAPI ThreadFunc(LPVOID n)
{
    AVFormatContext    *pFormatCtx;
    int                i, videoindex;
    AVCodec            *pCodec;
    AVCodecContext    *pCodecCtx = NULL;

    char filepath[] = "H:\\F-5飞行.mp4";

    av_register_all();
    avformat_network_init();
    pFormatCtx = avformat_alloc_context();

    if (avformat_open_input(&pFormatCtx, filepath, NULL, NULL) != 0){
        printf("Couldn't open input stream.(无法打开输入流)\n");
        return -1;
    }

    if (avformat_find_stream_info(pFormatCtx, NULL)<0)
    {
        printf("Couldn't find stream information.(无法获取流信息)\n");
        return -1;
    }

    videoindex = -1;
    for (i = 0; i<pFormatCtx->nb_streams; i++)
    if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
    {
        videoindex = i;
        break;
    }
    if (videoindex == -1)
    {
        printf("Didn't find a video stream.(没有找到视频流)\n");
        return -1;
    }
    pCodecCtx = pFormatCtx->streams[videoindex]->codec;
    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
    if (pCodec == NULL)
    {
        printf("Codec not found.(没有找到解码器)\n");
        return -1;
    }
    if (avcodec_open2(pCodecCtx, pCodec, NULL)<0)
    {
        printf("Could not open codec.(无法打开解码器)\n");
        return -1;
    }

    AVFrame    *pFrame;
    pFrame = av_frame_alloc();
    int ret, got_picture;
    AVPacket *packet = (AVPacket *)av_malloc(sizeof(AVPacket));

    struct SwsContext *img_convert_ctx;

    int index = 0;
    while (av_read_frame(pFormatCtx, packet) >= 0)
    {
        if (packet->stream_index == videoindex)
        {
            ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
            if (ret < 0)
            {
                printf("Decode Error.(解码错误)\n");
                continue;
            }
            if (got_picture)
            {
                index++;

flag_wait:
                if (frame_queue.size >= MAXSIZE)
                {
                    printf("size = %d   I'm WAITING ... \n", frame_queue.size);
                    Sleep(10);
                    goto flag_wait;
                }

                EnterCriticalSection(&frame_queue.cs);

                Vid_Frame *vp;
                vp = &frame_queue.queue[frame_queue.rear];

                vp->frame->pts = pFrame->pts;

                /* alloc or resize hardware picture buffer */
                if (vp->buffer == NULL || vp->width != pFrame->width || vp->height != pFrame->height)
                {
                    if (vp->buffer != NULL)
                    {
                        av_free(vp->buffer);
                        vp->buffer = NULL;
                    }

                    int iSize = avpicture_get_size(AV_PIX_FMT_BGR24, pFrame->width, pFrame->height);
                    vp->buffer = (uint8_t *)av_mallocz(iSize);


                    vp->width = pFrame->width;
                    vp->height = pFrame->height;

                }

                avpicture_fill((AVPicture *)vp->frame, vp->buffer, AV_PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height);

                if (vp->buffer)
                {

                    img_convert_ctx = sws_getContext(vp->width, vp->height, (AVPixelFormat)pFrame->format, vp->width, vp->height,
                        AV_PIX_FMT_BGR24, SWS_BICUBIC, NULL, NULL, NULL);
                    sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize, 0, vp->height, vp->frame->data, vp->frame->linesize);
                    sws_freeContext(img_convert_ctx);

                    vp->pts = pFrame->pts;
                }
                    
                frame_queue.size++;
                frame_queue.rear = (frame_queue.rear + 1) % MAXSIZE;

                LeaveCriticalSection(&frame_queue.cs);

                //MySaveBmp("f5.bmp", vp->buffer, vp->width, vp->height);

                //int nHeight = vp->height;
                //int nWidth = vp->width;

                //Mat tmp_mat = Mat::zeros(nHeight, nWidth, CV_32FC3);

                //int k = 0;
                //for (int i = 0; i < nHeight; i++)
                //{
                //    for (int j = 0; j < nWidth; j++)
                //    {
                //        tmp_mat.at<Vec3f>(i, j)[0] = vp->buffer[k++] / 255.0f;
                //        tmp_mat.at<Vec3f>(i, j)[1] = vp->buffer[k++] / 255.0f;
                //        tmp_mat.at<Vec3f>(i, j)[2] = vp->buffer[k++] / 255.0f;
                //    }
                //}

                //imwrite("mat_Image.jpg", tmp_mat);

                //namedWindow("Marc_Antony");
                //imshow("Marc_Antony", tmp_mat);

                //waitKey(0);
                
            }
        }
        av_free_packet(packet);
    }

    av_frame_free(&pFrame);
    av_free(packet);
    avcodec_close(pCodecCtx);
    avformat_close_input(&pFormatCtx);

    return 0;
}

Here frame_queue is a circular queue: frames are enqueued as they are decoded and dequeued when rendered. I haven't measured it, but the few files I tried were all 4K, so decoding probably takes quite a while; hardware decoding would likely do much better here. Audio is not handled at all.
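
init() below also calls an initQueue() helper that the post does not show. Under the queue layout sketched above it could be as simple as this (again an assumption, not the author's code):

void initQueue(Frame_Queue *q)
{
    //zero the bookkeeping, pre-allocate the AVFrame in each slot, and create the lock
    memset(q, 0, sizeof(*q));
    for (int i = 0; i < MAXSIZE; i++)
        q->queue[i].frame = av_frame_alloc();
    InitializeCriticalSection(&q->cs);
}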

3. Rendering

(1) Initialization

void init(void)
{
    initQueue(&frame_queue);

    glGenTextures(1, &texturesArr);    //create the texture object

    glBindTexture(GL_TEXTURE_2D, texturesArr);

    //Mat image = imread("1.jpg");
    //glTexImage2D(GL_TEXTURE_2D, 0, 3, image.cols, image.rows, 0, GL_BGR_EXT, GL_UNSIGNED_BYTE, image.data);

    //IplImage *image = cvLoadImage("4.png", 1);
    //IplImage *image = cvLoadImage("5.png", 1);
    //glTexImage2D(GL_TEXTURE_2D, 0, 3, image->width, image->height, 0, GL_BGR_EXT, GL_UNSIGNED_BYTE, image->imageData);
    //printf("nChannels is %d \n", image->nChannels);
    //cvNamedWindow("1");
    //cvShowImage("1", image);
    //cvWaitKey(0);

    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);    //linear filtering
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);    //linear filtering


    glClearColor(0.0, 0.0, 0.0, 0.0);
    glClearDepth(1);
    glShadeModel(GL_SMOOTH);
    //GLfloat _ambient[] = { 1.0, 1.0, 1.0, 1.0 };
    //GLfloat _diffuse[] = { 1.0, 1.0, 1.0, 1.0 };
    //GLfloat _specular[] = { 1.0, 1.0, 1.0, 1.0 };
    //GLfloat _position[] = { 255, 255, 255, 0 };
    //glLightfv(GL_LIGHT0, GL_AMBIENT, _ambient);
    //glLightfv(GL_LIGHT0, GL_DIFFUSE, _diffuse);
    //glLightfv(GL_LIGHT0, GL_SPECULAR, _specular);
    //glLightfv(GL_LIGHT0, GL_POSITION, _position);
    //glEnable(GL_LIGHTING);
    //glEnable(GL_LIGHT0);

    glEnable(GL_TEXTURE_2D);
    glEnable(GL_DEPTH_TEST);
    glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST);

    glDisable(GL_CULL_FACE);    //disable face culling

    getPointMatrix(500);
}

Initialization covers initializing the queue, creating the texture, computing the sphere's vertex and texture coordinates, and setting various state. Two things to note: keep lighting disabled, otherwise different parts of the image will be shaded differently depending on where the light is placed; and disable face culling, otherwise nothing is visible from inside the sphere, which is exactly where a panoramic video is meant to be viewed from.
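
One small addition that is not in the original code but usually helps with equirectangular textures: clamping the texture wrap mode reduces the visible seam where the left and right edges of the frame meet. A sketch, to be placed next to the filtering calls above (on older Windows GL headers the GL_CLAMP_TO_EDGE token may require glext.h):

    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);    //avoid sampling across the horizontal seam
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);    //and across the poles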

(2) Setting the projection matrix

void reshape(int w, int h)
{
    glViewport(0, 0, (GLsizei)w, (GLsizei)h);
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    //glOrtho(-250.0, 250, -250.0, 250, -500, 500);
    //glFrustum(-250.0, 250, -250.0, 250, -5, -500);
    gluPerspective(45, (GLfloat)w / h, 1.0f, 1000.0f);    //set up a perspective projection
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
}

A perspective projection is used so that the camera can move inside the sphere. The field of view is set to 45 degrees here; it can be changed, but it should not be too large, or the image starts to look distorted.

(3) Rendering

void display(void)
{
    glLoadIdentity();
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

    gluLookAt(0, 0, distance, 0, 0, 500.0, 0, 1, 0);
    printf("distance: %f \n", distance);
    glRotatef(xangle, 1.0f, 0.0f, 0.0f);    //rotate around the X axis
    glRotatef(yangle, 0.0f, 1.0f, 0.0f);    //rotate around the Y axis
    glRotatef(zangle, 0.0f, 0.0f, 1.0f);    //rotate around the Z axis

    EnterCriticalSection(&frame_queue.cs);

    printf("display size = %d \n", frame_queue.size);
    if (frame_queue.size > 0)
    {
        Vid_Frame *vp = &frame_queue.queue[frame_queue.front];

        glBindTexture(GL_TEXTURE_2D, texturesArr);
        glTexImage2D(GL_TEXTURE_2D, 0, 3, vp->width, vp->height, 0, GL_BGR_EXT, GL_UNSIGNED_BYTE, vp->buffer);

        frame_queue.size--;
        frame_queue.front = (frame_queue.front + 1) % MAXSIZE;
    }

    LeaveCriticalSection(&frame_queue.cs);

    //glColor3f(1.0, 0.0, 0.0);
    glEnableClientState(GL_VERTEX_ARRAY);
    glEnableClientState(GL_TEXTURE_COORD_ARRAY);
    glVertexPointer(3, GL_FLOAT, 0, verticals);
    glTexCoordPointer(2, GL_FLOAT, 0, UV_TEX_VERTEX);
    glPushMatrix();
    glDrawArrays(GL_TRIANGLES, 0, (180 / cap_H) * (360 / cap_W) * 6);
    glPopMatrix();
    glDisableClientState(GL_TEXTURE_COORD_ARRAY);
    glDisableClientState(GL_VERTEX_ARRAY);  // disable vertex arrays

    glFlush();

    av_usleep(25000);
}

During rendering, a decoded frame is taken from the queue and uploaded as a new texture. Drawing is done with glDrawArrays using GL_TRIANGLES; with this mode the vertex and texture coordinates need no extra bookkeeping, which is convenient, but with a very large number of vertices it may start to hurt rendering performance.
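
One cheap optimization that is not in the original code: since every frame of a given video has the same size, the texture storage could be allocated once (with a single glTexImage2D call when the first frame arrives) and then only updated per frame, avoiding a full reallocation on every redraw. A sketch, reusing the names from display() above:

        //per frame, replace the pixels of the already-allocated texture instead of recreating it
        glBindTexture(GL_TEXTURE_2D, texturesArr);
        glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, vp->width, vp->height,
                        GL_BGR_EXT, GL_UNSIGNED_BYTE, vp->buffer);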

(4) Frame update and redraw

void reDraw(int millisec)
{
    glutTimerFunc(millisec, reDraw, millisec);
    glutPostRedisplay();
}

A GLUT timer triggers a redraw at a fixed interval, which is what turns the stream of decoded frames into continuous playback.
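
The 25 ms interval passed in from main() is hard-coded; it could instead be derived from the stream's frame rate. A sketch, under the assumption that the decode thread publishes the rate through a global before playback starts (none of this is in the original code):

    //assumed global, set by ThreadFunc once the video stream is found:
    //    AVRational fr = pFormatCtx->streams[videoindex]->avg_frame_rate;
    //    if (fr.num > 0 && fr.den > 0) g_frame_rate = av_q2d(fr);
    double g_frame_rate = 25.0;

    //in main(), instead of the fixed 25:
    int interval_ms = (int)(1000.0 / g_frame_rate + 0.5);
    glutTimerFunc(interval_ms, reDraw, interval_ms);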

4. Basic controls

(1) Keyboard control

void keyboard(unsigned char key, int x, int y)
{
    switch (key)
    {
    case 'x':        //pressing 'x'/'X' rotates around the X axis
        xangle += 1.0f;    //rotation increment
        break;
    case 'X':
        xangle -= 1.0f;
        break;
    case 'y':
        yangle += 1.0f;
        break;
    case 'Y':
        yangle -= 1.0f;
        break;
    case 'z':
        zangle += 1.0f;
        break;
    case 'Z':
        zangle -= 1.0f;
        break;
    case 'a':
        distance += 10.0f;
        break;
    case 'A':
        distance -= 10.0f;
        break;
    default:
        return;
    }
    glutPostRedisplay();    //request a redraw
}

The keyboard rotates the sphere around the x, y and z axes and adjusts the distance from which it is viewed.

(2) Mouse control

//handle mouse clicks
void Mouse(int button, int state, int x, int y)
{
    if (state == GLUT_DOWN) //on a button press, record the cursor's initial position in the window
    {
        //remember where the click/drag started
        cx = x;
        cy = y;
    }
}

//handle mouse dragging
void onMouseMove(int x, int y)
{
    float offset = 0.18f;
    //convert the drag delta into angle increments
    yangle -= ((x - cx) * offset);
    xangle += ((y - cy) * offset);

    glutPostRedisplay();

    //remember the current cursor position for the next move event
    cx = x;
    cy = y;
}

5. Main function

int main(int argc, char* argv[])
{
    glutInit(&argc, argv);
    glutInitDisplayMode(GLUT_SINGLE | GLUT_RGB | GLUT_DEPTH);
    glutInitWindowSize(1640, 840);
    glutInitWindowPosition(100, 100);
    glutCreateWindow("OpenGL全景");
    init();
    glutReshapeFunc(reshape);
    glutDisplayFunc(display);
    glutKeyboardFunc(keyboard);
    glutMouseFunc(Mouse);
    glutMotionFunc(onMouseMove);

    glutTimerFunc(25, reDraw, 25);

    HANDLE hThrd = NULL;
    DWORD threadId;
    hThrd = CreateThread(NULL, 0, ThreadFunc, 0, 0, &threadId);

    glutMainLoop();

    WaitForSingleObject(hThrd, INFINITE);

    if (hThrd)
    {
        CloseHandle(hThrd);
    }

    return 0;
}

glutMainLoop() really is an awkward function: I couldn't find a clean way to exit it, and quitting apparently means terminating the whole program. Most of the time you only want to leave the loop, not kill the process. So if you're building this with Win32, it's better not to use it at all: run the rendering on a dedicated thread and route all the messages through Win32, which is much more convenient.
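
For what it's worth, if the freeglut implementation is available (an assumption; classic GLUT has no such call), the loop can at least be told to return instead of terminating the process:

    //with freeglut: ask glutMainLoop() to return rather than call exit()
    glutSetOption(GLUT_ACTION_ON_WINDOW_CLOSE, GLUT_ACTION_GLUTMAINLOOP_RETURNS);
    glutMainLoop();        //now returns once the window is closed or glutLeaveMainLoop() is called
    WaitForSingleObject(hThrd, INFINITE);    //so the cleanup at the end of main() is finally reachable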

Screenshot:

[screenshot of the player running]

Project source code: http://download.csdn.net/download/qq_33892166/9856939

VR video recommendations: http://dl.pconline.com.cn/vr/list0_1_2007_2018.html
