《最长的一帧》 ("The Longest Frame"), OSG 3.4: osgViewer::View::init() and osgViewer::Viewer::getContexts()
void ViewerBase::startThreading()
{
    if (_threadsRunning) return;

    OSG_INFO<<"Viewer::startThreading() - starting threading"<<std::endl;

    // release any context held by the main thread.
    releaseContext();

    Contexts contexts;
    getContexts(contexts);

    OSG_INFO<<"Viewer::startThreading() - contexts.size()="<<contexts.size()<<std::endl;

    Cameras cameras;
    getCameras(cameras);

    unsigned int numThreadsOnStartBarrier = 0;
    unsigned int numThreadsOnEndBarrier = 0;
    switch(_threadingModel)
    {
        case(SingleThreaded):
            numThreadsOnStartBarrier = 1;
            numThreadsOnEndBarrier = 1;
            return;
        case(CullDrawThreadPerContext):
            numThreadsOnStartBarrier = contexts.size()+1;
            numThreadsOnEndBarrier = contexts.size()+1;
            break;
        case(DrawThreadPerContext):
            numThreadsOnStartBarrier = 1;
            numThreadsOnEndBarrier = 1;
            break;
        case(CullThreadPerCameraDrawThreadPerContext):
            numThreadsOnStartBarrier = cameras.size()+1;
            numThreadsOnEndBarrier = 1;
            break;
        default:
            OSG_NOTICE<<"Error: Threading model not selected"<<std::endl;
            return;
    }

    Scenes scenes;
    getScenes(scenes);
    for(Scenes::iterator scitr = scenes.begin(); scitr != scenes.end(); ++scitr)
    {
        if ((*scitr)->getSceneData())
        {
            OSG_INFO<<"Making scene thread safe"<<std::endl;

            // make sure that existing scene graph objects are allocated with thread safe ref/unref
            (*scitr)->getSceneData()->setThreadSafeRefUnref(true);

            // update the scene graph so that it has enough GL object buffer memory for the graphics contexts that will be using it.
            (*scitr)->getSceneData()->resizeGLObjectBuffers(osg::DisplaySettings::instance()->getMaxNumberOfGraphicsContexts());
        }
    }

    Contexts::iterator citr;

    unsigned int numViewerDoubleBufferedRenderingOperation = 0;

    bool graphicsThreadsDoesCull = _threadingModel == CullDrawThreadPerContext || _threadingModel==SingleThreaded;

    for(Cameras::iterator camItr = cameras.begin(); camItr != cameras.end(); ++camItr)
    {
        osg::Camera* camera = *camItr;
        Renderer* renderer = dynamic_cast<Renderer*>(camera->getRenderer());
        if (renderer)
        {
            renderer->setGraphicsThreadDoesCull(graphicsThreadsDoesCull);
            renderer->setDone(false);
            renderer->reset();
            ++numViewerDoubleBufferedRenderingOperation;
        }
    }

    if (_threadingModel==CullDrawThreadPerContext)
    {
        _startRenderingBarrier = 0;
        _endRenderingDispatchBarrier = 0;
        _endDynamicDrawBlock = 0;
    }
    else if (_threadingModel==DrawThreadPerContext ||
             _threadingModel==CullThreadPerCameraDrawThreadPerContext)
    {
        _startRenderingBarrier = 0;
        _endRenderingDispatchBarrier = 0;
        _endDynamicDrawBlock = new osg::EndOfDynamicDrawBlock(numViewerDoubleBufferedRenderingOperation);

#ifndef OSGUTIL_RENDERBACKEND_USE_REF_PTR
        if (!osg::Referenced::getDeleteHandler()) osg::Referenced::setDeleteHandler(new osg::DeleteHandler(2));
        else osg::Referenced::getDeleteHandler()->setNumFramesToRetainObjects(2);
#endif
    }

    if (numThreadsOnStartBarrier>1)
    {
        _startRenderingBarrier = new osg::BarrierOperation(numThreadsOnStartBarrier, osg::BarrierOperation::NO_OPERATION);
    }

    if (numThreadsOnEndBarrier>1)
    {
        _endRenderingDispatchBarrier = new osg::BarrierOperation(numThreadsOnEndBarrier, _endBarrierOperation);
    }

    osg::ref_ptr<osg::BarrierOperation> swapReadyBarrier = contexts.empty() ? 0 :
        new osg::BarrierOperation(contexts.size(), osg::BarrierOperation::NO_OPERATION);

    osg::ref_ptr<osg::SwapBuffersOperation> swapOp = new osg::SwapBuffersOperation();

    for(citr = contexts.begin(); citr != contexts.end(); ++citr)
    {
        osg::GraphicsContext* gc = (*citr);

        if (!gc->isRealized())
        {
            OSG_INFO<<"ViewerBase::startThreading() : Realizng window "<<gc<<std::endl;
            gc->realize();
        }

        gc->getState()->setDynamicObjectRenderingCompletedCallback(_endDynamicDrawBlock.get());

        // create the a graphics thread for this context
        gc->createGraphicsThread();

        // add the startRenderingBarrier
        if (_threadingModel==CullDrawThreadPerContext && _startRenderingBarrier.valid()) gc->getGraphicsThread()->add(_startRenderingBarrier.get());

        // add the rendering operation itself.
        gc->getGraphicsThread()->add(new osg::RunOperations());

        if (_threadingModel==CullDrawThreadPerContext && _endBarrierPosition==BeforeSwapBuffers && _endRenderingDispatchBarrier.valid())
        {
            // add the endRenderingDispatchBarrier
            gc->getGraphicsThread()->add(_endRenderingDispatchBarrier.get());
        }

        if (swapReadyBarrier.valid()) gc->getGraphicsThread()->add(swapReadyBarrier.get());

        // add the swap buffers
        gc->getGraphicsThread()->add(swapOp.get());

        if (_threadingModel==CullDrawThreadPerContext && _endBarrierPosition==AfterSwapBuffers && _endRenderingDispatchBarrier.valid())
        {
            // add the endRenderingDispatchBarrier
            gc->getGraphicsThread()->add(_endRenderingDispatchBarrier.get());
        }
    }

    if (_threadingModel==CullThreadPerCameraDrawThreadPerContext && numThreadsOnStartBarrier>1)
    {
        Cameras::iterator camItr;

        for(camItr = cameras.begin(); camItr != cameras.end(); ++camItr)
        {
            osg::Camera* camera = *camItr;
            camera->createCameraThread();

            osg::GraphicsContext* gc = camera->getGraphicsContext();

            // add the startRenderingBarrier
            if (_startRenderingBarrier.valid()) camera->getCameraThread()->add(_startRenderingBarrier.get());

            Renderer* renderer = dynamic_cast<Renderer*>(camera->getRenderer());
            if (renderer)
            {
                renderer->setGraphicsThreadDoesCull(false);
                camera->getCameraThread()->add(renderer);
            }

            if (_endRenderingDispatchBarrier.valid())
            {
                // add the endRenderingDispatchBarrier
                gc->getGraphicsThread()->add(_endRenderingDispatchBarrier.get());
            }
        }

        for(camItr = cameras.begin(); camItr != cameras.end(); ++camItr)
        {
            osg::Camera* camera = *camItr;
            if (camera->getCameraThread() && !camera->getCameraThread()->isRunning())
            {
                OSG_INFO<<"  camera->getCameraThread()-> "<<camera->getCameraThread()<<std::endl;
                camera->getCameraThread()->startThread();
            }
        }
    }

    for(citr = contexts.begin(); citr != contexts.end(); ++citr)
    {
        osg::GraphicsContext* gc = (*citr);
        if (gc->getGraphicsThread() && !gc->getGraphicsThread()->isRunning())
        {
            OSG_INFO<<"  gc->getGraphicsThread()->startThread() "<<gc->getGraphicsThread()<<std::endl;
            gc->getGraphicsThread()->startThread();
            // OpenThreads::Thread::YieldCurrentThread();
        }
    }

    _threadsRunning = true;

    OSG_INFO<<"Set up threading"<<std::endl;
}
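The branching above is driven entirely by the viewer's threading model, which can be forced from user code before realize()/startThreading() run. A minimal sketch, assuming a plain osgViewer::Viewer (the model file name is only an example):

#include <osgDB/ReadFile>
#include <osgViewer/Viewer>

int main()
{
    osgViewer::Viewer viewer;
    viewer.setSceneData(osgDB::readNodeFile("cow.osg"));

    // Force a specific model instead of the automatically chosen one; with SingleThreaded,
    // startThreading() returns early and never creates any graphics or camera threads.
    viewer.setThreadingModel(osgViewer::ViewerBase::SingleThreaded);
    // viewer.setThreadingModel(osgViewer::ViewerBase::DrawThreadPerContext);

    return viewer.run();
}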
int ViewerBase::run()
{
    if (!isRealized())
    {
        realize();
    }

    unsigned int runTillFrameNumber = osg::UNINITIALIZED_FRAME_NUMBER;
    osg::getEnvVar("OSG_RUN_FRAME_COUNT", runTillFrameNumber);

    while(!done() && (runTillFrameNumber==osg::UNINITIALIZED_FRAME_NUMBER ||
                      getViewerFrameStamp()->getFrameNumber()<runTillFrameNumber))
    {
        double minFrameTime = _runMaxFrameRate>0.0 ? 1.0/_runMaxFrameRate : 0.0;
        osg::Timer_t startFrameTick = osg::Timer::instance()->tick();
        if (_runFrameScheme==ON_DEMAND)
        {
            if (checkNeedToDoFrame())
            {
                frame();
            }
            else
            {
                // we don't need to render a frame but we don't want to spin the run loop so make sure the minimum
                // loop time is 1/100th of second, if not otherwise set, so enabling the frame microSleep below to
                // avoid consume excessive CPU resources.
                if (minFrameTime==0.0) minFrameTime=0.01;
            }
        }
        else
        {
            frame();
        }

        // work out if we need to force a sleep to hold back the frame rate
        osg::Timer_t endFrameTick = osg::Timer::instance()->tick();
        double frameTime = osg::Timer::instance()->delta_s(startFrameTick, endFrameTick);
        if (frameTime < minFrameTime) OpenThreads::Thread::microSleep(static_cast<unsigned int>(1000000.0*(minFrameTime-frameTime)));
    }

    return 0;
}
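run() consults two settings, the frame scheme and the maximum frame rate, and both have public setters on ViewerBase. A small usage sketch (the 30 fps cap and the model file name are arbitrary example values):

#include <osgDB/ReadFile>
#include <osgViewer/Viewer>

int main()
{
    osgViewer::Viewer viewer;
    viewer.setSceneData(osgDB::readNodeFile("cow.osg"));

    // Only render a frame when something actually requires it (events, update callbacks, ...).
    viewer.setRunFrameScheme(osgViewer::ViewerBase::ON_DEMAND);

    // Cap continuous rendering at roughly 30 fps; run() turns this into a minimum frame
    // time of 1/30 s and microSleeps away whatever is left of it.
    viewer.setRunMaxFrameRate(30.0);

    // Setting OSG_RUN_FRAME_COUNT in the environment would stop run() after that many frames.
    return viewer.run();
}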
osgViewer/ViewerBase.cpp, line 727: the frame() function
void ViewerBase::frame(double simulationTime)
{
    if (_done) return;

    // OSG_NOTICE<<std::endl<<"CompositeViewer::frame()"<<std::endl<<std::endl;

    if (_firstFrame)
    {
        viewerInit();

        if (!isRealized())
        {
            realize();
        }

        _firstFrame = false;
    }
    advance(simulationTime);

    eventTraversal();
    updateTraversal();
    renderingTraversals();
}
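Because frame() is public, the run() loop can be replaced by a hand-written one when an application needs to interleave its own per-frame work. A minimal sketch (the model file name is only an example):

#include <osgDB/ReadFile>
#include <osgViewer/Viewer>

int main()
{
    osgViewer::Viewer viewer;
    viewer.setSceneData(osgDB::readNodeFile("cow.osg"));
    viewer.realize();

    while (!viewer.done())
    {
        // ... per-frame application work can go here ...
        viewer.frame();   // advance + eventTraversal + updateTraversal + renderingTraversals
    }
    return 0;
}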
Commentary:
void View::init()
{
    OSG_INFO<<"View::init()"<<std::endl;

    // Two important member variables show up in View::init(): _eventQueue and _cameraManipulator.
    // A freshly created osgGA::GUIEventAdapter instance is handed to the manipulator's init() function.
    osg::ref_ptr<osgGA::GUIEventAdapter> initEvent = _eventQueue->createEvent();
    initEvent->setEventType(osgGA::GUIEventAdapter::FRAME);

    if (_cameraManipulator.valid())
    {
        _cameraManipulator->init(*initEvent, *this);
    }
}
As its name suggests, _eventQueue stores the viewer's event queue. The class that represents an event in OSG is osgGA::GUIEventAdapter, which can describe all kinds of mouse, keyboard, pen/tablet and window events. In user code we usually derive from osgGA::GUIEventHandler and override its handle() function to receive mouse and keyboard input in real time and run the corresponding application logic.
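A minimal sketch of such a handler, assuming all we want is to log key presses and notice the per-frame FRAME event (the class name is ours, not OSG's):

#include <osg/Notify>
#include <osgGA/GUIEventHandler>

class KeyLogger : public osgGA::GUIEventHandler
{
public:
    virtual bool handle(const osgGA::GUIEventAdapter& ea, osgGA::GUIActionAdapter& /*aa*/)
    {
        switch (ea.getEventType())
        {
        case osgGA::GUIEventAdapter::KEYDOWN:
            OSG_NOTICE << "key pressed: " << ea.getKey() << std::endl;
            return false;   // returning false lets other handlers see the event too
        case osgGA::GUIEventAdapter::FRAME:
            // delivered once per frame, just like the initEvent created in View::init()
            return false;
        default:
            return false;
        }
    }
};

It is installed with viewer.addEventHandler(new KeyLogger), after which eventTraversal() calls handle() for every queued event.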
Besides holding a list of GUIEventAdapter objects, _eventQueue provides a set of functions for manipulating that list and its elements; among them, createEvent() allocates and returns a pointer to a new GUIEventAdapter event.
The new event's type is then set to FRAME, i.e. an event that is emitted once every frame.
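The same queue is reachable from user code through View::getEventQueue(), so events can also be injected by hand. A hedged sketch (the choice of key is arbitrary):

#include <osgGA/EventQueue>
#include <osgViewer/Viewer>

void injectSomeEvents(osgViewer::Viewer& viewer)
{
    osgGA::EventQueue* queue = viewer.getEventQueue();

    // Simulate a space-bar press/release; handlers will see them in the next eventTraversal().
    queue->keyPress(osgGA::GUIEventAdapter::KEY_Space);
    queue->keyRelease(osgGA::GUIEventAdapter::KEY_Space);

    // Or build an event by hand, exactly as View::init() does with its FRAME event.
    osg::ref_ptr<osgGA::GUIEventAdapter> ev = queue->createEvent();
    ev->setEventType(osgGA::GUIEventAdapter::USER);
    queue->addEvent(ev.get());
}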
And _cameraManipulator? As expected, it is the camera manipulator instance used by the viewer. We normally set it with setCameraManipulator: the trackball manipulator (TrackballManipulator) lets you inspect the scene by dragging the mouse, while the drive manipulator (DriveManipulator) navigates the scene with a car-driving style of motion.
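For reference, installing one of these manipulators usually amounts to a single call; a tiny sketch:

#include <osgGA/DriveManipulator>
#include <osgGA/TrackballManipulator>
#include <osgViewer/Viewer>

void installManipulator(osgViewer::Viewer& viewer, bool drive)
{
    if (drive) viewer.setCameraManipulator(new osgGA::DriveManipulator);
    else       viewer.setCameraManipulator(new osgGA::TrackballManipulator);
}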
The code above passes the newly created FRAME event and the View object itself to _cameraManipulator's init() function. Different manipulators (TrackballManipulator, DriveManipulator, and so on) override their own init() to carry out whatever initialisation they need. If you want to write your own scene manipulator, overriding the manipulator base class's init() (osgGA::MatrixManipulator::init in the OSG 2.x era; in OSG 3.x the base class is osgGA::CameraManipulator) gives you a flexible place to initialise your custom manipulator, and this is exactly where it gets called.
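As a sketch of where that hook fires, here is a manipulator derived from osgGA::TrackballManipulator (chosen only for brevity; a fully custom manipulator would derive from osgGA::CameraManipulator and also implement the matrix accessors) whose init() override runs exactly when View::init() calls it; the home position values are arbitrary:

#include <osg/Vec3d>
#include <osgGA/TrackballManipulator>

class MyManipulator : public osgGA::TrackballManipulator
{
public:
    // Called from View::init() with the FRAME event created there.
    virtual void init(const osgGA::GUIEventAdapter& ea, osgGA::GUIActionAdapter& us)
    {
        osgGA::TrackballManipulator::init(ea, us);

        // Custom start-up work, e.g. placing the camera at a known home position.
        setHomePosition(osg::Vec3d(0.0, -10.0, 2.0),   // eye
                        osg::Vec3d(0.0,   0.0, 0.0),   // center
                        osg::Vec3d(0.0,   0.0, 1.0));  // up
    }
};

It would then be installed like any other manipulator: viewer.setCameraManipulator(new MyManipulator).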
② If the realize() function is executed
osgViewer/Viewer.cpp, line 496: void Viewer::realize()
Viewer::realize() is another function we know very well. Ever since OSG first appeared, we have been used to calling it before entering the simulation loop (current versions of OSG call it automatically if we forget) to finish "setting up" the windows and the scene. But what exactly does "setting up" mean, and how much does this seemingly simple step actually cover? The hard part of the journey starts here.
void Viewer::realize()
{
    //OSG_INFO<<"Viewer::realize()"<<std::endl;

    // setCameraWithFocus(0) simply sets the member variable _cameraWithFocus to NULL.
    // This block is not part of osgViewer/Viewer.cpp; it belongs to the corresponding realize()
    // in osgViewer/CompositeViewer.cpp:
    /*setCameraWithFocus(0);

    if (_views.empty())
    {
        OSG_NOTICE<<"CompositeViewer::realize() - No views to realize."<<std::endl;
        _done = true;
        return;
    }
    */

    /**
     * The variable contexts is a vector of osg::GraphicsContext pointers, and Viewer::getContexts()
     * collects all of the graphics contexts and stores them in this vector.
     * For anyone who needs to embed OSG into a GUI toolkit (MFC, Qt, wxWidgets, ...),
     * osg::GraphicsContext is one of the classes you deal with most often. A common way of
     * embedding might look like this:
     *     osg::ref_ptr<osg::GraphicsContext::Traits> traits = new osg::GraphicsContext::Traits;
     *     osg::ref_ptr<osg::Referenced> windata = new osgViewer::GraphicsWindowWin32::WindowData(hWnd);
     *     traits->x = 0;
     *     traits->y = 0;
     *     ......
     *     traits->inheritedWindowData = windata;
     *     osg::GraphicsContext* gc = osg::GraphicsContext::createGraphicsContext(traits.get());
     *     Camera* camera = viewer.getCamera();
     *     camera->setGraphicsContext(gc);
     *     ......
     *     viewer.setCamera(camera);
     * The procedure is somewhat wordy, but the order is clear: first fill in the Traits of the
     * embedded window, for instance the X/Y position, the width and height, and the handle of the
     * parent window (inheritedWindowData); then create a new GraphicsContext from those traits and
     * attach it to the camera used by the scene. What getContexts() gathers below may well include
     * a GraphicsContext that the user created in exactly this way.
     */
    Contexts contexts;
    getContexts(contexts);

    /* One fact is obvious: before the program enters the simulation loop, and before anything has
       been done with the osgViewer::Viewer, no graphics context exists yet; creating a new
       osg::Camera does not automatically allocate one either. Yet a GraphicsContext is the only
       platform on which the scene can be displayed, so one has to be created before rendering
       starts. Suppose the user has, before entering the simulation loop, created a Camera, given
       it a custom GraphicsContext and passed the camera to the viewer, as the osgviewerMFC and
       osgcamera examples do and as we usually do when embedding OSG into a GUI toolkit. In that
       case the system has no extra work to do, because the user needs no additional windows to
       display the scene. So even if the master camera _camera has no GraphicsContext assigned,
       the simulation can start as long as some graphics context already exists. If, however,
       getContexts() finds nothing, the simulation has no suitable display platform yet; the viewer
       then tries to create a default GraphicsContext and calls getContexts() again, and if there
       is still no context it has no choice but to give up and quit. */
    if (contexts.empty())
    {
        OSG_INFO<<"Viewer::realize() - No valid contexts found, setting up view across all screens."<<std::endl;

        // no windows are already set up so set up a default view
        // There are several ways the default GraphicsContext can be created:

        std::string value;
        if (osg::getEnvVar("OSG_CONFIG_FILE", value))
        {
            /* 1. Read the OSG_CONFIG_FILE environment variable: if the user has put a file path in
               it, the system tries to load that file with osgDB::readObjectFile (parsed by the cfg
               plugin); on success osgViewer::Viewer::take is used to configure the current viewer
               from it. All of this is implemented in osgViewer::Viewer::readConfiguration. */
            readConfiguration(value);
        }
        else
        {
            int screenNum = -1;
            osg::getEnvVar("OSG_SCREEN", screenNum);

            int x = -1, y = -1, width = -1, height = -1;
            osg::getEnvVar("OSG_WINDOW", x, y, width, height);

            /* 2. Read the OSG_WINDOW environment variable: if the user has defined the top-left
               corner (x, y) and the size (w, h) of a window in the form "x y w h" (separated by
               spaces), the system calls osgViewer::View::setUpViewInWindow to create the device.
               OSG_BORDERLESS_WINDOW works the same way but produces an undecorated window through
               the osgViewer::SingleWindow view configuration. */
            if (osg::getEnvVar("OSG_BORDERLESS_WINDOW", x, y, width, height))
            {
                osg::ref_ptr<osgViewer::SingleWindow> sw = new osgViewer::SingleWindow(x, y, width, height, screenNum);
                sw->setWindowDecoration(false);
                apply(sw.get());
            }
            else if (width>0 && height>0)
            {
                if (screenNum>=0) setUpViewInWindow(x, y, width, height, screenNum);
                else setUpViewInWindow(x,y,width,height);
            }
            else if (screenNum>=0)
            {
                /* 3. Read the OSG_SCREEN environment variable: if the user specifies a screen
                   number here, the system calls osgViewer::View::setUpViewOnSingleScreen to create
                   a full-screen graphics window on that screen; if OSG_WINDOW is set as well, both
                   variables take effect and setUpViewInWindow is called with that screen number. */
                setUpViewOnSingleScreen(screenNum);
            }
            else
            {
                /* 4. If none of the environment variables above is set (in fact the most common
                   case), the system calls osgViewer::View::setUpViewAcrossAllScreens and tries to
                   create a full-screen graphics device. */
                setUpViewAcrossAllScreens();
            }
        }

        getContexts(contexts);
    }

    if (contexts.empty())
    {
        OSG_NOTICE<<"Viewer::realize() - failed to set up any windows"<<std::endl;
        _done = true;
        return;
    }

    // get the display settings that will be active for this viewer
    osg::DisplaySettings* ds = _displaySettings.valid() ? _displaySettings.get() : osg::DisplaySettings::instance().get();
    osg::GraphicsContext::WindowingSystemInterface* wsi = osg::GraphicsContext::getWindowingSystemInterface();

    // pass on the display settings to the WindowSystemInterface.
    if (wsi && wsi->getDisplaySettings()==0) wsi->setDisplaySettings(ds);

    unsigned int maxTexturePoolSize = ds->getMaxTexturePoolSize();
    unsigned int maxBufferObjectPoolSize = ds->getMaxBufferObjectPoolSize();

    for(Contexts::iterator citr = contexts.begin(); citr != contexts.end(); ++citr)
    {
        osg::GraphicsContext* gc = *citr;

        if (ds->getSyncSwapBuffers()) gc->setSwapCallback(new osg::SyncSwapBuffersCallback);

        // set the pool sizes, 0 the default will result in no GL object pools.
        gc->getState()->setMaxTexturePoolSize(maxTexturePoolSize);
        gc->getState()->setMaxBufferObjectPoolSize(maxBufferObjectPoolSize);

        gc->realize();

        if (_realizeOperation.valid() && gc->valid())
        {
            gc->makeCurrent();

            (*_realizeOperation)(gc);

            gc->releaseContext();
        }
    }

    // attach contexts to _incrementalCompileOperation if attached.
    if (_incrementalCompileOperation) _incrementalCompileOperation->assignContexts(contexts);

    bool grabFocus = true;
    if (grabFocus)
    {
        for(Contexts::iterator citr = contexts.begin(); citr != contexts.end(); ++citr)
        {
            osgViewer::GraphicsWindow* gw = dynamic_cast<osgViewer::GraphicsWindow*>(*citr);
            if (gw)
            {
                gw->grabFocusIfPointerInWindow();
            }
        }
    }

    // initialize the global timer to be relative to the current time.
    osg::Timer::instance()->setStartTick();

    // pass on the start tick to all the associated event queues
    setStartTick(osg::Timer::instance()->getStartTick());

    // configure threading.
    setUpThreading();

    if (osg::DisplaySettings::instance()->getCompileContextsHint())
    {
        for(unsigned int i=0; i<= osg::GraphicsContext::getMaxContextID(); ++i)
        {
            osg::GraphicsContext* gc = osg::GraphicsContext::getOrCreateCompileContext(i);
            if (gc)
            {
                gc->createGraphicsThread();
                gc->getGraphicsThread()->startThread();
            }
        }
    }

#if 0
    osgGA::GUIEventAdapter* eventState = getEventQueue()->getCurrentEventState();
    if (getCamera()->getViewport())
    {
        osg::Viewport* viewport = getCamera()->getViewport();
        eventState->setInputRange( viewport->x(), viewport->y(), viewport->x() + viewport->width(), viewport->y() + viewport->height());
    }
    else
    {
        eventState->setInputRange(-1.0, -1.0, 1.0, 1.0);
    }
#endif
}
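The environment-variable branches above can also be bypassed from code by creating a windowed view before realize() runs; a small sketch (window geometry and model name are arbitrary examples):

#include <osgDB/ReadFile>
#include <osgViewer/Viewer>

int main()
{
    osgViewer::Viewer viewer;
    viewer.setSceneData(osgDB::readNodeFile("cow.osg"));

    // Roughly what OSG_WINDOW=50 50 800 600 would give: realize() then finds this
    // context via getContexts() and skips the default full-screen setup entirely.
    viewer.setUpViewInWindow(50, 50, 800, 600);

    return viewer.run();   // run() calls realize() itself if we have not already
}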
Viewer::getContexts() collects all of the graphics contexts and stores them in the vector passed in.
Current location: osgViewer/Viewer.cpp, line 1307: osgViewer::Viewer::getContexts()
/* If you want to look at every graphics context in your own program, this is a handy function for
   collecting them. In the simplest case the program has a single master camera and therefore a
   single GraphicsContext representing one full-screen graphics window; the osgcamera example, on
   the other hand, creates six slave cameras and so ends up with six graphics contexts, each window
   with a different X coordinate, which is precisely what that example is meant to demonstrate. */
void Viewer::getContexts(Contexts& contexts, bool onlyValid)
{
    typedef std::set<osg::GraphicsContext*> ContextSet;
    ContextSet contextSet;

    contexts.clear();

    /* First check whether the scene's master camera _camera holds a valid GraphicsContext, then
       walk all the slave cameras in _slaves (a viewer may own one master camera and several
       slaves), recording every GraphicsContext found and appending its pointer to the contexts
       vector passed in. The original book also describes a std::sort step here (contexts spanning
       fewer screens first, then smaller window X, then smaller window Y), but no such sort appears
       in the OSG 3.4 version of this function; contexts is a plain std::vector and does not order
       itself. */
    if (_camera.valid() &&
        _camera->getGraphicsContext() &&
        (_camera->getGraphicsContext()->valid() || !onlyValid))
    {
        contextSet.insert(_camera->getGraphicsContext());
        contexts.push_back(_camera->getGraphicsContext());
    }

    for(unsigned int i=0; i<getNumSlaves(); ++i)
    {
        Slave& slave = getSlave(i);
        osg::GraphicsContext* sgc = slave._camera.valid() ? slave._camera->getGraphicsContext() : 0;
        if (sgc && (sgc->valid() || !onlyValid))
        {
            if (contextSet.count(sgc)==0)
            {
                contextSet.insert(sgc);
                contexts.push_back(sgc);
            }
        }
    }
}
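If you want to inspect the contexts your own program ends up with, a small sketch along these lines prints the window rectangle of each one:

#include <osg/Notify>
#include <osgViewer/Viewer>

void printContexts(osgViewer::Viewer& viewer)
{
    osgViewer::Viewer::Contexts contexts;
    viewer.getContexts(contexts);   // onlyValid defaults to true

    for (osgViewer::Viewer::Contexts::iterator itr = contexts.begin();
         itr != contexts.end(); ++itr)
    {
        const osg::GraphicsContext::Traits* traits = (*itr)->getTraits();
        if (traits)
        {
            OSG_NOTICE << "context " << *itr << " at (" << traits->x << ", " << traits->y
                       << "), size " << traits->width << "x" << traits->height << std::endl;
        }
    }
}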