ahem.. Looking for some free debugging.. 8)
I've included a very slimmed down & modified osgprerender.cpp
example with a second camera added in an attempt to
get stereo-rtt working..
I've discovered that the
RenderImplementation = osg::Camera::SEPERATE_WINDOW
is a very useful tool (run with the --window option).
It seems to show that the program is basically working
in stereo but the final main window only shows the "right" eye.
I've tried this on OSX (anaglyph) and Linux (quad-buffer) and
they perform roughly the same. (OSG-SVN 2.2.10+ or so)
Anyone done this already with the SVN version?
See anything obvious?
ml
#include <osg/GLExtensions>
#include <osg/DrawPixels>
#include <osg/Node>
#include <osg/Geometry>
#include <osg/Notify>
#include <osg/MatrixTransform>
#include <osg/Texture2D>
#include <osg/TextureRectangle>
#include <osg/Stencil>
#include <osg/ColorMask>
#include <osg/Depth>
#include <osg/Billboard>
#include <osg/Material>
#include <osg/AnimationPath>
#include <osgGA/TrackballManipulator>
#include <osgGA/FlightManipulator>
#include <osgGA/DriveManipulator>
#include <osgUtil/SmoothingVisitor>
#include <osgDB/Registry>
#include <osgDB/ReadFile>
#include <osgViewer/Viewer>
#include <iostream>
// Build one pre-render (RTT) camera for the given eye.
//
// eye:  0 = mono/debug, 0x1 = left eye, 0x2 = right eye.  The eye value
//       doubles as the cull mask applied to the camera so scene nodes can
//       be tagged per eye via their node masks.
// bs:   bounding sphere of the subgraph the camera will render; used to
//       derive the eye position and the near/far planes.
// tex_width/tex_height: dimensions of the render target, used as viewport.
// renderImplementation: FRAME_BUFFER_OBJECT / PIXEL_BUFFER / FRAME_BUFFER /
//       SEPERATE_WINDOW etc. (OSG's enum spelling).
//
// Returns a heap-allocated camera; ownership passes to the caller, which is
// expected to add it to a ref-counted scene graph.
osg::Camera*
create_camera( int eye, const osg::BoundingSphere& bs,
               unsigned int tex_width,
               unsigned int tex_height,
               osg::Camera::RenderTargetImplementation renderImplementation )
{
    osg::Camera* camera = new osg::Camera;

    // Clear both colour and depth each frame; the per-eye clear colour set
    // below makes the two eyes visually distinguishable while debugging.
    //camera->setClearColor(osg::Vec4(0.1f,0.1f,0.3f,1.0f));
    camera->setClearMask(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

    float znear = 1.0f*bs.radius();
    float zfar  = 3.0f*bs.radius();

    // 1:1 aspect ratio frustum, matching the square 256x256 canvas quad.
    // (The original osgprerender 2:1 flag values, 0.25f/0.5f, were dead
    // stores immediately overwritten by these.)
    float proj_top   = 1.0f*znear;
    float proj_right = 1.0f*znear;

    znear *= 0.9f;
    zfar  *= 1.1f;

    // set up projection.
    camera->setProjectionMatrixAsFrustum(-proj_right, proj_right,
                                         -proj_top,   proj_top,
                                         znear, zfar);

    // Default (mono) view: looking at the model centre from -Y, Z up.
    camera->setReferenceFrame(osg::Transform::ABSOLUTE_RF);
    camera->setViewMatrixAsLookAt( bs.center()-osg::Vec3(0.0f,1.0f,0.0f)*bs.radius(),
                                   bs.center(),
                                   osg::Vec3(0.0f,0.0f,1.0f));

    // set viewport to cover the whole render target.
    camera->setViewport(0,0,tex_width,tex_height);

    // Stereo cull masks: left eye sees 0x1-tagged nodes, right eye 0x2.
    // NOTE(review): CullMaskLeft/Right only take effect when this camera is
    // itself traversed in stereo; a PRE_RENDER RTT camera appears to be
    // culled mono, so the per-eye setCullMask() below is what matters here
    // -- confirm against the CullSettings/stereo code path.
    camera->setCullMaskLeft( 0x1 );
    camera->setCullMaskRight( 0x2 );

    switch( eye ) {
    case 0:
        // Mono/debug camera: olive clear colour, default view.
        camera->setClearColor(osg::Vec4(0.3f,0.3f,0.1f,1.0f));
        break;
    case 0x1:
        // Left eye: eye offset +0.1*radius in X, reddish clear colour.
        camera->setCullMask( 0x1 );
        //camera->setViewport(0,0,tex_width/2,tex_height/2);
        camera->setClearColor(osg::Vec4(0.3f,0.1f,0.1f,1.0f));
        camera->setViewMatrixAsLookAt( bs.center()-osg::Vec3(0.1f,1.0f,0.0f)*bs.radius(),
                                       bs.center(),
                                       osg::Vec3(0.0f,0.0f,1.0f));
        break;
    case 0x2:
        // Right eye: eye offset -0.1*radius in X, bluish clear colour.
        camera->setCullMask( 0x2 );
        //camera->setViewport(tex_width/2,0,tex_width/2,tex_height/2);
        camera->setClearColor(osg::Vec4(0.1f,0.1f,0.3f,1.0f));
        camera->setViewMatrixAsLookAt( bs.center()-osg::Vec3(-0.1f,1.0f,0.0f)*bs.radius(),
                                       bs.center(),
                                       osg::Vec3(0.0f,0.0f,1.0f));
        break;
    }

    // set the camera to render before the main camera.
    camera->setRenderOrder(osg::Camera::PRE_RENDER);

    // tell the camera to use OpenGL frame buffer object where supported.
    camera->setRenderTargetImplementation(renderImplementation);

    return camera;
}
// Build the render-to-texture subgraph: a textured quad "canvas" plus two
// PRE_RENDER cameras (left/right eye) that render `subgraph` into the
// texture the quad samples from.
//
// subgraph:  scene to render to texture (returned unchanged if its bound
//            is invalid; 0 in -> 0 out).
// tex_width/tex_height: render-target texture dimensions.
// renderImplementation: RTT mechanism forwarded to create_camera().
//
// Returns a heap-allocated group owning the quad and both cameras.
osg::Node*
createPreRenderSubGraph(
    osg::Node* subgraph,
    unsigned int tex_width,
    unsigned int tex_height,
    osg::Camera::RenderTargetImplementation renderImplementation)
{
    if (!subgraph) return 0;

    // Validate the subgraph's bound up front, BEFORE allocating anything.
    // The original checked this after creating the group/texture/geometry,
    // so the early return leaked all of them (none were yet held by a
    // ref_ptr or parented into a graph).
    const osg::BoundingSphere& bs = subgraph->getBound();
    if (!bs.valid())
    {
        return subgraph;
    }

    // create a group to contain the canvas quad and the pre-render cameras.
    osg::Group* parent = new osg::Group;

    // texture the cameras render into and the canvas quad samples from.
    osg::Texture2D* texture2D = new osg::Texture2D;
    texture2D->setTextureSize(tex_width, tex_height);
    texture2D->setInternalFormat(GL_RGBA);
    texture2D->setFilter(osg::Texture2D::MIN_FILTER, osg::Texture2D::LINEAR);
    texture2D->setFilter(osg::Texture2D::MAG_FILTER, osg::Texture2D::LINEAR);
    osg::Texture* texture = texture2D;

    //
    // Create the object on which to drape our rendered 'canvas': a
    // (noSteps-1)-segment quad strip in the X/Z plane.
    //
    osg::Geometry* polyGeom = new osg::Geometry();
    polyGeom->setSupportsDisplayList(false);

    osg::Vec3 origin(0.0f,0.0f,0.0f);
    osg::Vec3 xAxis(1.0f,0.0f,0.0f);

    int noSteps = 20;
    // 256x256 canvas: 1:1 aspect, matching the square texture.  (The
    // original 200x100 flag values were dead stores overwritten by these.)
    float width  = 256.0f;
    float height = 256.0f;

    osg::Vec3Array* vertices = new osg::Vec3Array;
    osg::Vec3 bottom = origin;
    osg::Vec3 top = origin; top.z() += height;
    osg::Vec3 dv = xAxis*(width/((float)(noSteps-1)));

    osg::Vec2Array* texcoords = new osg::Vec2Array;
    // set the tex coords so the strip maps the full [0,1]x[0,1] texture.
    osg::Vec2 bottom_texcoord(0.0f,0.0f);
    osg::Vec2 top_texcoord(0.0f, 1.0f);
    osg::Vec2 dv_texcoord(1.0f/(float)(noSteps-1),0.0f);

    for(int i=0;i<noSteps;++i)
    {
        vertices->push_back(top);
        vertices->push_back(bottom);
        top    += dv;
        bottom += dv;

        texcoords->push_back(top_texcoord);
        texcoords->push_back(bottom_texcoord);
        top_texcoord    += dv_texcoord;
        bottom_texcoord += dv_texcoord;
    }

    // pass the created vertex array to the points geometry object.
    polyGeom->setVertexArray(vertices);
    polyGeom->setTexCoordArray(0,texcoords);

    // single overall colour: white, so the texture shows unmodulated.
    osg::Vec4Array* colors = new osg::Vec4Array;
    colors->push_back(osg::Vec4(1.0f,1.0f,1.0f,1.0f));
    polyGeom->setColorArray(colors);
    polyGeom->setColorBinding(osg::Geometry::BIND_OVERALL);

    polyGeom->addPrimitiveSet(new osg::DrawArrays(osg::PrimitiveSet::QUAD_STRIP,0,vertices->size()));

    // Add the RTT texture to the Drawable via a StateSet on unit 0.
    osg::StateSet* stateset = new osg::StateSet;
    stateset->setTextureAttributeAndModes(0, texture, osg::StateAttribute::ON);
    polyGeom->setStateSet(stateset);

    osgUtil::SmoothingVisitor::smooth( *polyGeom );

    osg::Geode* geode = new osg::Geode();
    geode->addDrawable(polyGeom);
    parent->addChild(geode);

    //
    // Then create the camera nodes to do the render to texture.
    //
    {
        osg::Camera* Lcamera = create_camera( 0x01, bs, tex_width,
                                              tex_height, renderImplementation );
        osg::Camera* Rcamera = create_camera( 0x02, bs, tex_width,
                                              tex_height, renderImplementation );

        // Attach the texture as the colour buffer of BOTH cameras.
        // NOTE(review): both eyes render into the same texture and both
        // clear colour+depth, so whichever camera renders last (the right
        // eye, added second) overwrites the left eye's output -- this is
        // the likely cause of the reported "final window only shows the
        // right eye" symptom.  True stereo RTT would need per-eye textures
        // (or per-eye half viewports with scissored clears) -- confirm.
        Lcamera->attach(osg::Camera::COLOR_BUFFER, texture);
        Rcamera->attach(osg::Camera::COLOR_BUFFER, texture);

        // add subgraph to render under each eye's camera.
        Lcamera->addChild(subgraph);
        Rcamera->addChild(subgraph);

        parent->addChild(Lcamera);
        parent->addChild(Rcamera);
    }

    return parent;
}
int main( int argc, char **argv )
{
// use an ArgumentParser object to manage the program arguments.
osg::ArgumentParser arguments(&argc,argv);
// set up the usage document, in case we need to print out how
to use this program.
arguments.getApplicationUsage()->setDescription
(arguments.getApplicationName()+" is the example which demonstrates
pre rendering of scene to a texture, and then apply this texture to
geometry.");
arguments.getApplicationUsage()->setCommandLineUsage
(arguments.getApplicationName()+" [options] filename ...");
arguments.getApplicationUsage()->addCommandLineOption("-h or --
help","Display this information");
arguments.getApplicationUsage()->addCommandLineOption("--
fbo","Use Frame Buffer Object for render to texture, where supported.");
arguments.getApplicationUsage()->addCommandLineOption("--
fb","Use FrameBuffer for render to texture.");
arguments.getApplicationUsage()->addCommandLineOption("--
pbuffer","Use Pixel Buffer for render to texture, where supported.");
arguments.getApplicationUsage()->addCommandLineOption("--
window","Use a seperate Window for render to texture.");
arguments.getApplicationUsage()->addCommandLineOption("--
width","Set the width of the render to texture.");
arguments.getApplicationUsage()->addCommandLineOption("--
height","Set the height of the render to texture.");
arguments.getApplicationUsage()->addCommandLineOption("--
image","Render to an image, then apply a post draw callback to it,
and use this image to update a texture.");
arguments.getApplicationUsage()->addCommandLineOption("--texture-
rectangle","Use osg::TextureRectangle for doing the render to texure
to.");
arguments.getApplicationUsage()->addCommandLineOption("--
draw","Use drawpixels and no texturing at all.");
// construct the viewer.
osgViewer::Viewer viewer(arguments);
// if user request help write it out to cout.
if (arguments.read("-h") || arguments.read("--help"))
{
arguments.getApplicationUsage()->write(std::cout);
return 1;
}
unsigned tex_width = 256;
unsigned tex_height = 256;
while (arguments.read("--width", tex_width)) {}
while (arguments.read("--height", tex_height)) {}
osg::Camera::RenderTargetImplementation renderImplementation =
osg::Camera::FRAME_BUFFER_OBJECT;
while (arguments.read("--fbo")) { renderImplementation =
osg::Camera::FRAME_BUFFER_OBJECT; }
while (arguments.read("--pbuffer")) { renderImplementation =
osg::Camera::PIXEL_BUFFER; }
while (arguments.read("--pbuffer-rtt")) { renderImplementation =
osg::Camera::PIXEL_BUFFER_RTT; }
while (arguments.read("--fb")) { renderImplementation =
osg::Camera::FRAME_BUFFER; }
while (arguments.read("--window")) { renderImplementation =
osg::Camera::SEPERATE_WINDOW; }
// load the nodes from the commandline arguments.
osg::Node* loadedModel = osgDB::readNodeFiles(arguments);
// if not loaded assume no arguments passed in, try use default
mode instead.
if (!loadedModel) loadedModel = osgDB::readNodeFile("cessna.osg");
if (!loadedModel)
{
return 1;
}
osg::Group* rootNode = new osg::Group();
#if no_spinning
// create a transform to spin the model.
osg::MatrixTransform* loadedModelTransform = new
osg::MatrixTransform;
loadedModelTransform->addChild(loadedModel);
osg::NodeCallback* nc =
new osg::AnimationPathCallback(loadedModelTransform->getBound
().center(),
osg::Vec3(0.0f,0.0f,1.0f),
osg::inDegrees(45.0f));
loadedModelTransform->setUpdateCallback(nc);
rootNode->addChild( createPreRenderSubGraph(loadedModelTransform,
tex_width,tex_height,
renderImplementation ) );
#else
rootNode->addChild( createPreRenderSubGraph(loadedModel,
tex_width,tex_height,
renderImplementation ));
#endif
// add model to the viewer.
viewer.setSceneData( rootNode );
return viewer.run();
}
Michael Logan
Perot Systems Govt. Services / NASA Ames Res. Ctr.
MS 269-1, Moffett Field, CA, 94035 (650)-604-4494
Task Lead - Visual Cueing & Simulation
Task Lead - Human Manual & Operational Control
Performance
Visualization Engineer - Adaptive Control Technologies
"If you want to start a revolution, don't pick up a gun.
Do it with science and technology." - Stanford R. Ovshinsky
_______________________________________________
osg-users mailing list
osg-users@lists.openscenegraph.org
http://lists.openscenegraph.org/listinfo.cgi/osg-users-openscenegraph.org