Hi,

I have to distort a scene to be consistent with the video that is displayed 
behind the scene (basically some augmented reality) and then render everything 
in an image. I used 2 cameras (not slaves) and a texture to distort the scene, 
but the shadows are not rendered that way. I found the only way to get the 
shadows was to have slave cameras, so I am now using 2 slave cameras and a 
texture to do it — and I get a white texture.

I used several osg examples to test it before using it in my project 
(osgshadow, osgdistortion, osgscreencapture) and I am able to render a 
distorted animated scene into an image (from the osgshadow, I integrated all I 
needed to render the distorted texture into an image).

Here is the code of the function that does that distorted rendering.

Code:

// Off-screen distortion rendering setup, in two stages:
//   1. Slave camera #1 renders the master viewer's scene into an RGBA texture
//      via a frame-buffer object (FBO).
//   2. Slave camera #2 draws a distorted 20x20 mesh textured with that result
//      and captures the final frame with a WindowCaptureCallback.

// Traits for an 800x600 off-screen (pbuffer) graphics context.
osg::ref_ptr<osg::GraphicsContext::Traits> traits = new 
osg::GraphicsContext::Traits;
traits->x = 0;
traits->y = 0;
traits->width = 800;
traits->height = 600;
traits->red = 8;
traits->green = 8;
traits->blue = 8;
// NOTE(review): alpha = 0 requests no destination alpha planes, yet the
// texture below is GL_RGBA and the distortion mesh enables GL_BLEND — a
// missing alpha channel in the context is a classic cause of a blank/white
// result when blending; try traits->alpha = 8 to confirm.
traits->alpha = 0;
traits->windowDecoration = false;
traits->pbuffer = true;
traits->doubleBuffer = true;
traits->sharedContext = 0;

// Both slave cameras share this single off-screen context.
osg::ref_ptr<osg::GraphicsContext> gc = 
osg::GraphicsContext::createGraphicsContext(traits.get());

unsigned int texWidth = 800;
unsigned int texHeight = 600;

// Render target for stage 1: the scene is rendered into this texture, which
// stage 2 then samples through the distortion mesh.
osg::Texture2D* texture = new osg::Texture2D;
texture->setTextureSize(texWidth, texHeight);
texture->setInternalFormat(GL_RGBA);
texture->setFilter(osg::Texture2D::MIN_FILTER,osg::Texture2D::LINEAR);
texture->setFilter(osg::Texture2D::MAG_FILTER,osg::Texture2D::LINEAR);

// --- Stage 1: scene -> texture (RTT slave camera) -------------------------
{
  osg::Camera* camera = new osg::Camera;
  camera->setGraphicsContext(gc.get());
  camera->setViewport(0,0,texWidth,texHeight);
  // NOTE(review): with FRAME_BUFFER_OBJECT the draw/read buffer settings are
  // effectively ignored (rendering goes to the attached texture, not to
  // GL_FRONT), so these two calls are harmless but redundant here.
  camera->setDrawBuffer(GL_FRONT);
  camera->setReadBuffer(GL_FRONT);
  camera->setRenderTargetImplementation(osg::Camera::FRAME_BUFFER_OBJECT);

  // Attach the colour buffer to the texture (no multisampling, level 0).
  camera->attach(osg::Camera::COLOR_BUFFER, texture, 0);
  // Third (useMastersSceneData) argument defaults to true: this slave renders
  // the master viewer's scene graph — which is what makes shadows work.
  _viewer->addSlave(camera, osg::Matrixd(), osg::Matrixd());
}

// --- Stage 2: textured distortion mesh -> captured frame ------------------
{
  osg::Geometry* polyGeom = new osg::Geometry();

  // Vertices are (re)built dynamically, so display lists must be off.
  polyGeom->setSupportsDisplayList(false);

  osg::Vec3 origin(0.0f,0.0f,0.0f);
  osg::Vec3 xAxis(1.0f,0.0f,0.0f);
  osg::Vec3 yAxis(0.0f,1.0f,0.0f);
  float height = (float)texHeight;
  // NOTE(review): stray second ';' below — harmless, but worth removing.
  float width = (float)texWidth;;
  int noSteps = 20;
  float maxU = 1.0;
  float maxV = 1.0;

  osg::Vec3Array* vertices = new osg::Vec3Array;
  osg::Vec2Array* texcoords = new osg::Vec2Array;
  // NOTE(review): 'colors' is allocated but never filled nor assigned to the
  // geometry — either remove it or set a white colour + BIND_OVERALL.
  osg::Vec4Array* colors = new osg::Vec4Array;

  polyGeom->setVertexArray(vertices);
  polyGeom->setTexCoordArray(0,texcoords);

  // Build a rows x cols grid of quad strips; distortPoint() warps each grid
  // point in screen space (distortion coefficients come from the AR manager).
  unsigned int rows = 20, cols = 20;
  float rowSize = height / (float)rows;
  float colSize = width / (float)cols;
  double px, py, u, v;
  double * dist = _arManager->getDistFactor();
 
  for(unsigned int r = 0; r < rows; r++)
  {
    for(unsigned int c = 0; c <= cols; c++)
    {
      // Bottom vertex of the strip column (row r).
      distortPoint(dist, colSize, rowSize, r, c, &px, &py);
      vertices->push_back(osg::Vec3(px, py, 0.0f));

      u = (c / (float)(cols)) * maxU;
      v = (r / (float)(rows)) * maxV;
      texcoords->push_back(osg::Vec2(u, v));

      // Top vertex of the strip column (row r+1).
      distortPoint(dist, colSize, rowSize, r+1, c, &px, &py);
      vertices->push_back(osg::Vec3(px, py, 0.0f));

      u = (c / (float)(cols)) * maxU;
      v = ((r + 1) / (float)(rows)) * maxV;
      texcoords->push_back(osg::Vec2(u, v));
    }
    // One QUAD_STRIP per row: 2*(cols+1) vertices starting at this row's base.
    polyGeom->addPrimitiveSet(new 
osg::DrawArrays(osg::PrimitiveSet::QUAD_STRIP, r * 2 * (cols+1), 2 * (cols+1)));
  }

  osg::StateSet* stateset = polyGeom->getOrCreateStateSet();
            
  // Sample the stage-1 RTT texture on unit 0.
  stateset->setTextureAttributeAndModes(0, texture, osg::StateAttribute::ON);

  // NOTE(review): these two GL_LIGHTING calls contradict each other — the
  // second setMode() replaces the first, so lighting ends up OFF without the
  // PROTECTED flag. Keep only the intended one (likely OFF | PROTECTED).
  stateset->setMode(GL_LIGHTING, osg::StateAttribute::ON | 
osg::StateAttribute::PROTECTED);
  stateset->setMode(GL_LIGHTING, osg::StateAttribute::OFF);
  // Blending requires destination alpha — see the traits->alpha note above.
  stateset->setMode(GL_BLEND, osg::StateAttribute::OVERRIDE | 
osg::StateAttribute::ON);
  osg::Geode * geode = new osg::Geode();
  geode->addDrawable(polyGeom);

  // set up the camera to render the textured quad
  osg::Camera * camera = new osg::Camera;
  camera->setGraphicsContext(gc.get());
  camera->setClearMask(GL_DEPTH_BUFFER_BIT | GL_COLOR_BUFFER_BIT);
  camera->setClearColor(clearColour);
  camera->setViewport(0,0,texWidth,texHeight);
  camera->setDrawBuffer(GL_BACK);
  camera->setReadBuffer(GL_BACK);
  // ABSOLUTE_RF + identity view + 2D ortho: the mesh is drawn in pixel space,
  // independent of the master camera's view/projection.
  camera->setReferenceFrame(osg::Transform::ABSOLUTE_RF);
  camera->setAllowEventFocus(false);
  camera->setProjectionMatrixAsOrtho2D(0, texWidth, 0, texHeight);
  camera->setViewMatrix(osg::Matrix::identity());
  camera->addChild(geode);

  // Capture the composited result from the back buffer at end of frame.
  WindowCaptureCallback * wcc = new 
WindowCaptureCallback(WindowCaptureCallback::DOUBLE_PBO, 
WindowCaptureCallback::END_FRAME, GL_BACK);
  camera->setFinalDrawCallback(wcc);
  _cd = wcc->getContextData(gc.get());

  // useMastersSceneData = false: this slave renders ONLY the mesh geode, not
  // the master scene. NOTE(review): no render order is set on either slave —
  // presumably stage 2 must run after stage 1 (e.g. setRenderOrder(POST_RENDER)
  // here, or rely on slave ordering); if stage 2 draws before the RTT pass has
  // filled the texture, the mesh samples an undefined (often white) texture —
  // worth confirming as the cause of the reported symptom.
  _viewer->addSlave(camera, osg::Matrixd(), osg::Matrixd(), false);
}




I found out that removing the last argument when I add the second slave camera 
gives me back the video + scene, but nothing is distorted. Also, when the 
texture is composed of only one quad (so no distortion computed) and I add the 
second camera with the last parameter set to false, I can see the whole scene 
(no white texture).

Maybe I am missing something important about slave cameras, but I can't find 
any solution.

Any hint on this weird problem is welcome :)
Thank you!

Cheers,
Zarra

------------------
Read this topic online here:
http://forum.openscenegraph.org/viewtopic.php?p=37219#37219





_______________________________________________
osg-users mailing list
[email protected]
http://lists.openscenegraph.org/listinfo.cgi/osg-users-openscenegraph.org

Reply via email to