Ray tracing a triangle mesh object

I'm trying to write a ray tracer for any object made of a triangle mesh. I use an external library to load a cube from the .ply format and then trace it. So far I've implemented most of the tracer, and now I'm trying to test it with a cube, but for some reason all I get on the screen is a red line. I've tried several things to fix it, but I simply can't figure it out. For this first test I only create primary rays, and if they hit my cube I color that pixel with the cube's diffuse color and return. To check ray-object intersections I go through all the triangles that form the object and return the distance to the closest one. It would be great if you could look at the code and tell me what might have gone wrong and where. I'd really appreciate it.

Ray-triangle intersection:

bool intersectTri(const Vec3D& ray_origin, const Vec3D& ray_direction, const Vec3D& v0, const Vec3D& v1, const Vec3D& v2, double &t, double &u, double &v) const 
    { 
     // Moller-Trumbore ray/triangle intersection test.
     Vec3D edge1 = v1 - v0; 
     Vec3D edge2 = v2 - v0; 
     Vec3D pvec = ray_direction.cross(edge2); 
     double det = edge1.dot(pvec); 
     // Ray is (nearly) parallel to the triangle's plane.
     if (det > -THRESHOLD && det < THRESHOLD) 
      return false; 
     double invDet = 1/det; 
     Vec3D tvec = ray_origin - v0; 
     // First barycentric coordinate.
     u = tvec.dot(pvec)*invDet; 
     if (u < 0 || u > 1) 
      return false; 
     Vec3D qvec = tvec.cross(edge1); 
     // Second barycentric coordinate.
     v = ray_direction.dot(qvec)*invDet; 
     if (v < 0 || u + v > 1) 
      return false; 
     // Distance along the ray; reject intersections behind the origin.
     t = edge2.dot(qvec)*invDet; 
     if (t < 0) 
      return false; 
     return true; 
    } 
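
For reference, the routine above is the Möller–Trumbore test: it solves

$$O + t\,D = (1 - u - v)\,v_0 + u\,v_1 + v\,v_2$$

for the ray origin $O$ and direction $D$, and rejects the hit when the determinant is near zero (the ray is parallel to the triangle's plane), when $(u, v)$ falls outside the triangle ($u < 0$, $v < 0$ or $u + v > 1$), or when $t < 0$ (the intersection lies behind the ray origin).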

//Object intersection 
bool intersect(const Vec3D& ray_origin, const Vec3D& ray_direction, IntersectionData& idata, bool enforce_max) const 
    { 

     double tClosest; 
     if (enforce_max) 
     { 
      tClosest = idata.t; 
     } 
     else 
     { 
      tClosest = TMAX; 
     } 

     for (int i = 0 ; i < indices.size() ; i++) 
     { 
      const Vec3D v0 = vertices[indices[i][0]]; 
      const Vec3D v1 = vertices[indices[i][1]]; 
      const Vec3D v2 = vertices[indices[i][2]]; 
      double t, u, v; 
      if (intersectTri(ray_origin, ray_direction, v0, v1, v2, t, u, v)) 
      { 
       if (t < tClosest) 
       { 
        idata.t = t; 
        tClosest = t;     
        idata.u = u; 
        idata.v = v; 
        idata.index = i; 
       } 
      } 
     } 
     return tClosest < TMAX && tClosest > 0; 
    } 

Vec3D trace(World world, Vec3D &ray_origin, Vec3D &ray_direction) 
{ 

Vec3D objColor = world.background_color; 
IntersectionData idata; 
double coeff = 1.0; 
int depth = 0; 

double tClosest = TMAX; 
Object *hitObject = NULL; 
for (unsigned int i = 0 ; i < world.objs.size() ; i++) 
{  
    IntersectionData idata_curr; 
    if (world.objs[i].intersect(ray_origin, ray_direction, idata_curr, false)) 
    { 
     if (idata_curr.t < tClosest && idata_curr.t > 0) 
     { 
      idata.t = idata_curr.t; 
      idata.u = idata_curr.u; 
      idata.v = idata_curr.v; 
      idata.index = idata_curr.index; 
      tClosest = idata_curr.t;    
      hitObject = &(world.objs[i]); 
     } 
    } 
} 
if (hitObject == NULL) 
{ 
    return world.background_color; 
} 
else 
{ 
    return hitObject->getDiffuse(); 
} 
} 

int main(int argc, char** argv) 
{ 

parse("cube.ply"); 
Vec3D diffusion1(1, 0, 0); 
Vec3D specular1(1, 1, 1); 
Object cube1(coordinates, connected_vertices, diffusion1, specular1, 0, 0); 
World wrld; 
// Add objects to the world 
wrld.objs.push_back(cube1); 
Vec3D background(0, 0, 0); 
wrld.background_color = background; 
// Set light color 
Vec3D light_clr(1, 1, 1); 
wrld.light_colors.push_back(light_clr); 
// Set light position 
Vec3D light(0, 64, -10); 
wrld.light_positions.push_back(light); 

int width = 128; 
int height = 128; 
Vec3D *image = new Vec3D[width*height]; 
Vec3D *pixel = image; 

// Trace rays 
for (int y = -height/2 ; y < height/2 ; ++y) 
{ 
    for (int x = -width/2 ; x < width/2 ; ++x, ++pixel) 
    { 
     Vec3D ray_dir(x+0.5, y+0.5, -1.0); 
     ray_dir.normalize(); 
     Vec3D ray_orig(0.5*width, 0.5*height, 0.0); 
     *pixel = trace(wrld, ray_orig, ray_dir);   
    } 
} 

savePPM("./test.ppm", image, width, height); 
return 0; 
} 

I just ran a test case and this is what I got:

It's a unit cube centered at (0, 0, -1.5) and scaled by a factor of 100 along the X and Y axes. It looks like something is wrong with the projection, but I can't tell exactly what from the result. Also, shouldn't the object end up in the middle of the picture in this case, since the cube is centered at (0, 0)? FIX: I solved the centering problem by doing ray_dir = ray_dir - ray_orig before normalizing and calling the trace function. Still, the perspective seems wrong.


Why does your ray origin sit at (width/2, height/2) when your ray directions go from -(width/2, height/2) to (width/2, height/2)? You need to either move the origin to the world origin or shift your ray directions so that the lowest-valued corner is (0, 0, -1). I think that explains the resulting image, since it looks like a very wide field of view. – MikeMx7f
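
In code, the change suggested above amounts to roughly this inside the inner loop of main() (a sketch; only the origin changes, the directions stay as they were):

    Vec3D ray_orig(0.0, 0.0, 0.0);          // eye at the world origin
    Vec3D ray_dir(x + 0.5, y + 0.5, -1.0);  // spans (-width/2, -height/2, -1) .. (width/2, height/2, -1)
    ray_dir.normalize(); 
    *pixel = trace(wrld, ray_orig, ray_dir); 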


Thanks a lot! I moved the ray origin to (0, 0) and now I get this: http://s10.postimage.org/i33s9eih5/test.png. Should I implement a rotation for the cube to get a 3D look? Right now I can only see the front face. I also tried translating the cube in (x, y), but all I get is its front face. – franciscb
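
On the field-of-view point: with directions like (x+0.5, y+0.5, -1.0) over a 128×128 image, the horizontal field of view is close to 180°, so a unit cube at z = -1.5 only spans a couple of pixels and translating it sideways barely changes the picture. Rotating the cube shouldn't be necessary; a narrower projection should already help. One common way to build the primary rays is to map each pixel through an explicit field of view, for example (a sketch replacing the two loops in main(); the 60° fov, the aspect handling, and tan/M_PI from <cmath> are my assumptions, not part of the original code):

    double fov = 60.0 * M_PI / 180.0;               // assumed vertical field of view
    double aspect = double(width) / double(height); 
    double scale = tan(fov * 0.5); 
    Vec3D ray_orig(0.0, 0.0, 0.0); 
    for (int y = 0 ; y < height ; ++y) 
    { 
        for (int x = 0 ; x < width ; ++x, ++pixel) 
        { 
            // Map the pixel center into [-1, 1] on an image plane at z = -1.
            double px = (2.0 * (x + 0.5) / width - 1.0) * scale * aspect; 
            double py = (1.0 - 2.0 * (y + 0.5) / height) * scale; 
            Vec3D ray_dir(px, py, -1.0); 
            ray_dir.normalize(); 
            *pixel = trace(wrld, ray_orig, ray_dir); 
        } 
    } 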

Answer


I kept working on it, and I've now started implementing the diffuse term following Phong.
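
For reference, the per-light diffuse term the code below accumulates is $C_{diffuse} \cdot k_d \cdot \max(N \cdot L, 0)$, where $k_d$ is getDiffuseReflection() and $L$ is the normalized direction from the hit point to the light; the light color from world.light_colors is not factored in yet.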

Vec3D trace(World world, Vec3D &ray_origin, Vec3D &ray_direction) 
{

Vec3D objColor = Vec3D(0); 
IntersectionData idata; 
double coeff = 1.0; 
int depth = 0; 
do 
{ 
    double tClosest = TMAX; 
    Object *hitObject = NULL; 
    for (unsigned int i = 0 ; i < world.objs.size() ; i++) 
    {  
     IntersectionData idata_curr; 
     if (world.objs[i].intersect(ray_origin, ray_direction, idata_curr, false)) 
     { 
      if (idata_curr.t < tClosest && idata_curr.t > 0) 
      { 
       idata.t = idata_curr.t; 
       idata.u = idata_curr.u; 
       idata.v = idata_curr.v; 
       idata.index = idata_curr.index; 
       tClosest = idata_curr.t;    
       hitObject = &(world.objs[i]); 
      } 
     } 
    } 
    if (hitObject == NULL) 
    { 
     return world.background_color; 
    } 

    Vec3D newStart = ray_origin + ray_direction*idata.t; 

    // Compute normal at intersection by interpolating vertex normals (PHONG Idea) 
    Vec3D v0 = hitObject->getVertices()[hitObject->getIndices()[idata.index][0]]; 
    Vec3D v1 = hitObject->getVertices()[hitObject->getIndices()[idata.index][1]]; 
    Vec3D v2 = hitObject->getVertices()[hitObject->getIndices()[idata.index][2]]; 

    Vec3D n1 = hitObject->getNormals()[hitObject->getIndices()[idata.index][0]]; 
    Vec3D n2 = hitObject->getNormals()[hitObject->getIndices()[idata.index][1]]; 
    Vec3D n3 = hitObject->getNormals()[hitObject->getIndices()[idata.index][2]]; 

// Vec3D N = n1 + (n2 - n1)*idata.u + (n3 - n1)*idata.v; 
    Vec3D N = v0.computeFaceNrm(v1, v2); 
    if (ray_direction.dot(N) > 0) 
    { 
     N = N*(-1); 
    } 
    N.normalize(); 

    Vec3D lightray_origin = newStart; 

    for (unsigned int itr = 0 ; itr < world.light_positions.size() ; itr++) 
    { 

     Vec3D lightray_dir = world.light_positions[0] - newStart; 
     lightray_dir.normalize(); 

     double cos_theta = max(N.dot(lightray_dir), 0.0); 
     objColor.setX(objColor.getX() + hitObject->getDiffuse().getX()*hitObject->getDiffuseReflection()*cos_theta); 
     objColor.setY(objColor.getY() + hitObject->getDiffuse().getY()*hitObject->getDiffuseReflection()*cos_theta); 
     objColor.setZ(objColor.getZ() + hitObject->getDiffuse().getZ()*hitObject->getDiffuseReflection()*cos_theta); 
     return objColor; 
    } 

    depth++; 

} while(coeff > 0 && depth < MAX_RAY_DEPTH); 
return objColor; 

}

When I hit the object with the primary ray, I send another ray toward the light source located at (0, 0, 0) and return the diffuse color according to the Phong lighting model, but the result is really not what I expected: http://s15.postimage.org/vc6uyyssr/test.png. The cube is a unit cube centered at (0, 0, 0) and then translated by (1.5, -1.5, -1.5). The way I see it, the left side of the cube should be receiving more light than it actually is. What do you think?
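
One thing that may or may not be related: the light ray above is never tested for occlusion, so a hit point receives the diffuse contribution even when another triangle lies between it and the light. A hard-shadow check could reuse the existing intersect() inside the light loop, roughly like this (a sketch against the classes above; the EPS offset is my own choice, and I'm assuming Vec3D has no length() helper, hence the sqrt of the dot product):

    // Skip the diffuse contribution when something blocks the path to the light.
    const double EPS = 1e-4; 
    Vec3D shadow_orig = newStart + N*EPS;                       // nudge off the surface to avoid self-hits
    Vec3D to_light = world.light_positions[itr] - shadow_orig; 
    double dist_to_light = sqrt(to_light.dot(to_light)); 
    Vec3D shadow_dir = to_light; 
    shadow_dir.normalize(); 

    bool in_shadow = false; 
    for (unsigned int k = 0 ; k < world.objs.size() ; k++) 
    { 
        IntersectionData sdata; 
        if (world.objs[k].intersect(shadow_orig, shadow_dir, sdata, false) 
            && sdata.t > 0 && sdata.t < dist_to_light)          // blocker strictly between point and light
        { 
            in_shadow = true; 
            break; 
        } 
    } 
    if (!in_shadow) 
    { 
        // ... accumulate getDiffuse()*getDiffuseReflection()*cos_theta as above 
    } 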