A Basic Ray Tracer

Author: Dragon_boy | Published 2020-09-21 15:52

In pseudocode, a ray tracer looks like this:

#define MAX_RAY_DEPTH 3 
 
color Trace(const Ray &ray, int depth) 
{ 
    Object *object = NULL; 
    float minDistance = INFINITY; 
    Point pHit; 
    Normal nHit; 
    for (int k = 0; k < objects.size(); ++k) { 
        if (Intersect(objects[k], ray, &pHit, &nHit)) { 
            // ray origin = eye position if it's the prim ray
            float distance = Distance(ray.origin, pHit); 
            if (distance < minDistance) { 
                object = objects[k]; 
                minDistance = distance; 
            } 
        } 
    } 
    if (object == NULL) 
        return 0; 
    // if the object material is glass, split the ray into a reflection
    // and a refraction ray.
    if (object->isGlass && depth < MAX_RAY_DEPTH) { 
        // compute reflection
        Ray reflectionRay; 
        reflectionRay = computeReflectionRay(ray.direction, nHit); 
        // recurse
        color reflectionColor = Trace(reflectionRay, depth + 1); 
        Ray refractionRay; 
        refractionRay = computeRefractionRay( 
            object->indexOfRefraction, 
            ray.direction, 
            nHit); 
        // recurse
        color refractionColor = Trace(refractionRay, depth + 1); 
        float Kr, Kt; 
        fresnel( 
            object->indexOfRefraction, 
            nHit, 
            ray.direction, 
            &Kr, 
            &Kt); 
        return reflectionColor * Kr + refractionColor * (1-Kr); 
    } 
    // object is a diffuse opaque object        
    // compute illumination
    Ray shadowRay; 
    shadowRay.origin = pHit; 
    shadowRay.direction = lightPosition - pHit; 
    for (int k = 0; k < objects.size(); ++k) { 
        if (Intersect(objects[k], shadowRay)) { 
            // hit point is in shadow so just return
            return 0; 
        } 
    } 
    // point is illuminated
    return object->color * light.brightness; 
} 
 
// for each pixel of the image
for (int j = 0; j < imageHeight; ++j) { 
    for (int i = 0; i < imageWidth; ++i) { 
        // compute primary ray direction
        Ray primRay; 
        computePrimRay(i, j, &primRay); 
        pixels[i][j] = Trace(primRay, 0); 
    } 
} 
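 
The fresnel() call in the glass branch above is never defined by the pseudocode. One common choice is Schlick's approximation; the sketch below is an assumption of mine, not part of the original article, written with glm::vec3 as in the full implementation further down, and it assumes the ray travels from air (n = 1) into the object.

// Possible implementation of the fresnel() call above, using Schlick's
// approximation (hypothetical; the article does not define this function).
// rayDir and nHit are unit vectors; Kr is the reflected fraction of light,
// Kt the transmitted fraction.
void fresnel(float ior, const glm::vec3 &nHit, const glm::vec3 &rayDir, float *Kr, float *Kt)
{
    float cosi = -glm::dot(rayDir, nHit);       // cosine of the angle of incidence
    float r0 = (1 - ior) / (1 + ior);
    r0 = r0 * r0;                               // reflectance at normal incidence
    *Kr = r0 + (1 - r0) * powf(1 - cosi, 5.0f); // Schlick's approximation
    *Kt = 1 - *Kr;                              // whatever is not reflected is transmitted
}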

Here we use the ray-tracing algorithm to draw a few spheres, all of which use an approximation of the Fresnel equations to blend reflection and refraction.
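 
The C++ implementation below relies on GLM for vector math and on a couple of names that never appear in the listings. A minimal preamble that makes the code compile might look like the following; the exact values of MAX_RAY_DEPTH and PI are my assumptions, since the article does not state them.

// Assumed preamble for the C++ listings below (not shown in the original article).
#include <cmath>     // sqrt, tan, pow
#include <cstdlib>   // srand
#include <fstream>   // std::ofstream
#include <vector>    // std::vector

#include <glm/glm.hpp>

#define MAX_RAY_DEPTH 5      // recursion limit used by trace(); the pseudocode above used 3
#define PI 3.141592653589793 // used by render() to convert the field of view to radians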

We first define a Sphere class with a center, radius, surface color, emission color, transparency, and reflectivity, plus a member function that tests whether a ray intersects the sphere:

class Sphere
{
public:
    glm::vec3 center;                           // position of the sphere 
    float radius, radius2;                  // sphere radius and radius^2 
    glm::vec3 surfaceColor, emissionColor;      // surface color and emission (light) 
    float transparency, reflection;         // surface transparency and reflectivity 
    Sphere(const glm::vec3& c, const float& r, const glm::vec3& sc, const float& refl = 0, const float& transp = 0, const glm::vec3& ec = { 0,0,0 })
        :center(c), radius(r), radius2(r* r), surfaceColor(sc), emissionColor(ec), transparency(transp), reflection(refl)
    {
    }
    // Geometric ray/sphere intersection test; raydir is assumed to be normalized.
    // On a hit, t0 and t1 receive the distances along the ray where it enters
    // and leaves the sphere.
    bool intersect(const glm::vec3& rayorig, const glm::vec3& raydir, float& t0, float& t1) const
    {
        glm::vec3 l = center - rayorig;
        float tca = glm::dot(l, raydir);        // distance along the ray to the closest approach
        if (tca < 0)                            // sphere is behind the ray origin
            return false;
        float d2 = glm::dot(l, l) - tca * tca;  // squared distance from the center to the ray
        if (d2 > radius2)                       // the ray misses the sphere
            return false;
        float thc = sqrt(radius2 - d2);         // half the chord length
        t0 = tca - thc;
        t1 = tca + thc;

        return true;
    }
};
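 
As a quick sanity check, the intersection test can be exercised on its own. The sphere and ray below are a hypothetical example of mine, not from the article.

// Hypothetical usage example: a ray from the origin straight down -z against
// a sphere of radius 4 centered at (0, 0, -20).
Sphere s(glm::vec3(0, 0, -20), 4, glm::vec3(1.0f, 0.32f, 0.36f));
float t0, t1;
if (s.intersect(glm::vec3(0), glm::vec3(0, 0, -1), t0, t1))
{
    // t0 == 16 (the ray enters the sphere at z = -16)
    // t1 == 24 (the ray leaves it at z = -24)
}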

Next we define a mix (linear interpolation) helper, which we will use to approximate the Fresnel effect:

float mix(const float& a, const float& b, const float& mix)
{
    return b * mix + a * (1 - mix);
}
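 
In trace() below, this helper blends a grazing-angle term with full reflectance, acting as a cheap stand-in for the Fresnel equations. A small worked example (the 0.3 value is arbitrary, chosen only for illustration):

// Worked example of the Fresnel approximation used in trace() below.
float facingratio = 0.3f;                                    // cos(angle between view ray and normal)
float fresneleffect = mix(pow(1 - facingratio, 3), 1, 0.1);  // = 0.1 + 0.9 * 0.343 ≈ 0.41
// Head-on view (facingratio = 1): fresneleffect = mix(0, 1, 0.1) = 0.1
// Grazing view (facingratio = 0): fresneleffect = mix(1, 1, 0.1) = 1.0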

The trace function is the most important part. It takes a ray as a parameter (defined by its origin and direction). We test this ray against the geometry in the scene: if it intersects an object, we compute the intersection point and the normal at that point, and then shade the point. Shading depends on surface properties such as transparency, reflectivity, and the diffuse factor. The function returns a color: the color of the object at the intersection point if there is an intersection, otherwise the background color:

glm::vec3 trace(const glm::vec3& rayorig,const glm::vec3& raydir,const std::vector<Sphere>& spheres,const int& depth)
{
    float tnear = INFINITY;
    const Sphere* sphere = NULL;
    // find intersection of this ray with the sphere in the scene
    for (unsigned i = 0; i < spheres.size(); ++i) 
    {
        float t0 = INFINITY, t1 = INFINITY;
        if (spheres[i].intersect(rayorig, raydir, t0, t1)) 
        {
            if (t0 < 0) 
                t0 = t1;
            if (t0 < tnear) 
            {
                tnear = t0;
                sphere = &spheres[i];
            }
        }
    }
    // if there's no intersection return black or background color
    if (!sphere) 
        return glm::vec3(2);
    glm::vec3 surfaceColor = {0,0,0}; // color of the surface of the object intersected by the ray 
    glm::vec3 phit = rayorig + raydir * tnear; // point of intersection 
    glm::vec3 nhit = glm::normalize(phit - sphere->center); // normal at the intersection point 
    // If the normal and the view direction are not opposite to each other
    // reverse the normal direction. That also means we are inside the sphere so set
    // the inside bool to true. Finally reverse the sign of IdotN which we want
    // positive.
    float bias = 1e-4; // add some bias to the point from which we will be tracing 
    bool inside = false;
    if (glm::dot(raydir,nhit) > 0) 
        nhit = -nhit, inside = true;
    if ((sphere->transparency > 0 || sphere->reflection > 0) && depth < MAX_RAY_DEPTH) 
    {
        float facingratio = -glm::dot(raydir, nhit);
        // change the mix value to tweak the effect
        float fresneleffect = mix(pow(1 - facingratio, 3), 1, 0.1);
        // compute reflection direction (reflecting a unit vector about a unit
        // normal already yields a unit vector, but normalize defensively)
        glm::vec3 refldir = raydir - 2 * glm::dot(raydir, nhit) * nhit;
        refldir = glm::normalize(refldir);
        glm::vec3 reflection = trace(phit + nhit * bias, refldir, spheres, depth + 1);
        glm::vec3 refraction = {0,0,0};
        // if the sphere is also transparent compute refraction ray (transmission)
        if (sphere->transparency) 
        {
            float ior = 1.1, eta = (inside) ? ior : 1 / ior; // are we inside or outside the surface? 
            float cosi = -glm::dot(nhit, raydir);
            float k = 1 - eta * eta * (1 - cosi * cosi);
            glm::vec3 refrdir = glm::normalize(raydir * eta + nhit * (eta * cosi - sqrt(k)));
            refraction = trace(phit - nhit * bias, refrdir, spheres, depth + 1);
        }
        // the result is a mix of reflection and refraction (if the sphere is transparent)
        surfaceColor = (reflection * fresneleffect +refraction * (1 - fresneleffect) * sphere->transparency) * sphere->surfaceColor;
    }
    else 
    {
        // it's a diffuse object, no need to raytrace any further
        for (unsigned i = 0; i < spheres.size(); ++i) 
        {
            if (spheres[i].emissionColor.x > 0) 
            {
                // this is a light
                glm::vec3 transmission = { 1,1,1 };
                glm::vec3 lightDirection = glm::normalize(spheres[i].center - phit);
                for (unsigned j = 0; j < spheres.size(); j++) 
                {
                    if (i != j) 
                    {
                        float t0, t1;
                        if (spheres[j].intersect(phit + nhit * bias, lightDirection, t0, t1)) 
                        {
                            transmission = {0,0,0};
                            break;
                        }
                    }
                }
                surfaceColor += sphere->surfaceColor * transmission * glm::max(float(0), glm::dot(nhit,lightDirection)) * spheres[i].emissionColor;
            }
        }
    }

    return surfaceColor + sphere->emissionColor;
}
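 
The refraction branch above is Snell's law in vector form. The standalone helper below restates the same math; it is a hypothetical sketch of mine, not a function used by the article's code.

// Hypothetical helper restating the refraction computation from trace().
// I is the incident direction and N the surface normal, both unit length and
// with N facing the incident ray; eta is the ratio of refractive indices
// n1 / n2. If k were negative the ray would be totally internally reflected;
// like the article's code, this sketch does not handle that case.
glm::vec3 refractDir(const glm::vec3& I, const glm::vec3& N, float eta)
{
    float cosi = -glm::dot(N, I);                 // cosine of the angle of incidence
    float k = 1 - eta * eta * (1 - cosi * cosi);  // squared cosine of the refraction angle
    return glm::normalize(I * eta + N * (eta * cosi - sqrtf(k)));
}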

Next we define a render function: for each pixel of the image we compute a camera (primary) ray and trace it to get a color. If the ray hits a sphere, we return the color at the intersection point, otherwise we return the background color:

void render(const std::vector<Sphere>& spheres)
{
    unsigned width = 640, height = 480;
    glm::vec3* image = new glm::vec3[width * height], * pixel = image;
    float invWidth = 1 / float(width), invHeight = 1 / float(height);
    float fov = 30, aspectratio = width / float(height);
    float angle = tan(PI * 0.5 * fov / 180.);
    // Trace rays
    for (unsigned y = 0; y < height; y++) 
    {
        for (unsigned x = 0; x < width; x++, pixel++) 
        {
            float xx = (2 * ((x + 0.5) * invWidth) - 1) * angle * aspectratio;
            float yy = (1 - 2 * ((y + 0.5) * invHeight)) * angle;
            glm::vec3 raydir = glm::normalize(glm::vec3(xx, yy, -1));
            *pixel = trace(glm::vec3(0), raydir, spheres, 0);
        }
    }
    // Save result to a PPM image (keep these flags if you compile under Windows)
    std::ofstream ofs("./result.ppm", std::ios::out | std::ios::binary);
    ofs << "P6\n" << width << " " << height << "\n255\n";
    for (unsigned i = 0; i < width * height; i++) 
    {
        ofs << (unsigned char)(glm::min(float(1), image[i].x) * 255) <<
            (unsigned char)(glm::min(float(1), image[i].y) * 255) <<
            (unsigned char)(glm::min(float(1), image[i].z) * 255);
    }
    ofs.close();
    delete[] image;
}
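 
The xx and yy expressions map each pixel center to camera space: the pixel is normalized to [0, 1], remapped to [-1, 1] (with y flipped so the image is not upside down), and scaled by tan(fov / 2) and the aspect ratio. The helper below restates that mapping on its own; the function name is mine, not part of the article.

// Hypothetical helper restating the pixel-to-ray mapping used in render().
// The camera sits at the origin looking down -z; fov is the vertical field
// of view in degrees.
glm::vec3 primaryRayDir(unsigned x, unsigned y, unsigned width, unsigned height, float fov)
{
    float angle = tan(PI * 0.5 * fov / 180.);                         // tan(fov / 2)
    float aspectratio = width / float(height);
    float xx = (2 * ((x + 0.5f) / width) - 1) * angle * aspectratio;  // [-1, 1] horizontally
    float yy = (1 - 2 * ((y + 0.5f) / height)) * angle;               // [-1, 1] vertically, flipped
    return glm::normalize(glm::vec3(xx, yy, -1));
}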

In the main function we define five spheres and one light source, then pass them to the render function:

int main()
{
    srand(13);
    std::vector<Sphere> spheres;
    // position, radius, surface color, reflectivity, transparency, emission color
    spheres.push_back(Sphere(glm::vec3(0.0, -10004, -20), 10000, glm::vec3(0.20, 0.20, 0.20), 0, 0.0));
    spheres.push_back(Sphere(glm::vec3(0.0, 0, -20), 4, glm::vec3(1.00, 0.32, 0.36), 1, 0.5));
    spheres.push_back(Sphere(glm::vec3(5.0, -1, -15), 2, glm::vec3(0.90, 0.76, 0.46), 1, 0.0));
    spheres.push_back(Sphere(glm::vec3(5.0, 0, -25), 3, glm::vec3(0.65, 0.77, 0.97), 1, 0.0));
    spheres.push_back(Sphere(glm::vec3(-5.5, 0, -15), 3, glm::vec3(0.90, 0.90, 0.90), 1, 0.0));
    // light
    spheres.push_back(Sphere(glm::vec3(0.0, 20, -30), 3, glm::vec3(0.00, 0.00, 0.00), 0, 0.0, glm::vec3(3)));
    render(spheres);

    return 0;
}

After compiling and running the program, the result can be viewed in result.ppm:

