OpenGL itself has no concept of a camera, but we can simulate one by moving everything in the scene in the opposite direction, which gives the impression that we are moving rather than the scene.
The LookAt Matrix
The view matrix transforms all world coordinates into view coordinates that are relative to the camera's position and orientation. To define a camera we need its position in world space, the direction it is looking in, a vector pointing to its right and a vector pointing upwards from the camera. Effectively this creates a coordinate system of three mutually perpendicular unit axes with the camera's position as its origin.
With these camera vectors we can build a LookAt matrix; using it as the view matrix efficiently transforms all world coordinates into the view space we just defined. The LookAt matrix does what its name says: it creates a view matrix that looks at (Look at) a given target (the inverse of the camera's translation and rotation), where R is the right vector, U is the up vector, D is the direction vector and P is the camera's position vector:
$$\text{LookAt} = \begin{bmatrix} R_x & R_y & R_z & 0 \\ U_x & U_y & U_z & 0 \\ D_x & D_y & D_z & 0 \\ 0 & 0 & 0 & 1 \end{bmatrix} \begin{bmatrix} 1 & 0 & 0 & -P_x \\ 0 & 1 & 0 & -P_y \\ 0 & 0 & 1 & -P_z \\ 0 & 0 & 0 & 1 \end{bmatrix}$$
Note that D points from the target towards the camera, i.e. opposite to the viewing direction, which is why the code below uses -m_vec3Front.
Implementing a LookAt matrix by hand:
// m_vec3Pos, m_vec3Front, m_vec3Up are the camera's position, viewing direction and up vector
glm::mat4 Camera::ownLookAt()
{
    // GLM matrices are indexed column-first: m[col][row]
    glm::mat4 translation = glm::mat4(1.0f);
    translation[3][0] = -m_vec3Pos.x;   // fourth column = negated camera position
    translation[3][1] = -m_vec3Pos.y;
    translation[3][2] = -m_vec3Pos.z;
    glm::mat4 rotation = glm::mat4(1.0f);
    // camera basis: right = front x up, direction D = -front (the camera looks down its -z axis)
    glm::vec3 right = glm::normalize(glm::cross(m_vec3Front, m_vec3Up));
    rotation[0][0] = right.x;           // first row = right vector
    rotation[1][0] = right.y;
    rotation[2][0] = right.z;
    rotation[0][1] = m_vec3Up.x;        // second row = up vector
    rotation[1][1] = m_vec3Up.y;
    rotation[2][1] = m_vec3Up.z;
    rotation[0][2] = -m_vec3Front.x;    // third row = direction vector
    rotation[1][2] = -m_vec3Front.y;
    rotation[2][2] = -m_vec3Front.z;
    return rotation * translation;
}
GLM already provides all of this. We only have to define a camera position, a target position (I prefer to define a direction and compute the target from it) and a vector representing the up vector in world space (the right vector can be obtained from the cross product of the up vector and the vector pointing at the target). GLM then creates the LookAt matrix for us, which we can use as our view matrix:
glm::mat4 view = glm::lookAt(glm::vec3(0.0f, 0.0f, 3.0f),
glm::vec3(0.0f, 0.0f, 0.0f),
glm::vec3(0.0f, 1.0f, 0.0f));
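As a quick sanity check (my own addition, not part of the original text), the hand-rolled ownLookAt above should produce the same matrix as glm::lookAt for the same camera state, since both build the inverse of the camera's rotation and translation:
// camera is an instance of the Camera class shown further down in this post
glm::mat4 own = camera.ownLookAt();
glm::mat4 ref = glm::lookAt(camera.m_vec3Pos,
                            camera.m_vec3Pos + camera.m_vec3Front,
                            camera.m_vec3Up);
// own and ref should match element-wise up to floating-point error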
FPS Camera
Free Movement
Read the WASD keys to move the camera:
glm::vec3 cameraPos = glm::vec3(0.0f, 0.0f, 3.0f);
glm::vec3 cameraFront = glm::vec3(0.0f, 0.0f, -1.0f);
glm::vec3 cameraUp = glm::vec3(0.0f, 1.0f, 0.0f);
glm::vec3 cameraWorldUp = glm::vec3(0.0f, 1.0f, 0.0f);
float deltaTime = 0.0f;
float lastFrame = 0.0f;
int main()
{
    ···
    while (!glfwWindowShouldClose(window))
    {
        float currentFrame = glfwGetTime();
        deltaTime = currentFrame - lastFrame;
        lastFrame = currentFrame;
        processInput(window);
        ···
        glm::mat4 view = glm::mat4(1.0f);
        view = glm::lookAt(cameraPos, cameraPos + cameraFront, cameraUp);
        glUniformMatrix4fv(glGetUniformLocation(shader.shaderProgramId, "view"), 1, GL_FALSE, glm::value_ptr(view));
        ···
    }
    ···
}
void processInput(GLFWwindow *window)
{
    ···
    float cameraSpeed = 2.5f * deltaTime;
    if (glfwGetKey(window, GLFW_KEY_W) == GLFW_PRESS)
        cameraPos += cameraFront * cameraSpeed;
    if (glfwGetKey(window, GLFW_KEY_S) == GLFW_PRESS)
        cameraPos -= cameraFront * cameraSpeed;
    if (glfwGetKey(window, GLFW_KEY_A) == GLFW_PRESS)
        cameraPos -= glm::normalize(glm::cross(cameraFront, cameraUp)) * cameraSpeed;
    if (glfwGetKey(window, GLFW_KEY_D) == GLFW_PRESS)
        cameraPos += glm::normalize(glm::cross(cameraFront, cameraUp)) * cameraSpeed;
}
In practice, depending on processing power, some machines render far more frames per second than others and therefore call processInput more often. As a result, depending on the setup, some users would move very fast while others move very slowly. When shipping a program we have to make sure it moves at the same speed on all hardware.
Graphics applications and games usually keep a deltatime variable that stores the time it took to render the previous frame, and multiply all velocities by this deltaTime value. When deltaTime is large, the previous frame took longer to render, so the velocity for the current frame is scaled up to compensate. With this approach the camera moves at the same speed whether the machine is fast or slow, and every user gets the same experience.
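For example (my numbers, not from the original text): with cameraSpeed = 2.5f * deltaTime, a machine running at 100 FPS has deltaTime ≈ 0.01 s and moves 0.025 units per frame, while a machine at 25 FPS has deltaTime ≈ 0.04 s and moves 0.1 units per frame; both cover the same 2.5 units per second.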
Looking Around
Euler Angles
Euler angles are three values that can represent any rotation in 3D space; they were introduced by Leonhard Euler in the 18th century. There are three of them: pitch, yaw and roll. Pitch describes how much we look up or down, yaw describes how much we look left or right, and roll describes how much we roll the camera, which is mostly used for space-flight cameras. Each Euler angle is a single value, and combining all three lets us compute any rotation vector in 3D space. For an FPS camera we only care about pitch and yaw, so roll is not covered here.
glm::vec3 direction;
direction.x = cos(glm::radians(pitch)) * cos(glm::radians(yaw));
direction.y = sin(glm::radians(pitch));
direction.z = cos(glm::radians(pitch)) * sin(glm::radians(yaw));
cameraFront = glm::normalize(direction);
The easiest way to see where these expressions come from is to look at how pitch and yaw each project the direction vector onto the coordinate axes (see the short derivation below). With this we have a 3D direction vector that turns pitch and yaw into a freely rotating camera view.
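A short derivation (my own wording): pitch tilts the unit direction vector out of the horizontal plane, giving a vertical component of sin(pitch) and leaving a horizontal projection of length cos(pitch); yaw then distributes that horizontal projection over the x and z axes:
$$y = \sin(\text{pitch}), \qquad x = \cos(\text{pitch})\cos(\text{yaw}), \qquad z = \cos(\text{pitch})\sin(\text{yaw})$$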
Note that a camera system based on Euler angles is still not perfect. Depending on the view constraints or the configuration, it can still run into gimbal lock. The best camera systems use quaternions, which we leave for later. (A quaternion camera implementation can be found here.)
Getting Pitch and Yaw from the Mouse
bool firstMouse = true;
float lastX = 800.0f / 2.0;
float lastY = 600.0f / 2.0;
float yaw = -90.0f; // a yaw of 0.0 would make the direction point to the right (+x), so start at -90 degrees
float pitch = 0.0f;
float sensitivity = 0.1f;
float fov = 45.0f;
int main()
{
    ···
    // hide the cursor and capture it inside the window
    //glfwSetInputMode(window, GLFW_CURSOR, GLFW_CURSOR_DISABLED);
    glfwSetCursorPosCallback(window, mouse_callback);
    glfwSetScrollCallback(window, scroll_callback);
    ···
    while (!glfwWindowShouldClose(window))
    {
        ···
        glm::mat4 view = glm::mat4(1.0f);
        view = glm::lookAt(cameraPos, cameraPos + cameraFront, cameraUp);
        glUniformMatrix4fv(glGetUniformLocation(shader.shaderProgramId, "view"), 1, GL_FALSE, glm::value_ptr(view));
        glm::mat4 projection = glm::mat4(1.0f);
        projection = glm::perspective(glm::radians(fov), (float)800 / (float)600, 0.1f, 100.0f);
        glUniformMatrix4fv(glGetUniformLocation(shader.shaderProgramId, "projection"), 1, GL_FALSE, glm::value_ptr(projection));
        ···
    }
}
void mouse_callback(GLFWwindow* window, double xPos, double yPos)
{
    // The callback fires as soon as the mouse enters the window. If the cursor is far from the
    // screen centre this produces a huge first offset and the view suddenly jumps, so swallow
    // the first event.
    if (firstMouse)
    {
        lastX = xPos;
        lastY = yPos;
        firstMouse = false;
    }
    float xOffset = xPos - lastX;
    float yOffset = lastY - yPos; // reversed: window y-coordinates go from top to bottom
    lastX = xPos;
    lastY = yPos;
    yaw += xOffset * sensitivity;
    pitch += yOffset * sensitivity;
    if (pitch > 89.0f) // clamp pitch so the view cannot flip over
        pitch = 89.0f;
    if (pitch < -89.0f)
        pitch = -89.0f;
    glm::vec3 direction;
    direction.x = cos(glm::radians(yaw)) * cos(glm::radians(pitch));
    direction.y = sin(glm::radians(pitch));
    direction.z = sin(glm::radians(yaw)) * cos(glm::radians(pitch));
    cameraFront = glm::normalize(direction);
    // the up vector has to be recomputed after the rotation:
    // right = front x worldUp, up = right x front (both normalized)
    glm::vec3 right = glm::normalize(glm::cross(cameraFront, cameraWorldUp));
    cameraUp = glm::normalize(glm::cross(right, cameraFront));
}
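The scroll callback registered in main is not shown in the original snippet; a minimal sketch (the zoom-by-fov behaviour is my assumption, mirroring the clamping used in changeFov later) could look like this:
void scroll_callback(GLFWwindow* window, double xOffset, double yOffset)
{
    // zoom by shrinking or growing the field of view, clamped to a sensible range
    fov -= (float)yOffset;
    if (fov < 1.0f)
        fov = 1.0f;
    if (fov > 45.0f)
        fov = 45.0f;
}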
Gimbal Lock
Euler defined a reference frame that stays fixed in 3D space, the inertial frame, as well as a moving frame attached to the object, the body frame. The difference is that when the object's orientation changes, the body frame changes with it while the inertial frame does not. The three Euler-angle rotations are composed about the three axes of the body frame. Why not use the inertial frame? Because the body frame is simpler to handle mathematically.
Gimbal lock with Euler angles happens like this: we rotate in turn about the body frame's X, Y and Z axes. Once the Y axis has been rotated by 90 degrees, the Z axis points where the X axis used to point. At that moment we are effectively rotating about only two axes, X and Y, and the degree of freedom of the third axis is lost.
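This can be checked by multiplying out the elementary rotation matrices (a worked example of mine, using the composition $R_z(\gamma)\,R_y(90^\circ)\,R_x(\alpha)$): with the middle angle fixed at 90 degrees, only the difference $\alpha-\gamma$ survives, so the first and third rotations act about the same effective axis and one degree of freedom disappears.
$$R_z(\gamma)\,R_y(90^\circ)\,R_x(\alpha)=\begin{bmatrix}0 & \sin(\alpha-\gamma) & \cos(\alpha-\gamma)\\ 0 & \cos(\alpha-\gamma) & -\sin(\alpha-\gamma)\\ -1 & 0 & 0\end{bmatrix}$$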
Quaternions
To be continued.
A Camera Class
A camera takes up quite a lot of code, so abstracting the details away into a camera class is a better choice and also makes it easier to extend.
#ifndef CAMERA_H
#define CAMERA_H
#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <cmath> // for cos/sin used below
const glm::vec3 DEFAULT_POS = glm::vec3(0.0f, 0.0f, 3.0f);
const glm::vec3 DEFAULT_FRONT = glm::vec3(0.0f, 0.0f, -1.0f);
const glm::vec3 DEFAULT_UP = glm::vec3(0.0f, 1.0f, 0.0f);
const float DEFAULT_FOV = 45.0f;
const float DEFAULT_YAW = -90.0f;
const float DEFAULT_PITCH = 0.0f;
class Camera
{
public:
    glm::vec3 m_vec3Pos;
    glm::vec3 m_vec3Front;
    glm::vec3 m_vec3Up;
    glm::vec3 m_vec3WorldUp;
    float m_fFov;
    float m_fYaw;
    float m_fPitch;
    Camera(glm::vec3 pos = DEFAULT_POS, glm::vec3 front = DEFAULT_FRONT, glm::vec3 up = DEFAULT_UP, float fov = DEFAULT_FOV, float yaw = DEFAULT_YAW, float pitch = DEFAULT_PITCH);
    void moveCamera(glm::vec3 offset);
    void rotateCamera(float yaw, float pitch, float sensitivity);
    void changeFov(float angle);
    glm::mat4 getViewMatrix();
    glm::mat4 ownLookAt();
};
Camera::Camera(glm::vec3 pos, glm::vec3 front, glm::vec3 up, float fov, float yaw, float pitch) : m_vec3Pos(pos), m_vec3Front(front), m_vec3Up(up), m_vec3WorldUp(up), m_fFov(fov), m_fYaw(yaw), m_fPitch(pitch)
{
}
void Camera::moveCamera(glm::vec3 offset)
{
    m_vec3Pos += offset;
}
void Camera::rotateCamera(float yaw, float pitch, float sensitivity)
{
    m_fYaw += yaw * sensitivity;
    m_fPitch += pitch * sensitivity;
    if (m_fPitch > 89.0f) // clamp pitch so the view cannot flip over
        m_fPitch = 89.0f;
    if (m_fPitch < -89.0f)
        m_fPitch = -89.0f;
    glm::vec3 front;
    front.x = cos(glm::radians(m_fYaw)) * cos(glm::radians(m_fPitch));
    front.y = sin(glm::radians(m_fPitch));
    front.z = sin(glm::radians(m_fYaw)) * cos(glm::radians(m_fPitch));
    m_vec3Front = glm::normalize(front);
    // the up vector has to be recomputed after the rotation:
    // right = front x worldUp, up = right x front
    glm::vec3 right = glm::normalize(glm::cross(m_vec3Front, m_vec3WorldUp));
    m_vec3Up = glm::normalize(glm::cross(right, m_vec3Front));
}
void Camera::changeFov(float angle)
{
    m_fFov -= angle;
    if (m_fFov < 1.0f)
        m_fFov = 1.0f;
    if (m_fFov > 45.0f)
        m_fFov = 45.0f;
}
glm::mat4 Camera::getViewMatrix()
{
    return ownLookAt();
    //return glm::lookAt(m_vec3Pos, m_vec3Front + m_vec3Pos, m_vec3Up);
}
glm::mat4 Camera::ownLookAt()
{
    // GLM matrices are indexed column-first: m[col][row]
    glm::mat4 translation = glm::mat4(1.0f);
    translation[3][0] = -m_vec3Pos.x;   // fourth column = negated camera position
    translation[3][1] = -m_vec3Pos.y;
    translation[3][2] = -m_vec3Pos.z;
    glm::mat4 rotation = glm::mat4(1.0f);
    // camera basis: right = front x up, direction D = -front
    glm::vec3 right = glm::normalize(glm::cross(m_vec3Front, m_vec3Up));
    rotation[0][0] = right.x;           // first row = right vector
    rotation[1][0] = right.y;
    rotation[2][0] = right.z;
    rotation[0][1] = m_vec3Up.x;        // second row = up vector
    rotation[1][1] = m_vec3Up.y;
    rotation[2][1] = m_vec3Up.z;
    rotation[0][2] = -m_vec3Front.x;    // third row = direction vector
    rotation[1][2] = -m_vec3Front.y;
    rotation[2][2] = -m_vec3Front.z;
    return rotation * translation;
}
#endif
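A minimal sketch of how the class could be wired into the earlier main loop (the variable name camera and the exact key handling are my own, mirroring the free-movement code above):
Camera camera; // uses the default position/front/up defined above

void processInput(GLFWwindow* window)
{
    float cameraSpeed = 2.5f * deltaTime;
    if (glfwGetKey(window, GLFW_KEY_W) == GLFW_PRESS)
        camera.moveCamera(camera.m_vec3Front * cameraSpeed);
    if (glfwGetKey(window, GLFW_KEY_S) == GLFW_PRESS)
        camera.moveCamera(-camera.m_vec3Front * cameraSpeed);
    // A/D strafing would use the camera's right vector, as in the earlier processInput
}

// Inside the render loop:
// glm::mat4 view = camera.getViewMatrix();
// glm::mat4 projection = glm::perspective(glm::radians(camera.m_fFov), 800.0f / 600.0f, 0.1f, 100.0f);
// and in the mouse callback: camera.rotateCamera(xOffset, yOffset, sensitivity);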