Introduction to Computer Graphics ---- Assignments
Assignment 1
Requires finishing Lecture 4.
First, set up the environment.
OpenCV is easy to configure.
For Eigen, be sure to set the include directory to the outermost level!
e.g. I placed Eigen at C:\Eigen,
so the include directory has to be C:\ (so that #include <Eigen/Dense> resolves).
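A minimal sanity check for the setup (assuming Eigen really sits at C:\Eigen and the include directory is C:\); if this compiles and prints a 4x4 identity matrix, the path is correct:
#include <iostream>
#include <Eigen/Dense>

int main()
{
    // With the include directory set one level above the Eigen folder,
    // <Eigen/Dense> resolves and Matrix4f is available.
    Eigen::Matrix4f m = Eigen::Matrix4f::Identity();
    std::cout << m << std::endl;
    return 0;
}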
Straight to the code, with comments.
Eigen::Matrix4f get_model_matrix(float rotation_angle)//rotation matrix about the z axis
{
    Eigen::Matrix4f model = Eigen::Matrix4f::Identity();
    float r = rotation_angle / 180 * MY_PI;//degrees to radians
    Eigen::Matrix4f translate;
    translate << cos(r), -sin(r), 0, 0,
        sin(r), cos(r), 0, 0,
        0, 0, 1, 0,
        0, 0, 0, 1;
    model = translate * model;
    return model;
}
Eigen::Matrix4f get_projection_matrix(float eye_fov, float aspect_ratio,
                                      float zNear, float zFar)//projection matrix; the parameters are covered in Lectures 4 and 5
{
    Eigen::Matrix4f projection = Eigen::Matrix4f::Identity();
    Eigen::Matrix4f ProToOrt, Translation, Scaling;
    //zNear = -zNear, zFar = -zFar;//in some setups these need to be negated: the code below treats z as positive, while the camera actually looks down the -z axis when rendering
    ProToOrt << zNear, 0, 0, 0,
        0, zNear, 0, 0,
        0, 0, zNear + zFar, -zNear * zFar,
        0, 0, 1, 0;
    float theta = eye_fov / 360 * MY_PI;//half of eye_fov in radians (divide by 360 = 180 * 2)
    float t = fabs(zNear) * tan(theta);//top (y axis)
    float b = -t;//bottom
    float r = t * aspect_ratio;//right (x axis)
    float l = -r;//left
    Scaling << 2 / (r - l), 0, 0, 0,
        0, 2 / (t - b), 0, 0,
        0, 0, 2 / (zNear - zFar), 0,
        0, 0, 0, 1;
    Translation << 1, 0, 0, -(r + l) / 2,
        0, 1, 0, -(t + b) / 2,
        0, 0, 1, -(zNear + zFar) / 2,
        0, 0, 0, 1;
    projection = Scaling * Translation * ProToOrt;//applied to a vertex from right to left: perspective-to-ortho, then center, then scale
    return projection;
}
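For reference, the same decomposition in math form; the three matrices below simply restate ProToOrt, Translation, and Scaling with \(n\) = zNear, \(f\) = zFar, and \(l, r, b, t\) as computed above:
\[
M_{\text{proj}} = M_{\text{scale}}\, M_{\text{trans}}\, M_{\text{persp}\to\text{ortho}},\qquad
M_{\text{persp}\to\text{ortho}} =
\begin{pmatrix}
n & 0 & 0 & 0\\
0 & n & 0 & 0\\
0 & 0 & n+f & -nf\\
0 & 0 & 1 & 0
\end{pmatrix},
\]
\[
M_{\text{trans}} =
\begin{pmatrix}
1 & 0 & 0 & -\tfrac{r+l}{2}\\
0 & 1 & 0 & -\tfrac{t+b}{2}\\
0 & 0 & 1 & -\tfrac{n+f}{2}\\
0 & 0 & 0 & 1
\end{pmatrix},\qquad
M_{\text{scale}} =
\begin{pmatrix}
\tfrac{2}{r-l} & 0 & 0 & 0\\
0 & \tfrac{2}{t-b} & 0 & 0\\
0 & 0 & \tfrac{2}{n-f} & 0\\
0 & 0 & 0 & 1
\end{pmatrix}.
\]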
Eigen::Matrix4f get_rotation(Eigen::Vector3f axis, float angle)//rotation matrix about an arbitrary axis (Rodrigues' formula)
//to use it, replace r.set_model(get_model_matrix(angle)) with r.set_model(get_rotation(Eigen::Vector3f(,,),angle))
//note that there are two places to replace
{
    Eigen::Matrix4f Rotation = Eigen::Matrix4f::Identity();
    angle = angle / 180 * MY_PI;
    axis.normalize();//Rodrigues' formula assumes a unit axis
    Eigen::Matrix4f CrossMatrix;//skew-symmetric cross-product matrix of the axis
    CrossMatrix << 0, -axis[2], axis[1], 0,
        axis[2], 0, -axis[0], 0,
        -axis[1], axis[0], 0, 0,
        0, 0, 0, 0;//keep the bottom-right entry 0 so the homogeneous 1 from the identity is not corrupted by the sin/(1-cos) terms
    Rotation += sin(angle) * CrossMatrix + (1 - cos(angle)) * CrossMatrix * CrossMatrix;
    return Rotation;
}
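A sketch of the swap described in the comment above, assuming the stock main.cpp of the assignment framework where r is the rasterizer and angle the current rotation angle; the axis (0, 0, 1) is only a placeholder example:
// before (at both call sites in main.cpp):
// r.set_model(get_model_matrix(angle));
// after:
r.set_model(get_rotation(Eigen::Vector3f(0, 0, 1), angle));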
Assignment 2
Requires finishing the first half of Lecture 7.
The main thing is getting used to how Eigen works.
Straight to the code.
static bool insideTriangle(float x, float y, const Vector3f* _v)
{
    // TODO : Implement this function to check if the point (x, y) is inside the triangle represented by _v[0], _v[1], _v[2]
    Eigen::Vector3f p = { x, y, 0.0f };
    Eigen::Vector3f p10 = _v[1] - _v[0];
    Eigen::Vector3f p21 = _v[2] - _v[1];
    Eigen::Vector3f p02 = _v[0] - _v[2];
    Eigen::Vector3f p0 = p - _v[0];
    Eigen::Vector3f p1 = p - _v[1];
    Eigen::Vector3f p2 = p - _v[2];
    Eigen::Vector3f c0 = p10.cross(p0);
    Eigen::Vector3f c1 = p21.cross(p1);
    Eigen::Vector3f c2 = p02.cross(p2);
    if (c0[2] > 0 && c1[2] > 0 && c2[2] > 0)//with counter-clockwise vertices, an inside point makes all three z components positive (checking for a consistent sign would also handle clockwise order)
        return true;
    return false;
}
bool msaa(int ratio, float x, float y, const Eigen::Vector3f* _v, float& result)//splits each pixel into ratio*ratio sub-samples; arguments: pixel coordinates, the triangle, and the output coverage
{
    result = 0;
    float unit_v = 1.0f / ratio / ratio, unit_d = 1.0f / ratio;//coverage added per covered sub-sample; step between sub-samples (note: 1 / ratio would be integer division)
    x += unit_d / 2, y += unit_d / 2;//start at the center of the first sub-sample
    for (int i = 0; i < ratio; ++i)
        for (int j = 0; j < ratio; ++j)
            if (insideTriangle(x + i * unit_d, y + j * unit_d, _v)) result += unit_v;
    return result == 0.0f;//true when no sub-sample is covered, so the caller can skip the pixel
}
void rst::rasterizer::rasterize_triangle(const Triangle& t)
{
    auto v = t.toVector4();
    // TODO : Find out the bounding box of current triangle.
    // iterate through the pixel and find if the current pixel is inside the triangle
    // If so, use the following code to get the interpolated z value.
    float tx[3] = { t.v[0][0], t.v[1][0], t.v[2][0] };
    float ty[3] = { t.v[0][1], t.v[1][1], t.v[2][1] };
    std::pair<float*, float*> xrange = std::minmax_element(tx, tx + 3);//min and max in one call; learned something new here
    std::pair<float*, float*> yrange = std::minmax_element(ty, ty + 3);
    for (int x = std::floor(*xrange.first); x < std::ceil(*xrange.second); ++x)
        for (int y = std::floor(*yrange.first); y < std::ceil(*yrange.second); ++y)//the two loops walk the bounding box
        {
            float rat;
            if (msaa(1, x, y, t.v, rat)) continue;//if no sample is inside the triangle, there is nothing to render for this pixel
            auto [alpha, beta, gamma] = computeBarycentric2D(x, y, t.v);
            float w_reciprocal = 1.0 / (alpha / v[0].w() + beta / v[1].w() + gamma / v[2].w());
            float z_interpolated = alpha * v[0].z() / v[0].w() + beta * v[1].z() / v[1].w() + gamma * v[2].z() / v[2].w();
            z_interpolated *= w_reciprocal;
            if (z_interpolated >= depth_buf[y * width + x]) continue;//z-buffer test
            depth_buf[y * width + x] = z_interpolated;
            set_pixel(Eigen::Vector3f(x, y, 0), t.getColor() * rat);
        }
    // TODO : set the current pixel (use the set_pixel function) to the color of the triangle (use getColor function) if it should be painted.
}
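To turn on real 2x2 supersampling, change msaa(1, x, y, t.v, rat) to msaa(2, x, y, t.v, rat); the coverage ratio rat then scales the triangle color and softens the jagged edges. Note that the depth test above is still done once per pixel, not once per sub-sample.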
Assignment 3
Requires finishing the first half of Lecture 10.
If you use bilinear texture sampling, the boundary of the black patches on the cow gets a bit smoother (I honestly could not spot a difference anywhere else).
//Rasterizer.cpp
void rst::rasterizer::rasterize_triangle(const Triangle& t, const std::array<Eigen::Vector3f, 3>& view_pos)
{
    // TODO: From your HW3, get the triangle rasterization code.
    std::array v = t.toVector4();
    // TODO: Inside your rasterization loop:
    // * v[i].w() is the vertex view space depth value z.
    // * Z is interpolated view space depth for the current pixel
    // * zp is depth between zNear and zFar, used for z-buffer
    float tx[3] = { t.v[0][0], t.v[1][0], t.v[2][0] };
    float ty[3] = { t.v[0][1], t.v[1][1], t.v[2][1] };
    std::pair<float*, float*> xrange = std::minmax_element(tx, tx + 3);
    std::pair<float*, float*> yrange = std::minmax_element(ty, ty + 3);
    for (int x = std::floor(*xrange.first); x < std::ceil(*xrange.second); ++x)
        for (int y = std::floor(*yrange.first); y < std::ceil(*yrange.second); ++y)//same bounding-box loops as in Assignment 2
        {
            if (!insideTriangle(x, y, t.v)) continue;
            auto [alpha, beta, gamma] = computeBarycentric2D(x, y, t.v);//barycentric coordinates
            float Z = 1.0 / (alpha / v[0].w() + beta / v[1].w() + gamma / v[2].w());
            float zp = alpha * v[0].z() / v[0].w() + beta * v[1].z() / v[1].w() + gamma * v[2].z() / v[2].w();
            zp *= Z;
            if (zp >= depth_buf[y * width + x]) continue;//z-buffer test
            depth_buf[y * width + x] = zp;
            auto interpolated_color = alpha * t.color[0] + beta * t.color[1] + gamma * t.color[2];//the lines below follow the TODO comments at the end of this function
            auto interpolated_normal = alpha * t.normal[0] + beta * t.normal[1] + gamma * t.normal[2];
            auto interpolated_texcoords = alpha * t.tex_coords[0] + beta * t.tex_coords[1] + gamma * t.tex_coords[2];
            auto interpolated_shadingcoords = alpha * view_pos[0] + beta * view_pos[1] + gamma * view_pos[2];
            fragment_shader_payload payload(interpolated_color, interpolated_normal.normalized(), interpolated_texcoords, texture ? &*texture : nullptr);
            payload.view_pos = interpolated_shadingcoords;
            auto pixel_color = fragment_shader(payload);
            set_pixel(Eigen::Vector2i(x, y), pixel_color);
        }
    // TODO: Interpolate the attributes:
    // auto interpolated_color
    // auto interpolated_normal
    // auto interpolated_texcoords
    // auto interpolated_shadingcoords
    // Use: fragment_shader_payload payload( interpolated_color, interpolated_normal.normalized(), interpolated_texcoords, texture ? &*texture : nullptr);
    // Use: payload.view_pos = interpolated_shadingcoords;
    // Use: Instead of passing the triangle's color directly to the frame buffer, pass the color to the shaders first to get the final color;
    // Use: auto pixel_color = fragment_shader(payload);
}
//phong_fragment_shader
for (auto& light : lights)
{
    // TODO: For each light source in the code, calculate what the *ambient*, *diffuse*, and *specular*
    // components are. Then, accumulate that result on the *result_color* object.
    double r2 = (light.position - point).squaredNorm();//squared norm, i.e. squared distance to the light
    Vector3f l = (light.position - point).normalized();
    Vector3f v = (eye_pos - point).normalized();
    Vector3f h = (l + v).normalized();
    Vector3f la = ka.cwiseProduct(amb_light_intensity);
    Vector3f ld = kd.cwiseProduct(light.intensity / r2 * std::max(0.0f, normal.dot(l)));
    Vector3f ls = ks.cwiseProduct(light.intensity / r2 * std::pow(std::max(0.0f, normal.dot(h)), p));
    result_color += la + ld + ls;
}
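For reference, this loop implements the Blinn-Phong model from the lectures, with \(r\) the distance to the light and \(\mathbf{h}\) the half vector between \(\mathbf{l}\) and \(\mathbf{v}\):
\[
L = k_a\,I_a + k_d\,\frac{I}{r^2}\max(0,\ \mathbf{n}\cdot\mathbf{l}) + k_s\,\frac{I}{r^2}\max(0,\ \mathbf{n}\cdot\mathbf{h})^{p}
\]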
//texture_fragment_shader
if (payload.texture)
{
    // TODO: Get the texture value at the texture coordinates of the current fragment
    return_color = payload.texture->getColor(payload.tex_coords.x(), payload.tex_coords.y());//plain nearest lookup
    //return_color = payload.texture->getColorBilinear(payload.tex_coords.x(), payload.tex_coords.y());//bilinear
}
//the for loop below is the same as in the Phong shader above, just copy it
//texture.hpp
Eigen::Vector3f getColor2(float x, float y)//fetch a single texel at pixel coordinates (cv::Mat is indexed as (row, col) = (y, x))
{
    auto color = image_data.at<cv::Vec3b>(y, x);
    return Eigen::Vector3f(color[0], color[1], color[2]);
}
Eigen::Vector3f getColorBilinear(float u, float v)
{
    auto u_img = u * (width - 1);
    auto v_img = (1 - v) * (height - 1);//v is flipped because image rows grow downward
    Eigen::Vector2f u00{ std::floor(u_img), std::floor(v_img) };
    Eigen::Vector2f u01{ std::ceil(u_img), std::floor(v_img) };
    Eigen::Vector2f u10{ std::floor(u_img), std::ceil(v_img) };
    Eigen::Vector2f u11{ std::ceil(u_img), std::ceil(v_img) };
    float s = u_img - std::floor(u_img);//horizontal lerp weight
    float t = v_img - std::floor(v_img);//vertical lerp weight
    Eigen::Vector3f u0 = (1 - s) * getColor2(u00.x(), u00.y()) + s * (getColor2(u01.x(), u01.y()));
    Eigen::Vector3f u1 = (1 - s) * getColor2(u10.x(), u10.y()) + s * (getColor2(u11.x(), u11.y()));
    return (1 - t) * u0 + t * u1;
}
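In formula form, with \(s\) and \(t\) the fractional parts computed above and \(c_{00}, c_{01}, c_{10}, c_{11}\) the four neighbouring texels:
\[
c = (1-t)\bigl[(1-s)\,c_{00} + s\,c_{01}\bigr] + t\bigl[(1-s)\,c_{10} + s\,c_{11}\bigr]
\]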
//bump_fragment_shader: just follow the framework comments below
// TODO: Implement bump mapping here
// Let n = normal = (x, y, z)
// Vector t = (x*y/sqrt(x*x+z*z),sqrt(x*x+z*z),z*y/sqrt(x*x+z*z))
// Vector b = n cross product t
// Matrix TBN = [t b n]
// dU = kh * kn * (h(u+1/w,v)-h(u,v))
// dV = kh * kn * (h(u,v+1/h)-h(u,v))
// Vector ln = (-dU, -dV, 1)
// Normal n = normalize(TBN * ln)
Vector3f n = normal;
float x = n.x(), y = n.y(), z = n.z();
Vector3f t = Vector3f(x * y / sqrt(x * x + z * z), sqrt(x * x + z * z), z * y / sqrt(x * x + z * z));
t.normalize();
Vector3f b = n.cross(t);
b.normalize();
Matrix3f TBN;
TBN << t.x(), b.x(), n.x(),
t.y(), b.y(), n.y(),
t.z(), b.z(), n.z();
float u = payload.tex_coords.x();
float v = payload.tex_coords.y();
float w = payload.texture->width;
float h = payload.texture->height;
float uc = payload.texture->getColor(u + 1.0f / w, v).norm() - payload.texture->getColor(u, v).norm();//h(u+1/w,v) - h(u,v), using the norm of the texture color as the height h
float vc = payload.texture->getColor(u, v + 1.0f / h).norm() - payload.texture->getColor(u, v).norm();//h(u,v+1/h) - h(u,v)
float dU = kh * kn * uc;
float dV = kh * kn * vc;
Vector3f ln = Vector3f(-dU, -dV, 1);
ln.normalize();
n = (TBN * ln).normalized();//side note: why did this one turn into normalized()?
Eigen::Vector3f result_color = { 0, 0, 0 };
result_color = n;
return result_color * 255.f;
//displacement_fragment_shader: again, just follow the framework comments
// TODO: Implement displacement mapping here
// Let n = normal = (x, y, z)
// Vector t = (x*y/sqrt(x*x+z*z),sqrt(x*x+z*z),z*y/sqrt(x*x+z*z))
// Vector b = n cross product t
// Matrix TBN = [t b n]
// dU = kh * kn * (h(u+1/w,v)-h(u,v))
// dV = kh * kn * (h(u,v+1/h)-h(u,v))
// Vector ln = (-dU, -dV, 1)
// Position p = p + kn * n * h(u,v)
// Normal n = normalize(TBN * ln)
Vector3f n = normal;
float x = n.x(), y = n.y(), z = n.z();
Vector3f t = Vector3f(x * y / sqrt(x * x + z * z), sqrt(x * x + z * z), z * y / sqrt(x * x + z * z));
t.normalize();
Vector3f b = n.cross(t);
b.normalize();
Matrix3f TBN;
TBN << t.x(), b.x(), n.x(),
t.y(), b.y(), n.y(),
t.z(), b.z(), n.z();
float u = payload.tex_coords.x();
float v = payload.tex_coords.y();
float w = payload.texture->width;
float h = payload.texture->height;
float uc = payload.texture->getColor(u + 1.0f / w, v).norm() - payload.texture->getColor(u, v).norm();
float vc = payload.texture->getColor(u, v + 1.0f / h).norm() - payload.texture->getColor(u, v).norm();
float dU = kh * kn * uc;
float dV = kh * kn * vc;
Vector3f ln = Vector3f(-dU, -dV, 1);
ln.normalize();
point = point + kn * n * (payload.texture->getColor(u, v).norm());
n = (TBN * ln).normalized();
Eigen::Vector3f result_color = { 0, 0, 0 };
//the for loop below is again the same as in the Phong shader
Assignments 4-7
Skipped for now (鸽 as in pigeon, the symbol of peace, and slang for putting things off).
Honestly I was just too lazy to port them over.
Please see the write-up by the expert linked at the very top.
Assignment 8
I have not written it yet.
Please see the write-up by the expert linked at the very top.
By the way, at some point I will come back and fill in comments for all of this framework code (setting up a flag here).
There is a negligible beginning in all great action and thought.