[toc]
# 作业要求
- 实现 gltf 格式下的骨骼动画
- PBR 材质
- toneMap(色调映射)
使用的模型是 Buster Drone
# 实现 gltf 格式下的骨骼动画
gltf 格式解析:
gltf 骨骼动画的解析可参考官方示例中的 gltfskinning 代码。先读取 gltf 文件中的 Animation 数据,需要用到 gltf 中的 skin、animations、samplers、nodes、accessors、joints 等字段。
但注意到上面的 gltf 文件中实际上没有 skin 和 joints 字段,所以这并非蒙皮动画,而是骨骼(节点)动画。既然是骨骼动画,我们就需要通过设置骨骼节点 node 的变换矩阵来更新模型动画。
我们需要作如下操作
- 读取 animation 文件,建立数据结构
- 每个顶点记录当前顶点属于哪个 node
- 建立一个存放 node 动画变换 Matrix 的缓冲区,这里直接复用相机的 UBOScene buffer
- 每次更新动画时更新该缓冲区
- 注意骨骼动画需要更新每一个 node 的矩阵
- shader 支持
# 读取 animation & 更新节点 matrix
VulkanglTFModel 类的变更
下面是 VulkanglTFModel 类的完整改动代码:
| struct Vertex { glm::vec3 pos; glm::vec3 normal; glm::vec2 uv; glm::vec3 color; int nodeIdx; };
// A glTF scene node: hierarchy links, optional mesh, and a TRS transform.
// This model has no skin, so the animation works by mutating each node's
// translation/rotation/scale and recomputing the matrices.
struct Node {
	Node* parent;
	uint32_t index;
	std::vector<Node*> children;
	Mesh mesh;
	glm::mat4 matrix;
	~Node() {
		for (auto& child : children) {
			delete child;
		}
	}
	glm::vec3 translation{0.0f};
	glm::vec3 scale{ 1.0f };
	glm::quat rotation{};
	int32_t skin = -1;
public:
	// Local transform = T * R * S * (static node matrix from the file).
	glm::mat4 m_getLocalMatrix() {
		return glm::translate(glm::mat4(1.0f), translation)
			* glm::mat4(rotation)
			* glm::scale(glm::mat4(1.0f), scale)
			* matrix;
	}

	// World transform: accumulate local matrices walking up the parent chain.
	glm::mat4 getNodeMatrix() {
		glm::mat4 worldMatrix = m_getLocalMatrix();
		for (Node* ancestor = parent; ancestor != nullptr; ancestor = ancestor->parent) {
			worldMatrix = ancestor->m_getLocalMatrix() * worldMatrix;
		}
		return worldMatrix;
	}
};
// Keyframe data for one animation sampler:
// `inputs` holds the keyframe timestamps, `outputsVec4` the values
// (vec3 outputs are widened to vec4 on load for uniform storage).
struct AnimationSampler {
	std::string interpolation;          // "LINEAR" is the only mode supported here
	std::vector<float> inputs;          // keyframe times (seconds)
	std::vector<glm::vec4> outputsVec4; // keyframe values
};
// Binds one sampler to a property ("translation"/"rotation"/"scale")
// of a target node.
struct AnimationChannel {
	std::string path;      // which TRS component this channel animates
	Node* node;            // target node (non-owning)
	uint32_t samplerIndex; // index into Animation::samplers
};
// One animation clip: samplers (keyframe data) plus channels (what they drive).
// `start`/`end` are derived from the keyframe timestamps at load time.
struct Animation {
	std::string name;
	std::vector<AnimationSampler> samplers;
	std::vector<AnimationChannel> channels;
	float start = std::numeric_limits<float>::max();
	// BUG FIX: numeric_limits<float>::min() is the smallest *positive* float,
	// not the most negative value — a running-max must be seeded with lowest().
	float end = std::numeric_limits<float>::lowest();
	float currentTime = 0.0f; // playback cursor, advanced by updateAnimation()
};
// All animation clips loaded from the glTF file.
std::vector<Animation> animations;
// Cached world matrix per node, indexed by node index. 51 is hard-coded to
// this model's node count — TODO: size this from the actual node count.
glm::mat4 originNodeMatrix[51];
// Recursively loads one glTF node: builds the Node hierarchy and fills the
// vertex/index buffers (elided below), then records the node's initial world
// matrix so the UBO can be seeded before any animation runs.
void loadNode(const tinygltf::Node& inputNode, const tinygltf::Model& input, Node* parent, uint32_t nodeIndex, std::vector<uint32_t>& indexBuffer, std::vector<Vertex>& vertexBuffer) { Node* node = new Node{}; node->parent = parent; node->matrix = glm::mat4(1.0f); node->index = nodeIndex; node->skin = inputNode.skin;
....
// Cache the initial world matrix (per node index); animation updates overwrite it later.
originNodeMatrix[nodeIndex] = node->getNodeMatrix(); }
// Reads every animation from the glTF file: for each sampler the keyframe
// times (input accessor) and values (output accessor, vec3 widened to vec4),
// and for each channel the target node and animated property path.
void loadAnimations(tinygltf::Model& input)
{
	animations.resize(input.animations.size());
	for (size_t i = 0; i < input.animations.size(); i++) {
		// const refs — the originals copied tinygltf structs by value each iteration
		const tinygltf::Animation& glTFAnimation = input.animations[i];
		animations[i].name = glTFAnimation.name;

		// --- samplers: keyframe times and values ---
		animations[i].samplers.resize(glTFAnimation.samplers.size());
		for (size_t j = 0; j < glTFAnimation.samplers.size(); j++) {
			const tinygltf::AnimationSampler& glTFSampler = glTFAnimation.samplers[j];
			AnimationSampler& dstSampler = animations[i].samplers[j];
			dstSampler.interpolation = glTFSampler.interpolation;

			// Keyframe timestamps (input accessor, scalar floats)
			{
				const tinygltf::Accessor& accessor = input.accessors[glTFSampler.input];
				const tinygltf::BufferView& bufferView = input.bufferViews[accessor.bufferView];
				const tinygltf::Buffer& buffer = input.buffers[bufferView.buffer];
				const void* dataPtr = &buffer.data[accessor.byteOffset + bufferView.byteOffset];
				const float* buf = static_cast<const float*>(dataPtr);
				for (size_t index = 0; index < accessor.count; index++) {
					dstSampler.inputs.push_back(buf[index]);
				}
				// Track the clip's time range. NOTE: the original loop variable was
				// named `input`, shadowing the tinygltf::Model& parameter — renamed.
				for (float keyTime : dstSampler.inputs) {
					if (keyTime < animations[i].start) { animations[i].start = keyTime; }
					if (keyTime > animations[i].end) { animations[i].end = keyTime; }
				}
			}

			// Keyframe values (output accessor); vec3 is stored as vec4 with w = 0
			{
				const tinygltf::Accessor& accessor = input.accessors[glTFSampler.output];
				const tinygltf::BufferView& bufferView = input.bufferViews[accessor.bufferView];
				const tinygltf::Buffer& buffer = input.buffers[bufferView.buffer];
				const void* dataPtr = &buffer.data[accessor.byteOffset + bufferView.byteOffset];
				switch (accessor.type) {
				case TINYGLTF_TYPE_VEC3: {
					const glm::vec3* buf = static_cast<const glm::vec3*>(dataPtr);
					for (size_t index = 0; index < accessor.count; index++) {
						dstSampler.outputsVec4.push_back(glm::vec4(buf[index], 0.0f));
					}
					break;
				}
				case TINYGLTF_TYPE_VEC4: {
					const glm::vec4* buf = static_cast<const glm::vec4*>(dataPtr);
					for (size_t index = 0; index < accessor.count; index++) {
						dstSampler.outputsVec4.push_back(buf[index]);
					}
					break;
				}
				default: {
					std::cout << "unknown type" << std::endl;
					break;
				}
				}
			}
		}

		// --- channels: bind samplers to target node properties ---
		animations[i].channels.resize(glTFAnimation.channels.size());
		for (size_t j = 0; j < glTFAnimation.channels.size(); j++) {
			const tinygltf::AnimationChannel& glTFChannel = glTFAnimation.channels[j];
			AnimationChannel& dstChannel = animations[i].channels[j];
			dstChannel.path = glTFChannel.target_path;
			dstChannel.samplerIndex = glTFChannel.sampler;
			dstChannel.node = nodeFromIndex(glTFChannel.target_node);
		}
	}
}
// Intentionally empty placeholder — the per-node matrix refresh is actually
// performed by drawAnim()/drawNodeAnim(); consider removing this stub.
void updateAnimMatrix() { }
void VulkanglTFModel::updateAnimation(float deltaTime) { if (activeAnimation > static_cast<uint32_t>(animations.size()) - 1) { std::cout << "No animation with index " << activeAnimation << std::endl; return; } Animation& animation = animations[activeAnimation]; animation.currentTime += deltaTime; if (animation.currentTime > animation.end) { animation.currentTime -= animation.end; }
for (auto& channel : animation.channels) { AnimationSampler& sampler = animation.samplers[channel.samplerIndex]; for (size_t i = 0; i < sampler.inputs.size() - 1; i++) { if (sampler.interpolation != "LINEAR") { std::cout << "This sample only supports linear interpolations\n"; continue; }
if ((animation.currentTime >= sampler.inputs[i]) && (animation.currentTime <= sampler.inputs[i + 1])) { float a = (animation.currentTime - sampler.inputs[i]) / (sampler.inputs[i + 1] - sampler.inputs[i]); if (channel.path == "translation") { channel.node->translation = glm::mix(sampler.outputsVec4[i], sampler.outputsVec4[i + 1], a); } if (channel.path == "rotation") { glm::quat q1; q1.x = sampler.outputsVec4[i].x; q1.y = sampler.outputsVec4[i].y; q1.z = sampler.outputsVec4[i].z; q1.w = sampler.outputsVec4[i].w;
glm::quat q2; q2.x = sampler.outputsVec4[i + 1].x; q2.y = sampler.outputsVec4[i + 1].y; q2.z = sampler.outputsVec4[i + 1].z; q2.w = sampler.outputsVec4[i + 1].w;
channel.node->rotation = glm::normalize(glm::slerp(q1, q2, a)); } if (channel.path == "scale") { channel.node->scale = glm::mix(sampler.outputsVec4[i], sampler.outputsVec4[i + 1], a); } } } } }
// Recursively refreshes the cached world matrix for the whole subtree rooted
// at `node`. A parent's change affects every descendant, so children must be
// recomputed even if no animation channel targets them directly.
void drawNodeAnim(Node* node)
{
	originNodeMatrix[node->index] = node->getNodeMatrix();
	for (Node* child : node->children) {
		drawNodeAnim(child);
	}
}
void drawAnim() { for (auto& node : nodes) { drawNodeAnim(node); } }
|
VulkanExample 类的变更
下面是 VulkanExample 类的改动代码:
// CPU-side mirror of the mesh.vert UBO.
// NOTE(review): the member named `model` lines up with `view` in the shader's
// UBOScene — the layouts match positionally but the names disagree; consider
// renaming. animMatri[51] holds one animated world matrix per node, indexed
// by node index; 51 is hard-coded to this model's node count.
| struct ShaderData { vks::Buffer buffer; struct Values { glm::mat4 projection; glm::mat4 model; glm::vec4 lightPos = glm::vec4(5.0f, 5.0f, -5.0f, 1.0f); glm::vec4 viewPos; glm::mat4 animMatri[51]; } values; } shaderData;
// Loads the glTF file. New here: after loading images/materials/textures and
// the node hierarchy, loadAnimations() reads the animation data. The `...`
// ellipses stand for unchanged code from the original sample.
void loadglTFFile(std::string filename) { ... if (fileLoaded) { glTFModel.loadImages(glTFInput); glTFModel.loadMaterials(glTFInput); glTFModel.loadTextures(glTFInput); const tinygltf::Scene& scene = glTFInput.scenes[0]; for (size_t i = 0; i < scene.nodes.size(); i++) { const tinygltf::Node node = glTFInput.nodes[scene.nodes[i]]; glTFModel.loadNode(node, glTFInput, nullptr, scene.nodes[i], indexBuffer, vertexBuffer); } glTFModel.loadAnimations(glTFInput); ... } ... }
// Seeds the UBO's animation-matrix array with each node's initial world
// matrix recorded during loadNode(). The `...` stands for the original
// buffer-creation code, unchanged.
void prepareUniformBuffers(){ for (int i = 0; i < 51; ++i) { shaderData.values.animMatri[i] = glTFModel.originNodeMatrix[i]; } ... }
// Copies the freshly computed per-node animation matrices into the UBO
// struct, then uploads the whole struct to the mapped uniform buffer.
void updateAnimUniformBuffer()
{
	// glm::mat4 is trivially copyable, so a single memcpy replaces the
	// element-by-element loop with identical results.
	memcpy(shaderData.values.animMatri, glTFModel.originNodeMatrix, sizeof(shaderData.values.animMatri));
	memcpy(shaderData.buffer.mapped, &shaderData.values, sizeof(shaderData.values));
}
// Per-frame work: submit the frame, refresh the camera UBO when the camera
// moved, and — unless paused — advance the animation, recompute the node
// matrices, and upload them to the uniform buffer.
virtual void render()
{
	renderFrame();
	if (camera.updated) {
		updateUniformBuffers();
	}
	if (paused) {
		return;
	}
	glTFModel.updateAnimation(frameTimer);
	glTFModel.drawAnim();
	updateAnimUniformBuffer();
}
|
mesh.vert 变更
mesh.vert 完整代码:
// mesh.vert — each vertex is transformed by the animation matrix of the node
// it belongs to, selected via the nodeIdx vertex attribute.
| #version 450
layout (location = 0) in vec3 inPos; layout (location = 1) in vec3 inNormal; layout (location = 2) in vec2 inUV; layout (location = 3) in vec3 inColor; layout (location = 4) in int nodeIdx;
// animMatri[51]: per-node animated world matrices; layout must match the
// C++ ShaderData::Values struct member-for-member.
layout (set = 0, binding = 0) uniform UBOScene { mat4 projection; mat4 view; vec4 lightPos; vec4 viewPos; mat4 animMatri[51]; } uboScene;
layout(push_constant) uniform PushConsts { mat4 model; } primitive;
layout (location = 0) out vec3 outNormal; layout (location = 1) out vec3 outColor; layout (location = 2) out vec2 outUV; layout (location = 3) out vec3 outViewVec; layout (location = 4) out vec3 outLightVec;
void main() { outNormal = inNormal; outColor = inColor; outUV = inUV;
// NOTE(review): the nodeIdx==0 special case replaces the real position with a
// hard-coded (-2,-1,0) — looks like a debugging hack; confirm and remove.
// Also: `pos` and `outNormal` for lighting use the un-animated inPos/inNormal,
// so lighting will be off once the mesh animates; `lPos` is computed but unused.
gl_Position = uboScene.projection * uboScene.view * uboScene.animMatri[int(nodeIdx)] * vec4((nodeIdx==0?vec3(-2,-1,0):inPos.xyz), 1.0 ); vec4 pos = uboScene.view * vec4(inPos, 1.0); outNormal = mat3(uboScene.view) * inNormal; vec3 lPos = mat3(uboScene.view) * uboScene.lightPos.xyz; outLightVec = uboScene.lightPos.xyz - pos.xyz; outViewVec = uboScene.viewPos.xyz - pos.xyz; }
|
# 编译 shader
需要先下载 Vulkan SDK
之后找到 VulkanSDK/版本号/Bin/
目录
这个目录下有很多工具,我们使用 glslc
来编译 glsl 的 shader, 生成 spv 文件
编译命令:
# Compile both shader stages to SPIR-V, one command per stage
glslc mesh.vert -o mesh.vert.spv
glslc mesh.frag -o mesh.frag.spv
|
# 需要注意的点
一开始实现时,只更新了 channel 里带动画的 node,后来发现机器人的躯干始终不动。排查后发现原因是:骨骼动画中任何节点发生变化后,其所有子节点的矩阵都需要重新计算,哪怕动画数据里没有直接作用于这些子节点的 channel。
# 结果