This article collects typical usage examples of the C++ Matrix4f::Inverse method. If you are wondering how Matrix4f::Inverse is used in practice, or looking for concrete examples of calling it, the curated code samples below may help. You can also explore further usage examples of the containing class, Matrix4f.
Two code examples of the Matrix4f::Inverse method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
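Both examples use Inverse() for the same family of tasks: recovering the camera's world-space position from a view matrix, and mapping projected coordinates back to world space. As a minimal, self-contained sketch of the first of these patterns, written with GLM as a stand-in for the Matrix4f class used in the examples (GLM is an assumption here, not the examples' own library):

// Minimal illustration of the inverse-view pattern shared by the examples
// below. GLM stands in for Matrix4f; all concrete values are invented.
#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <cstdio>

int main()
{
    glm::mat4 view = glm::lookAt(glm::vec3(0.0f, 2.0f, 5.0f),   // eye
                                 glm::vec3(0.0f, 0.0f, 0.0f),   // look-at target
                                 glm::vec3(0.0f, 1.0f, 0.0f));  // up

    // The camera's world-space position is the inverse view matrix applied to
    // the origin, the same pattern used for pixelData.viewPosition in Example 1.
    glm::vec4 camPos = glm::inverse(view) * glm::vec4(0.0f, 0.0f, 0.0f, 1.0f);
    std::printf("camera position: (%.2f, %.2f, %.2f)\n", camPos.x, camPos.y, camPos.z);

    // Sanity check: a matrix times its inverse is (numerically close to) the identity.
    glm::mat4 shouldBeIdentity = view * glm::inverse(view);
    std::printf("top-left entry of V * V^-1: %.5f\n", shouldBeIdentity[0][0]);
    return 0;
}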
Example 1: RebuildBuffer
void LightingCompositeStage::RebuildBuffer(RenderState& renderState)
{
    Matrix4f projectionMatrix = renderState.GetProjectionMatrix();
    Matrix4f viewMatrix = renderState.GetViewMatrix();
    Rectf viewport = renderState.GetViewport();
    Vector2f screenSize = renderState.GetCurrentRenderTargetSize();

    Matrix4f compositeTransform = Matrix4f::Scale(Vector3f(screenSize / viewport.size, 1.0f)) *
                                  Matrix4f::Translation(Vector3f(-viewport.topLeft / viewport.size, 0.0f));

    if (mVertexContantBuffer == NULL)
    {
        ConstantBufferLayout vertexLayout;
        vertexLayout.MarkSpecialType(ConstantBufferLayout::TypeProjectionMatrix, offsetof(LightVertexCompositeData, projectionMatrix));
        vertexLayout.MarkSpecialType(ConstantBufferLayout::TypeInvProjectionMatrix, offsetof(LightVertexCompositeData, projectionViewInverseMatrix));
        vertexLayout.MarkSpecialType(ConstantBufferLayout::TypeProjectionMatrix, offsetof(LightVertexCompositeData, compositeTransform));
        vertexLayout.MarkSpecialType(ConstantBufferLayout::TypeProjectionMatrix, offsetof(LightVertexCompositeData, projectionMatrixRasterSpace));
        mVertexContantBuffer = renderState.GetGraphics().CreateConstantBuffer(vertexLayout);

        ConstantBufferLayout pixelLayout;
        pixelLayout.MarkSpecialType(ConstantBufferLayout::TypeProjectionMatrix, offsetof(LightPixelCompositeData, projectionMatrix));
        pixelLayout.MarkSpecialType(ConstantBufferLayout::TypeInvProjectionMatrix, offsetof(LightPixelCompositeData, projectionViewInverseMatrix));
        mPixelContantBuffer = renderState.GetGraphics().CreateConstantBuffer(pixelLayout);
    }

    LightVertexCompositeData vertexData;
    vertexData.projectionMatrix = projectionMatrix;
    // Inverse of the combined projection * view transform, used to map clip-space
    // coordinates back to world space.
    vertexData.projectionViewInverseMatrix = (projectionMatrix * viewMatrix).Inverse();
    vertexData.compositeTransform = compositeTransform;
    vertexData.projectionMatrixRasterSpace = Matrix4f::Translation(Vector3f(0.5f, 0.5f, 0.0f)) *
                                             Matrix4f::Scale(Vector3f(0.5f, -0.5f, 1.0f)) *
                                             vertexData.compositeTransform * projectionMatrix;
    mVertexContantBuffer->Set<LightVertexCompositeData>(vertexData);

    LightPixelCompositeData pixelData;
    pixelData.projectionMatrix = projectionMatrix;
    pixelData.projectionViewInverseMatrix = vertexData.projectionViewInverseMatrix;
    pixelData.screenSize = Vector4f(screenSize, 1.0f / screenSize);
    pixelData.viewDireciton = Vector4f(0.0f, 0.0f, 1.0f, 0.0f) * viewMatrix;
    // The camera's world-space position: the inverse view matrix applied to the origin.
    pixelData.viewPosition = viewMatrix.Inverse() * Vector4f(0.0f, 0.0f, 0.0f, 1.0f);
    mPixelContantBuffer->Set<LightPixelCompositeData>(pixelData);
}
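The constant buffers filled in Example 1 are consumed on the GPU, which is not shown, so the following is only a hedged guess at the intended use: a deferred-lighting composite pass typically turns a pixel coordinate plus a sampled depth into a world-space position by applying the inverse of projection * view. The sketch below mirrors that reconstruction on the CPU, with GLM standing in for Matrix4f and all concrete values invented.

// CPU-side mirror of the reconstruction a lighting shader would likely do with
// projectionViewInverseMatrix and screenSize from Example 1 (an assumption; the
// shader itself is not part of the example). GLM stands in for Matrix4f.
#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <cstdio>

// Reconstruct a world-space position from a pixel coordinate and a depth value.
glm::vec3 ReconstructWorldPos(const glm::vec2 &pixel, float depth,
                              const glm::vec2 &screenSize, const glm::mat4 &invProjView)
{
    // Pixel -> normalized device coordinates (flip Y: raster Y grows downward).
    glm::vec2 ndc = glm::vec2(pixel.x / screenSize.x, 1.0f - pixel.y / screenSize.y) * 2.0f - 1.0f;
    glm::vec4 clip(ndc, depth, 1.0f);

    glm::vec4 world = invProjView * clip;
    return glm::vec3(world) / world.w;   // perspective divide
}

int main()
{
    glm::vec2 screenSize(1920.0f, 1080.0f);
    glm::mat4 proj = glm::perspective(glm::radians(60.0f), screenSize.x / screenSize.y, 0.1f, 100.0f);
    glm::mat4 view = glm::lookAt(glm::vec3(0.0f, 1.0f, 4.0f), glm::vec3(0.0f), glm::vec3(0.0f, 1.0f, 0.0f));

    glm::mat4 invProjView = glm::inverse(proj * view);
    glm::vec3 p = ReconstructWorldPos(glm::vec2(960.0f, 540.0f), 0.5f, screenSize, invProjView);
    std::printf("world position: (%.2f, %.2f, %.2f)\n", p.x, p.y, p.z);
    return 0;
}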
Example 2: RenderMesh
void VulkanReplay::RenderMesh(uint32_t eventId, const vector<MeshFormat> &secondaryDraws,
                              const MeshDisplay &cfg)
{
  if(cfg.position.vertexResourceId == ResourceId() || cfg.position.numIndices == 0)
    return;

  auto it = m_OutputWindows.find(m_ActiveWinID);
  if(m_ActiveWinID == 0 || it == m_OutputWindows.end())
    return;

  OutputWindow &outw = it->second;

  // if the swapchain failed to create, do nothing. We will try to recreate it
  // again in CheckResizeOutputWindow (once per render 'frame')
  if(outw.swap == VK_NULL_HANDLE)
    return;

  VkDevice dev = m_pDriver->GetDev();
  VkCommandBuffer cmd = m_pDriver->GetNextCmd();
  const VkLayerDispatchTable *vt = ObjDisp(dev);

  VkResult vkr = VK_SUCCESS;

  VkCommandBufferBeginInfo beginInfo = {VK_STRUCTURE_TYPE_COMMAND_BUFFER_BEGIN_INFO, NULL,
                                        VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT};

  vkr = vt->BeginCommandBuffer(Unwrap(cmd), &beginInfo);
  RDCASSERTEQUAL(vkr, VK_SUCCESS);

  VkRenderPassBeginInfo rpbegin = {
      VK_STRUCTURE_TYPE_RENDER_PASS_BEGIN_INFO,
      NULL,
      Unwrap(outw.rpdepth),
      Unwrap(outw.fbdepth),
      {{0, 0}, {m_DebugWidth, m_DebugHeight}},
      0,
      NULL,
  };
  vt->CmdBeginRenderPass(Unwrap(cmd), &rpbegin, VK_SUBPASS_CONTENTS_INLINE);

  VkViewport viewport = {0.0f, 0.0f, (float)m_DebugWidth, (float)m_DebugHeight, 0.0f, 1.0f};
  vt->CmdSetViewport(Unwrap(cmd), 0, 1, &viewport);

  Matrix4f projMat =
      Matrix4f::Perspective(90.0f, 0.1f, 100000.0f, float(m_DebugWidth) / float(m_DebugHeight));
  // Inverse of the debug-view projection matrix.
  Matrix4f InvProj = projMat.Inverse();

  Matrix4f camMat = cfg.cam ? ((Camera *)cfg.cam)->GetMatrix() : Matrix4f::Identity();

  Matrix4f ModelViewProj = projMat.Mul(camMat);
  Matrix4f guessProjInv;

  if(cfg.position.unproject)
  {
    // the derivation of the projection matrix might not be right (hell, it could be an
    // orthographic projection). But it'll be close enough likely.
    Matrix4f guessProj =
        cfg.position.farPlane != FLT_MAX
            ? Matrix4f::Perspective(cfg.fov, cfg.position.nearPlane, cfg.position.farPlane, cfg.aspect)
            : Matrix4f::ReversePerspective(cfg.fov, cfg.position.nearPlane, cfg.aspect);

    if(cfg.ortho)
    {
      guessProj = Matrix4f::Orthographic(cfg.position.nearPlane, cfg.position.farPlane);
    }

    // Invert the guessed projection so already-projected (homogeneous) positions
    // can be re-projected with the debug camera below.
    guessProjInv = guessProj.Inverse();

    ModelViewProj = projMat.Mul(camMat.Mul(guessProjInv));
  }

  if(!secondaryDraws.empty())
  {
    size_t mapsUsed = 0;

    for(size_t i = 0; i < secondaryDraws.size(); i++)
    {
      const MeshFormat &fmt = secondaryDraws[i];

      if(fmt.vertexResourceId != ResourceId())
      {
        // TODO should move the color to a push constant so we don't have to map all the time
        uint32_t uboOffs = 0;
        MeshUBOData *data = (MeshUBOData *)m_MeshRender.UBO.Map(&uboOffs);

        data->mvp = ModelViewProj;
        data->color = Vec4f(fmt.meshColor.x, fmt.meshColor.y, fmt.meshColor.z, fmt.meshColor.w);
        data->homogenousInput = cfg.position.unproject;
        data->pointSpriteSize = Vec2f(0.0f, 0.0f);
        data->displayFormat = MESHDISPLAY_SOLID;
        data->rawoutput = 0;

        m_MeshRender.UBO.Unmap();

        mapsUsed++;

        if(mapsUsed + 1 >= m_MeshRender.UBO.GetRingCount())
//......... part of the code omitted here .........
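A closing note on Example 2: when the captured vertices are already post-projection (homogeneous clip-space positions), RenderMesh multiplies in the inverse of a guessed projection matrix so the mesh can be re-projected with the debug camera. The sketch below reproduces that composition, with GLM standing in for Matrix4f and all concrete values invented for illustration.

// Re-projecting already-projected (homogeneous) vertices, as in Example 2:
// undo the guessed capture projection, then apply the debug camera and the
// debug projection. GLM stands in for Matrix4f; values are made up.
#include <glm/glm.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include <cstdio>

int main()
{
    // Projection the captured application is guessed to have used.
    glm::mat4 guessProj = glm::perspective(glm::radians(75.0f), 16.0f / 9.0f, 0.01f, 1000.0f);

    // Debug-view camera and projection (RenderMesh uses a 90-degree FOV).
    glm::mat4 camMat  = glm::translate(glm::mat4(1.0f), glm::vec3(0.0f, 0.0f, -3.0f));
    glm::mat4 projMat = glm::perspective(glm::radians(90.0f), 16.0f / 9.0f, 0.1f, 100000.0f);

    // Equivalent of: ModelViewProj = projMat.Mul(camMat.Mul(guessProjInv));
    glm::mat4 mvp = projMat * camMat * glm::inverse(guessProj);

    // A homogeneous clip-space vertex as it might appear in the capture.
    glm::vec4 clipVert = guessProj * glm::vec4(0.5f, 0.25f, -2.0f, 1.0f);
    glm::vec4 out = mvp * clipVert;   // now in the debug camera's clip space
    std::printf("(%.2f, %.2f, %.2f, %.2f)\n", out.x, out.y, out.z, out.w);
    return 0;
}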