This article collects typical usage examples of the C++ method ovr::Matrix4f::Transform. If you are unsure what Matrix4f::Transform does, how to call it, or what it looks like in practice, the curated examples below may help. You can also explore further usage of its containing class, ovr::Matrix4f.
The following shows 6 code examples of the Matrix4f::Transform method, sorted by popularity by default.
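Before the examples: Matrix4f::Transform applies the full 4x4 matrix to a 3-component vector, so for a pure rotation matrix it simply rotates the vector. A minimal sketch, assuming the LibOVR math types from OVR_Math.h (the header path varies by SDK version):

    #include "OVR_Math.h" // OVR::Matrix4f, OVR::Vector3f; path varies by SDK version

    // Rotate the canonical forward vector (0, 0, -1) a quarter turn around the Y axis.
    OVR::Matrix4f yawRotate = OVR::Matrix4f::RotationY(3.14159f / 2.0f);
    OVR::Vector3f forward = yawRotate.Transform(OVR::Vector3f(0.0f, 0.0f, -1.0f));
    // forward is now approximately (-1, 0, 0).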
Example 1: AssembleViewMatrix
/// From the OVR SDK.
void OculusAppSkeleton::AssembleViewMatrix()
{
    // Rotate and position the m_oculusView camera, using YawPitchRoll in BodyFrame coordinates.
    OVR::Matrix4f rollPitchYaw = GetRollPitchYaw();
    OVR::Vector3f up = rollPitchYaw.Transform(UpVector);
    OVR::Vector3f forward = rollPitchYaw.Transform(ForwardVector);

    // Minimal head modeling.
    float headBaseToEyeHeight = 0.15f;     // Vertical height of eye above the base of the head
    float headBaseToEyeProtrusion = 0.09f; // Distance of eye forward of the base of the head
    OVR::Vector3f eyeCenterInHeadFrame(0.0f, headBaseToEyeHeight, -headBaseToEyeProtrusion);
    OVR::Vector3f shiftedEyePos = EyePos + rollPitchYaw.Transform(eyeCenterInHeadFrame);
    shiftedEyePos.y -= eyeCenterInHeadFrame.y; // Bring the head back down to its original height
    m_oculusView = OVR::Matrix4f::LookAtRH(shiftedEyePos, shiftedEyePos + forward, up);

    // This is what the transformation would be without head modeling:
    // m_oculusView = Matrix4f::LookAtRH(EyePos, EyePos + forward, up);

    /// Set up a third-person (or otherwise) view for the control window.
    {
        OVR::Vector3f txFollowDisp = rollPitchYaw.Transform(FollowCamDisplacement);
        FollowCamPos = EyePos + txFollowDisp;
        m_controlView = OVR::Matrix4f::LookAtRH(FollowCamPos, EyePos, up);
    }
}
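The head model above treats EyePos as the pivot at the base of the head: the eye offset is rotated by the head orientation, and the resting height is subtracted back out so that only rotation-induced displacement remains. A quick sanity check of that rotation (offsets taken from the example; result computed by hand under the standard right-handed RotationY convention):

    OVR::Matrix4f yawOnly = OVR::Matrix4f::RotationY(3.14159f / 2.0f); // pure 90-degree yaw
    OVR::Vector3f offset = yawOnly.Transform(OVR::Vector3f(0.0f, 0.15f, -0.09f));
    // offset is approximately (-0.09, 0.15, 0): the eye swings sideways around the
    // neck pivot, while its height component (0.15, subtracted out afterwards) is unchanged.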
Example 2: AccumulateInputs
/// Handle input's influence on orientation variables.
void OculusAppSkeleton::AccumulateInputs(float dt)
{
    // Handle sensor motion.
    // We extract Yaw, Pitch, Roll instead of directly using the orientation
    // to allow "additional" yaw manipulation with mouse/controller.
    if (m_ok.SensorActive())
    {
        OVR::Quatf hmdOrient = m_ok.GetOrientation();
        float yaw = 0.0f;
        hmdOrient.GetEulerAngles<OVR::Axis_Y, OVR::Axis_X, OVR::Axis_Z>(&yaw, &EyePitch, &EyeRoll);
        EyeYaw += (yaw - LastSensorYaw);
        LastSensorYaw = yaw;
    }

    // Gamepad rotation.
    EyeYaw -= GamepadRotate.x * dt;
    if (!m_ok.SensorActive())
    {
        // Allow gamepad to look up/down, but only if there is no Rift sensor.
        EyePitch -= GamepadRotate.y * dt;
        EyePitch -= MouseRotate.y * dt;
        EyeYaw -= MouseRotate.x * dt;

        const float maxPitch = ((3.1415f / 2) * 0.98f);
        if (EyePitch > maxPitch)
            EyePitch = maxPitch;
        if (EyePitch < -maxPitch)
            EyePitch = -maxPitch;
    }

    if (GamepadMove.LengthSq() > 0)
    {
        OVR::Matrix4f yawRotate = OVR::Matrix4f::RotationY(EyeYaw);
        OVR::Vector3f orientationVector = yawRotate.Transform(GamepadMove);
        orientationVector *= MoveSpeed * dt;
        EyePos += orientationVector;
    }
    if (MouseMove.LengthSq() > 0)
    {
        OVR::Matrix4f yawRotate = OVR::Matrix4f::RotationY(EyeYaw);
        OVR::Vector3f orientationVector = yawRotate.Transform(MouseMove);
        orientationVector *= MoveSpeed * dt;
        EyePos += orientationVector;
    }
    if (KeyboardMove.LengthSq() > 0)
    {
        OVR::Matrix4f yawRotate = OVR::Matrix4f::RotationY(EyeYaw);
        OVR::Vector3f orientationVector = yawRotate.Transform(KeyboardMove);
        orientationVector *= MoveSpeed * dt;
        EyePos += orientationVector;
    }
}
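The three movement blocks repeat the same rotate-and-scale step. A possible refactoring, sketched here rather than taken from the original source:

    static void ApplyYawedMove(OVR::Vector3f& eyePos, const OVR::Vector3f& move,
                               float eyeYaw, float moveSpeed, float dt)
    {
        if (move.LengthSq() <= 0.0f)
            return;
        // Rotate the input vector into the viewer's yaw frame, then integrate.
        const OVR::Matrix4f yawRotate = OVR::Matrix4f::RotationY(eyeYaw);
        eyePos += yawRotate.Transform(move) * (moveSpeed * dt);
    }

Each call site then reduces to a line like ApplyYawedMove(EyePos, GamepadMove, EyeYaw, MoveSpeed, dt).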
Example 3: getViewMatrix
OVR::Matrix4f vx_ovr_namespace_::OVRHMDHandleWithDevice::getViewMatrix(ovrEyeType eye,
    float pos_x, float pos_y, float pos_z, float yaw) const
{
    auto height = ovr_GetFloat(session_, OVR_KEY_EYE_HEIGHT, 1.8f);

    OVR::Matrix4f rollPitchYaw = OVR::Matrix4f::RotationY(yaw);
    OVR::Matrix4f finalRollPitchYaw = rollPitchYaw * OVR::Matrix4f(eyeRenderPosef_[eye].Orientation);
    OVR::Vector3f finalUp = finalRollPitchYaw.Transform(OVR::Vector3f(0.0f, 1.0f, 0.0f));
    OVR::Vector3f finalForward = finalRollPitchYaw.Transform(OVR::Vector3f(0.0f, 0.0f, -1.0f));
    OVR::Vector3f shiftedEyePos = OVR::Vector3f(pos_x, pos_y + height, pos_z)
        + rollPitchYaw.Transform(eyeRenderPosef_[eye].Position);

    return OVR::Matrix4f::LookAtRH(shiftedEyePos, shiftedEyePos + finalForward, finalUp);
}
Example 4: _StoreHmdPose
// Store HMD position and direction for gaze tracking in timestep.
// The OVR SDK requires the head pose to be queried between ovrHmd_BeginFrameTiming
// and ovrHmd_EndFrameTiming.
void RiftAppSkeleton::_StoreHmdPose(const ovrPosef& eyePose)
{
    m_hmdRo.x = eyePose.Position.x + m_chassisPos.x;
    m_hmdRo.y = eyePose.Position.y + m_chassisPos.y;
    m_hmdRo.z = eyePose.Position.z + m_chassisPos.z;

    const OVR::Matrix4f rotmtx = OVR::Matrix4f::RotationY(-m_chassisYaw) // Not sure why negative...
        * OVR::Matrix4f(eyePose.Orientation);
    const OVR::Vector4f rotvec = rotmtx.Transform(OVR::Vector4f(0.0f, 0.0f, -1.0f, 0.0f));
    m_hmdRd.x = rotvec.x;
    m_hmdRd.y = rotvec.y;
    m_hmdRd.z = rotvec.z;
}
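Note the w component of 0 in the Vector4f overload: in homogeneous coordinates, w = 0 marks a direction, so any translation stored in the matrix drops out, while w = 1 would mark a point and pick the translation up. An illustrative sketch (assuming LibOVR's Matrix4f::Translation helper):

    OVR::Matrix4f m = OVR::Matrix4f::Translation(OVR::Vector3f(5.0f, 0.0f, 0.0f));
    OVR::Vector4f dir = m.Transform(OVR::Vector4f(0.0f, 0.0f, -1.0f, 0.0f)); // unchanged: (0, 0, -1, 0)
    OVR::Vector4f pnt = m.Transform(OVR::Vector4f(0.0f, 0.0f, -1.0f, 1.0f)); // translated: (5, 0, -1, 1)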
Example 5: Render
void Render()
{
    ovrFrameTiming frameTiming = ovrHmd_BeginFrameTiming(HMD, 0);

    // Update the box's rotation value.
    rotationBoxValue += 2.0f * frameTiming.DeltaSeconds;

    // Eye position when driven by the keyboard, etc.
    static OVR::Vector3f EyePos;
    EyePos.x = 0.0f, EyePos.y = 0.0f, EyePos.z = 0.0f;

    // Used when manipulating yaw via mouse rotation, etc.
    static float eyeYaw = 0;

    // Obtained from the sensor.
    ovrPosef movePose = ovrHmd_GetSensorState(HMD, frameTiming.ScanoutMidpointSeconds).Predicted.Pose;
    static ovrPosef eyeRenderPose[2];

    // Calculation for taking body height into account:
    //EyePos.y = ovrHmd_GetFloat(HMD, OVR_KEY_EYE_HEIGHT, EyePos.y);

    // Only TriangleList is used in this sample.
    g_pImmediateContext->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLELIST);

    // Set the render target.
    g_pImmediateContext->OMSetRenderTargets(1, &g_pRenderTargetViewOculus, g_pDepthStencilViewOculus);

    // Clear the screen and the depth buffer.
    float ClearColor[4] = { 0.0f, 0.125f, 0.3f, 1.0f }; // In R, G, B, A order
    g_pImmediateContext->ClearRenderTargetView(g_pRenderTargetViewOculus, ClearColor);
    g_pImmediateContext->ClearDepthStencilView(g_pDepthStencilViewOculus, D3D11_CLEAR_DEPTH, 1.0f, 0);

    // Render the scene for each eye.
    for (int eyeIndex = 0; eyeIndex < ovrEye_Count; eyeIndex++)
    {
        ConstantBuffer cb;
        ovrEyeType eye = HMDDesc.EyeRenderOrder[eyeIndex];
        eyeRenderPose[eye] = ovrHmd_GetEyePose(HMD, eye);

        // Compute the view matrix.
        OVR::Matrix4f rotation = OVR::Matrix4f::RotationY(eyeYaw); // Apply the rotation computed beforehand (from the mouse, etc.)
        OVR::Matrix4f resultRotation = rotation * OVR::Matrix4f(eyeRenderPose[eye].Orientation) * // Apply the eye's orientation (rotation)
            OVR::Matrix4f(1, 0, 0, 0,
                          0, -1, 0, 0,
                          0, 0, -1, 0,
                          0, 0, 0, 1); // Flip the directions to match the axes
        OVR::Vector3f resultUp = resultRotation.Transform(OVR::Vector3f(0, 1, 0)); // Compute the up vector
        OVR::Vector3f forward = resultRotation.Transform(OVR::Vector3f(0, 0, -1)); // Compute the forward vector
        OVR::Vector3f resultEyePos = EyePos + rotation.Transform(eyeRenderPose[eye].Position); // Compute the final eye position
        OVR::Vector3f resultEyeAt = EyePos + rotation.Transform(eyeRenderPose[eye].Position) + forward; // Compute the final gaze target

        // Build the view matrix with xnamath from the computed values.
        XMVECTOR Eye = XMVectorSet(resultEyePos.x, resultEyePos.y, resultEyePos.z, 0.0f); // Camera position
        XMVECTOR At = XMVectorSet(resultEyeAt.x, resultEyeAt.y, resultEyeAt.z, 0.0f);     // Camera look-at target
        XMVECTOR Up = XMVectorSet(resultUp.x, resultUp.y, resultUp.z, 0.0f);              // Camera up vector
        g_View = XMMatrixLookAtLH(Eye, At, Up) * XMMatrixTranslation(EyeRenderDesc[eye].ViewAdjust.x,
            EyeRenderDesc[eye].ViewAdjust.y, EyeRenderDesc[eye].ViewAdjust.z);

        // Compute the projection matrix from EyeRenderDesc.
        // The tangents (tan) of the FOV above/below/left/right of the eye's center are stored there,
        // so use the libovr-specific function to compute the matrix.
        // OVR::Matrix4f has rows and columns swapped relative to xnamath, so transpose it.
        OVR::Matrix4f proj = OVR::CreateProjection(false, EyeRenderDesc[eye].Fov, 0.01f, 100.0f);
        proj.Transpose();
        memcpy_s(&g_Projection, 64, &proj, 64);

        // Set the viewport (one half per eye).
        D3D11_VIEWPORT vp;
        vp.TopLeftX = EyeRenderViewport[eye].Pos.x;
        vp.TopLeftY = EyeRenderViewport[eye].Pos.y;
        vp.Width = EyeRenderViewport[eye].Size.w;
        vp.Height = EyeRenderViewport[eye].Size.h;
        vp.MinDepth = 0.0f;
        vp.MaxDepth = 1.0f;
        g_pImmediateContext->RSSetViewports(1, &vp);

        // Set up the matrices passed to the constant buffer.
        // They must arrive at the shader transposed, so transpose them here.
        cb.mView = XMMatrixTranspose(g_View);
        cb.mProjection = XMMatrixTranspose(g_Projection);

        // Draw the scene.
        Scene(cb);
    }

    // Distort the scene rendered to the render target and draw it to the back buffer.
    DistortionMeshRender(3, HMD, frameTiming.TimewarpPointSeconds, eyeRenderPose);
    g_pSwapChain->Present(0, 0);
    //pRender->WaitUntilGpuIdle(); // Queries are not implemented in this sample.
    ovrHmd_EndFrameTiming(HMD);
}
Example 6: timestep
void RiftAppSkeleton::timestep(float dt)
{
    for (std::vector<IScene*>::iterator it = m_scenes.begin();
        it != m_scenes.end();
        ++it)
    {
        IScene* pScene = *it;
        if (pScene != NULL)
        {
            pScene->timestep(dt);
        }
    }

    glm::vec3 hydraMove = glm::vec3(0.0f, 0.0f, 0.0f);
#ifdef USE_SIXENSE
    const sixenseAllControllerData& state = m_fm.GetCurrentState();
    for (int i = 0; i < 2; ++i)
    {
        const sixenseControllerData& cd = state.controllers[i];
        const float moveScale = pow(10.0f, cd.trigger);
        hydraMove.x += cd.joystick_x * moveScale;

        const FlyingMouse::Hand h = static_cast<FlyingMouse::Hand>(i);
        if (m_fm.IsPressed(h, SIXENSE_BUTTON_JOYSTICK)) ///@note left hand does not work
            hydraMove.y += cd.joystick_y * moveScale;
        else
            hydraMove.z -= cd.joystick_y * moveScale;
    }

    if (m_fm.WasJustPressed(FlyingMouse::Right, SIXENSE_BUTTON_START))
    {
        ToggleShaderWorld();
    }

    // Adjust the cinemascope feel with the left trigger.
    // The mouse wheel still works if the Hydra is not present or not pressed (trigger value 0.0).
    const float trigger = m_fm.GetTriggerValue(FlyingMouse::Left); // [0,1]
    if (trigger > 0.0f)
    {
        const float deadzone = 0.1f;
        const float topval = 0.95f;
        const float trigScaled = (trigger - deadzone) / (1.0f - deadzone);
        m_cinemaScopeFactor = std::max(0.0f, topval * trigScaled);
    }
#endif

    const glm::vec3 move_dt = m_headSize * (m_keyboardMove + m_joystickMove + m_mouseMove + hydraMove) * dt;
    ovrVector3f kbm;
    kbm.x = move_dt.x;
    kbm.y = move_dt.y;
    kbm.z = move_dt.z;

    // Move in the direction the viewer is facing.
    const OVR::Matrix4f rotmtx =
        OVR::Matrix4f::RotationY(-m_chassisYaw)
        * OVR::Matrix4f(m_eyeOri);
    const OVR::Vector3f kbmVec = rotmtx.Transform(OVR::Vector3f(kbm));
    m_chassisPos.x += kbmVec.x;
    m_chassisPos.y += kbmVec.y;
    m_chassisPos.z += kbmVec.z;

    m_chassisYaw += (m_keyboardYaw + m_joystickYaw + m_mouseDeltaYaw) * dt;

    m_fm.updateHydraData();
    m_hyif.updateHydraData(m_fm, 1.0f);
}
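Examples 4 and 6 build the same chassis orientation, RotationY(-m_chassisYaw) composed with the eye orientation, before calling Transform. A shared helper could compute facing-relative vectors in one place; this is a sketch under that assumption (with m_eyeOri taken to be an OVR::Quatf), not code from the original project:

    static OVR::Vector3f ChassisRelative(float chassisYaw, const OVR::Quatf& eyeOri,
                                         const OVR::Vector3f& v)
    {
        // Compose the chassis yaw with the HMD orientation, then rotate v into world space.
        const OVR::Matrix4f rotmtx = OVR::Matrix4f::RotationY(-chassisYaw)
                                   * OVR::Matrix4f(eyeOri);
        return rotmtx.Transform(v);
    }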