本文整理汇总了C++中Transform函数的典型用法代码示例。如果您正苦于以下问题:C++ Transform函数的具体用法?C++ Transform怎么用?C++ Transform使用的例子?那么, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了Transform函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: _update_properties
// Per-frame evaluation of the animation graph.
// Validates the root AnimationNode and the AnimationPlayer path (disabling
// playback and invalidating caches on failure), rebuilds track caches when
// needed, resets the per-pass blend state, then pre-processes the graph —
// seeking to 0 first if playback has just started.
// NOTE: this excerpt is truncated; the track-application phase is not shown.
void AnimationTree::_process_graph(float p_delta) {
    _update_properties(); //if properties need updating, update them
    //check all tracks, see if they need modification
    root_motion_transform = Transform();
    // Without a root node there is nothing to evaluate; stop playback cleanly.
    if (!root.is_valid()) {
        ERR_PRINT("AnimationTree: root AnimationNode is not set, disabling playback.");
        set_active(false);
        cache_valid = false;
        return;
    }
    if (!has_node(animation_player)) {
        ERR_PRINT("AnimationTree: no valid AnimationPlayer path set, disabling playback");
        set_active(false);
        cache_valid = false;
        return;
    }
    AnimationPlayer *player = Object::cast_to<AnimationPlayer>(get_node(animation_player));
    ObjectID current_animation_player = 0;
    if (player) {
        current_animation_player = player->get_instance_id();
    }
    // Re-wire the "caches_cleared" signal whenever the backing AnimationPlayer
    // changes, so cache invalidation follows the currently assigned player.
    if (last_animation_player != current_animation_player) {
        if (last_animation_player) {
            Object *old_player = ObjectDB::get_instance(last_animation_player);
            if (old_player) {
                old_player->disconnect("caches_cleared", this, "_clear_caches");
            }
        }
        if (player) {
            player->connect("caches_cleared", this, "_clear_caches");
        }
        last_animation_player = current_animation_player;
    }
    if (!player) {
        ERR_PRINT("AnimationTree: path points to a node not an AnimationPlayer, disabling playback");
        set_active(false);
        cache_valid = false;
        return;
    }
    if (!cache_valid) {
        // _update_caches() returning false means the caches could not be
        // rebuilt; skip this frame entirely.
        if (!_update_caches(player)) {
            return;
        }
    }
    { //setup
        process_pass++;
        state.valid = true;
        state.invalid_reasons = "";
        state.animation_states.clear(); //will need to be re-created
        state.valid = true;
        state.player = player;
        state.last_pass = process_pass;
        state.tree = this;
        // root source blends
        root->blends.resize(state.track_count);
        float *src_blendsw = root->blends.ptrw();
        for (int i = 0; i < state.track_count; i++) {
            src_blendsw[i] = 1.0; //by default all go to 1 for the root input
        }
    }
    //process
    {
        if (started) {
            //if playback just started, run one seek pass to position 0 first
            root->_pre_process(SceneStringNames::get_singleton()->parameters_base_path, NULL, &state, 0, true, Vector<StringName>());
            started = false;
        }
        root->_pre_process(SceneStringNames::get_singleton()->parameters_base_path, NULL, &state, p_delta, false, Vector<StringName>());
    }
    if (!state.valid) {
        return; //state is not valid. do nothing.
    }
    //apply value/transform/bezier blends to track caches and execute method/audio/animation tracks
    {
//......... part of the code omitted here .........
示例2: validate_contacts
// Prepares this body pair for one physics step.
// Early-outs when the pair cannot collide, runs the narrow-phase solve in a
// frame relative to body A's origin (keeps coordinates small for precision),
// falls back to a CCD test when nothing collided, and then computes
// per-contact data (depth, world points, bias) for the solver.
// NOTE: this excerpt is truncated; the per-contact setup tail is not shown.
bool BodyPairSW::setup(float p_step) {
    //cannot collide: layers don't overlap, mutual collision exceptions, or
    //both bodies are non-dynamic and neither requests contact reports.
    if ((A->get_layer_mask()&B->get_layer_mask())==0 || A->has_exception(B->get_self()) || B->has_exception(A->get_self()) || (A->get_mode()<=PhysicsServer::BODY_MODE_KINEMATIC && B->get_mode()<=PhysicsServer::BODY_MODE_KINEMATIC && A->get_max_contacts_reported()==0 && B->get_max_contacts_reported()==0)) {
        collided=false;
        return false;
    }
    offset_B = B->get_transform().get_origin() - A->get_transform().get_origin();
    validate_contacts();
    // All shape transforms below are expressed relative to A's origin.
    Vector3 offset_A = A->get_transform().get_origin();
    Transform xform_Au = Transform(A->get_transform().basis,Vector3());
    Transform xform_A = xform_Au * A->get_shape_transform(shape_A);
    Transform xform_Bu = B->get_transform();
    xform_Bu.origin-=offset_A;
    Transform xform_B = xform_Bu * B->get_shape_transform(shape_B);
    ShapeSW *shape_A_ptr=A->get_shape(shape_A);
    ShapeSW *shape_B_ptr=B->get_shape(shape_B);
    // NOTE(review): this local intentionally shadows the member `collided`;
    // the member is updated explicitly via this-> on the next line.
    bool collided = CollisionSolverSW::solve_static(shape_A_ptr,xform_A,shape_B_ptr,xform_B,_contact_added_callback,this,&sep_axis);
    this->collided=collided;
    if (!collided) {
        //test ccd (currently just a raycast)
        if (A->is_continuous_collision_detection_enabled() && A->get_mode()>PhysicsServer::BODY_MODE_KINEMATIC && B->get_mode()<=PhysicsServer::BODY_MODE_KINEMATIC) {
            _test_ccd(p_step,A,shape_A,xform_A,B,shape_B,xform_B);
        }
        if (B->is_continuous_collision_detection_enabled() && B->get_mode()>PhysicsServer::BODY_MODE_KINEMATIC && A->get_mode()<=PhysicsServer::BODY_MODE_KINEMATIC) {
            _test_ccd(p_step,B,shape_B,xform_B,A,shape_A,xform_A);
        }
        return false;
    }
    real_t max_penetration = space->get_contact_max_allowed_penetration();
    // Solver bias: default 0.3, overridden (or averaged) by per-shape custom bias.
    float bias = 0.3f;
    if (shape_A_ptr->get_custom_bias() || shape_B_ptr->get_custom_bias()) {
        if (shape_A_ptr->get_custom_bias()==0)
            bias=shape_B_ptr->get_custom_bias();
        else if (shape_B_ptr->get_custom_bias()==0)
            bias=shape_A_ptr->get_custom_bias();
        else
            bias=(shape_B_ptr->get_custom_bias()+shape_A_ptr->get_custom_bias())*0.5;
    }
    real_t inv_dt = 1.0/p_step;
    for(int i=0;i<contact_count;i++) {
        Contact &c = contacts[i];
        c.active=false;
        Vector3 global_A = xform_Au.xform(c.local_A);
        Vector3 global_B = xform_Bu.xform(c.local_B);
        // Separation along the contact normal; non-positive depth means the
        // bodies are apart at this contact, so skip it.
        real_t depth = c.normal.dot(global_A - global_B);
        if (depth<=0) {
            c.active=false;
            continue;
        }
        c.active=true;
#ifdef DEBUG_ENABLED
        if (space->is_debugging_contacts()) {
            // Debug points are reported in world space (add A's origin back).
            space->add_debug_contact(global_A+offset_A);
            space->add_debug_contact(global_B+offset_A);
        }
#endif
        int gather_A = A->can_report_contacts();
        int gather_B = B->can_report_contacts();
        c.rA = global_A;
        c.rB = global_B-offset_B;
        // contact query reporting...
#if 0
        if (A->get_body_type() == PhysicsServer::BODY_CHARACTER)
            static_cast<CharacterBodySW*>(A)->report_character_contact( global_A, global_B, B );
//......... part of the code omitted here .........
示例3: assert
// Renders the screen-space ambient occlusion passes for the current frame:
// (1) occlusion estimation from the depth + normal buffers, (2) a vertical
// blur, (3) a horizontal blur, each drawn as a full-screen quad into its own
// framebuffer. All GPU work is recorded into a single batch.
// NOTE: this excerpt is truncated; the final blend step is not shown.
void AmbientOcclusion::run(const render::SceneContextPointer& sceneContext, const render::RenderContextPointer& renderContext) {
    assert(renderContext->getArgs());
    assert(renderContext->getArgs()->_viewFrustum);
    RenderArgs* args = renderContext->getArgs();
    gpu::doInBatch(args->_context, [=](gpu::Batch& batch) {
        auto framebufferCache = DependencyManager::get<FramebufferCache>();
        QSize framebufferSize = framebufferCache->getFrameBufferSize();
        float fbWidth = framebufferSize.width();
        float fbHeight = framebufferSize.height();
        // Viewport expressed as normalized [0,1] texture-space offsets/extents.
        float sMin = args->_viewport.x / fbWidth;
        float sWidth = args->_viewport.z / fbWidth;
        float tMin = args->_viewport.y / fbHeight;
        float tHeight = args->_viewport.w / fbHeight;
        glm::mat4 projMat;
        Transform viewMat;
        args->_viewFrustum->evalProjectionMatrix(projMat);
        args->_viewFrustum->evalViewTransform(viewMat);
        batch.setProjectionTransform(projMat);
        batch.setViewTransform(viewMat);
        batch.setModelTransform(Transform());
        // Occlusion step
        // NOTE(review): return value unused — presumably called for its lazy
        // pipeline-creation side effect; confirm against getOcclusionPipeline().
        getOcclusionPipeline();
        batch.setResourceTexture(0, framebufferCache->getPrimaryDepthTexture());
        batch.setResourceTexture(1, framebufferCache->getDeferredNormalTexture());
        _occlusionBuffer->setRenderBuffer(0, _occlusionTexture);
        batch.setFramebuffer(_occlusionBuffer);
        // Occlusion uniforms
        g_scale = 1.0f;
        g_bias = 1.0f;
        g_sample_rad = 1.0f;
        g_intensity = 1.0f;
        // Bind the first gpu::Pipeline we need - for calculating occlusion buffer
        batch.setPipeline(getOcclusionPipeline());
        batch._glUniform1f(_gScaleLoc, g_scale);
        batch._glUniform1f(_gBiasLoc, g_bias);
        batch._glUniform1f(_gSampleRadiusLoc, g_sample_rad);
        batch._glUniform1f(_gIntensityLoc, g_intensity);
        // setup uniforms for unpacking a view-space position from the depth buffer
        // This is code taken from DeferredLightEffect.render() method in DeferredLightingEffect.cpp.
        // DeferredBuffer.slh shows how the unpacking is done and what variables are needed.
        // initialize the view-space unpacking uniforms using frustum data
        float left, right, bottom, top, nearVal, farVal;
        glm::vec4 nearClipPlane, farClipPlane;
        args->_viewFrustum->computeOffAxisFrustum(left, right, bottom, top, nearVal, farVal, nearClipPlane, farClipPlane);
        float depthScale = (farVal - nearVal) / farVal;
        float nearScale = -1.0f / nearVal;
        float depthTexCoordScaleS = (right - left) * nearScale / sWidth;
        float depthTexCoordScaleT = (top - bottom) * nearScale / tHeight;
        float depthTexCoordOffsetS = left * nearScale - sMin * depthTexCoordScaleS;
        float depthTexCoordOffsetT = bottom * nearScale - tMin * depthTexCoordScaleT;
        // now set the position-unpacking unforms
        batch._glUniform1f(_nearLoc, nearVal);
        batch._glUniform1f(_depthScaleLoc, depthScale);
        batch._glUniform2f(_depthTexCoordOffsetLoc, depthTexCoordOffsetS, depthTexCoordOffsetT);
        batch._glUniform2f(_depthTexCoordScaleLoc, depthTexCoordScaleS, depthTexCoordScaleT);
        batch._glUniform2f(_renderTargetResLoc, fbWidth, fbHeight);
        batch._glUniform2f(_renderTargetResInvLoc, 1.0f / fbWidth, 1.0f / fbHeight);
        // Full-screen quad in clip space covering the whole target.
        glm::vec4 color(0.0f, 0.0f, 0.0f, 1.0f);
        glm::vec2 bottomLeft(-1.0f, -1.0f);
        glm::vec2 topRight(1.0f, 1.0f);
        glm::vec2 texCoordTopLeft(0.0f, 0.0f);
        glm::vec2 texCoordBottomRight(1.0f, 1.0f);
        DependencyManager::get<GeometryCache>()->renderQuad(batch, bottomLeft, topRight, texCoordTopLeft, texCoordBottomRight, color);
        // Vertical blur step
        getVBlurPipeline();
        batch.setResourceTexture(0, _occlusionTexture);
        _vBlurBuffer->setRenderBuffer(0, _vBlurTexture);
        batch.setFramebuffer(_vBlurBuffer);
        // Bind the second gpu::Pipeline we need - for calculating blur buffer
        batch.setPipeline(getVBlurPipeline());
        DependencyManager::get<GeometryCache>()->renderQuad(batch, bottomLeft, topRight, texCoordTopLeft, texCoordBottomRight, color);
        // Horizontal blur step
        getHBlurPipeline();
        batch.setResourceTexture(0, _vBlurTexture);
        _hBlurBuffer->setRenderBuffer(0, _hBlurTexture);
        batch.setFramebuffer(_hBlurBuffer);
        // Bind the third gpu::Pipeline we need - for calculating blur buffer
        batch.setPipeline(getHBlurPipeline());
        DependencyManager::get<GeometryCache>()->renderQuad(batch, bottomLeft, topRight, texCoordTopLeft, texCoordBottomRight, color);
        // Blend step
//......... part of the code omitted here .........
示例4: Transform
// Scales this light source by applying the composite TRANSFORM `tr`.
// The Vector3d scale factors themselves are unused here (hence the unnamed
// parameter): scaling is already folded into `tr` by the caller.
void LightSource::Scale(const Vector3d&, const TRANSFORM *tr)
{
    Transform(tr);
}
示例5: MakeTestPoint
//......... part of the code omitted here (function header not visible) .........
// Builds the NURBS test object from the prepared NURBSSet, then exercises the
// SDK round-trip: add objects, rename, edit points/CVs, transform, join,
// break, and finally detach a curve into a separate node.
Object *obj = CreateNURBSObject(mpIp, &nset, mat);
INode *node = mpIp->CreateObjectNode(obj);
node->SetName(GetString(IDS_TEST_OBJECT));
NURBSSet addNset;
// build a point surface
int addptSurf = AddTestPointSurface(addNset);
// add an iso curve to the previously created CV Surface
NURBSId id = nset.GetNURBSObject(cvSurf)->GetId();
int addIsoCrv = AddTestIsoCurve(addNset, id);
AddNURBSObjects(obj, mpIp, &addNset);
// now test some changing functionality
// Let's change the name of the CVSurface
NURBSObject* nObj = nset.GetNURBSObject(cvSurf);
nObj->SetName(_T("New CVSurf Name")); // testing only, no need to localize
// now let's change the position of one of the points in the point curve
NURBSPointCurve* ptCrvObj = (NURBSPointCurve*)nset.GetNURBSObject(ptCrv);
ptCrvObj->GetPoint(0)->SetPosition(0, Point3(10, 160, 0)); // moved from 0,150,0
// now let's change the position and weight of one of the CVs
// in the CV Surface
NURBSCVSurface* cvSurfObj = (NURBSCVSurface*)nset.GetNURBSObject(cvSurf);
cvSurfObj->GetCV(0, 0)->SetPosition(0, Point3(-150.0, -100.0, 20.0)); // moved from 0,0,0
cvSurfObj->GetCV(0, 0)->SetWeight(0, 2.0); // from 1.0
// now let's do a transform of a curve.
NURBSIdTab xfmTab;
NURBSId nid = nset.GetNURBSObject(jc1)->GetId();
xfmTab.Append(1, &nid);
Matrix3 xfmMat;
xfmMat = TransMatrix(Point3(10, 10, -10));
SetXFormPacket xPack(xfmMat);
NURBSResult res = Transform(obj, xfmTab, xPack, xfmMat, 0);
// Now let's Join two curves
NURBSId jc1id = nset.GetNURBSObject(jc1)->GetId(),
jc2id = nset.GetNURBSObject(jc2)->GetId();
JoinCurves(obj, jc1id, jc2id, FALSE, TRUE, 20.0, 1.0f, 1.0f, 0);
// Now let's Join two surfaces
NURBSId js1id = nset.GetNURBSObject(js1)->GetId(),
js2id = nset.GetNURBSObject(js2)->GetId();
JoinSurfaces(obj, js1id, js2id, 1, 0, 20.0, 1.0f, 1.0f, 0);
// Break a Curve
NURBSId bcid = nset.GetNURBSObject(bc)->GetId();
BreakCurve(obj, bcid, .5, 0);
// Break a Surface
NURBSId bsid = nset.GetNURBSObject(bs)->GetId();
BreakSurface(obj, bsid, TRUE, .5, 0);
mpIp->RedrawViews(mpIp->GetTime());
nset.DeleteObjects();
addNset.DeleteObjects();
// now do a detach
NURBSSet detset;
Matrix3 detmat;
detmat.IdentityMatrix();
// build a cv curve
int detcvCrv = MakeTestCVCurve(detset, detmat);
// now a point curve
int detptCrv = MakeTestPointCurve(detset, detmat);
// Blend the two curves
int detblendCrv = MakeTestBlendCurve(detset, detcvCrv, detptCrv);
Object *detobj = CreateNURBSObject(mpIp, &detset, detmat);
INode *detnode = mpIp->CreateObjectNode(detobj);
// NOTE(review): narrow string literal here while _T() is used above —
// presumably fine in non-Unicode builds; confirm against the project config.
detnode->SetName("Detach From");
BOOL copy = TRUE;
BOOL relational = TRUE;
NURBSIdList detlist;
NURBSId oid = detset.GetNURBSObject(detblendCrv)->GetId();
detlist.Append(1, &oid);
DetachObjects(GetCOREInterface()->GetTime(), detnode, detobj,
detlist, "Detach Test", copy, relational);
mpIp->RedrawViews(mpIp->GetTime());
}
示例6: autoRestoreTransform
// Draws one compositor quad into the current render target.
// 2D transforms draw directly into the target; 3D transforms render into a
// temporary surface that is later composited (tail truncated in this excerpt).
// Supports an optional mask effect and an optional blend-mode effect, then
// dispatches on the primary effect type (solid color, textured RGB, ...).
// NOTE: this excerpt is truncated inside the effect-type switch.
void
BasicCompositor::DrawQuad(const gfx::Rect& aRect,
                          const gfx::Rect& aClipRect,
                          const EffectChain &aEffectChain,
                          gfx::Float aOpacity,
                          const gfx::Matrix4x4& aTransform,
                          const gfx::Rect& aVisibleRect)
{
    RefPtr<DrawTarget> buffer = mRenderTarget->mDrawTarget;
    // For 2D drawing, |dest| and |buffer| are the same surface. For 3D drawing,
    // |dest| is a temporary surface.
    RefPtr<DrawTarget> dest = buffer;
    buffer->PushClipRect(aClipRect);
    // Restores |dest|'s transform automatically when this scope exits.
    AutoRestoreTransform autoRestoreTransform(dest);
    Matrix newTransform;
    Rect transformBounds;
    Matrix4x4 new3DTransform;
    IntPoint offset = mRenderTarget->GetOrigin();
    if (aTransform.Is2D()) {
        newTransform = aTransform.As2D();
    } else {
        // Create a temporary surface for the transform.
        dest = gfxPlatform::GetPlatform()->CreateOffscreenContentDrawTarget(RoundOut(aRect).Size(), SurfaceFormat::B8G8R8A8);
        if (!dest) {
            return;
        }
        dest->SetTransform(Matrix::Translation(-aRect.x, -aRect.y));
        // Get the bounds post-transform.
        transformBounds = aTransform.TransformAndClipBounds(aRect, Rect(offset.x, offset.y, buffer->GetSize().width, buffer->GetSize().height));
        transformBounds.RoundOut();
        // Propagate the coordinate offset to our 2D draw target.
        newTransform = Matrix::Translation(transformBounds.x, transformBounds.y);
        // When we apply the 3D transformation, we do it against a temporary
        // surface, so undo the coordinate offset.
        new3DTransform = Matrix4x4::Translation(aRect.x, aRect.y, 0) * aTransform;
    }
    newTransform.PostTranslate(-offset.x, -offset.y);
    buffer->SetTransform(newTransform);
    // Optional mask: resolved to a source surface plus its 2D transform.
    RefPtr<SourceSurface> sourceMask;
    Matrix maskTransform;
    if (aEffectChain.mSecondaryEffects[EffectTypes::MASK]) {
        EffectMask *effectMask = static_cast<EffectMask*>(aEffectChain.mSecondaryEffects[EffectTypes::MASK].get());
        sourceMask = effectMask->mMaskTexture->AsSourceBasic()->GetSurface(dest);
        MOZ_ASSERT(effectMask->mMaskTransform.Is2D(), "How did we end up with a 3D transform here?!");
        MOZ_ASSERT(!effectMask->mIs3D);
        maskTransform = effectMask->mMaskTransform.As2D();
        maskTransform.PreTranslate(-offset.x, -offset.y);
    }
    CompositionOp blendMode = CompositionOp::OP_OVER;
    if (Effect* effect = aEffectChain.mSecondaryEffects[EffectTypes::BLEND_MODE].get()) {
        blendMode = static_cast<EffectBlendMode*>(effect)->mBlendMode;
    }
    switch (aEffectChain.mPrimaryEffect->mType) {
    case EffectTypes::SOLID_COLOR: {
        EffectSolidColor* effectSolidColor =
            static_cast<EffectSolidColor*>(aEffectChain.mPrimaryEffect.get());
        FillRectWithMask(dest, aRect, effectSolidColor->mColor,
                         DrawOptions(aOpacity, blendMode), sourceMask, &maskTransform);
        break;
    }
    case EffectTypes::RGB: {
        TexturedEffect* texturedEffect =
            static_cast<TexturedEffect*>(aEffectChain.mPrimaryEffect.get());
        TextureSourceBasic* source = texturedEffect->mTexture->AsSourceBasic();
        if (texturedEffect->mPremultiplied) {
            DrawSurfaceWithTextureCoords(dest, aRect,
                                         source->GetSurface(dest),
                                         texturedEffect->mTextureCoords,
                                         texturedEffect->mFilter,
                                         DrawOptions(aOpacity, blendMode),
                                         sourceMask, &maskTransform);
        } else {
            RefPtr<DataSourceSurface> srcData = source->GetSurface(dest)->GetDataSurface();
            // Yes, we re-create the premultiplied data every time.
            // This might be better with a cache, eventually.
            RefPtr<DataSourceSurface> premultData = gfxUtils::CreatePremultipliedDataSurface(srcData);
            DrawSurfaceWithTextureCoords(dest, aRect,
                                         premultData,
                                         texturedEffect->mTextureCoords,
                                         texturedEffect->mFilter,
                                         DrawOptions(aOpacity, blendMode),
                                         sourceMask, &maskTransform);
        }
        break;
//......... part of the code omitted here .........
示例7: Transform
void Plane::Transform(const Quat &transform)
{
float3x3 r = transform.ToFloat3x3();
Transform(r);
}
示例8: Transform
// Applies a rigid transform (rotation R followed by translation T) to this
// cone by packing the pair into a single RT matrix and delegating to the
// matrix overload. Returns *this to allow call chaining.
Cone& Cone::Transform(const NX::Matrix<float, 3, 3> &R, const NX::vector<float, 3> &T){
    return Transform(NX::GetRTMatrix(R, T));
}
示例9: m_vNormal
// Constructs a cone in its canonical frame — centre at the origin, normal +Y,
// long axis +X, short axis +Z — records its dimensions, then places it in
// world space by applying the 4x4 matrix M.
Cone::Cone(const float fLongAxis, const float fShortAxis, const float fHeight, const NX::Matrix<float, 4, 4> &M):
    m_vCenter(0.0f, 0.0f, 0.0f),
    m_vNormal(0.0f, 1.0f, 0.0f),
    m_vLongAxis(1.0f, 0.0f, 0.0f),
    m_vShortAxis(0.0f, 0.0f, 1.0f){
    // Store the ellipse radii and the height, then move into world space.
    m_fHeight    = fHeight;
    m_fLongAxis  = fLongAxis;
    m_fShortAxis = fShortAxis;
    Transform(M);
}
示例10: LogTexturePackerInfo
// One-time setup for the grass-blending demo: window options, core graphics
// resources, renderers, the in-game editor, the main camera, the Spine
// skeletal "MaidChan2" character (animation + skinning), and the editor UI
// panels (scene panel, sliders, toggles).
// NOTE: this excerpt is truncated; the remaining UI wiring is not shown.
void GrassBlendingGame::Initialize()
{
    window->SetTitle("TestApp - Enjoy Game Dev, Have Fun.");
    window->SetAllowUserResizing(true);
    auto assets = gameHost->AssetManager();
    auto clientBounds = window->GetClientBounds();
    {
        // Core GPU resources: command list, sampler, a 1x1 white texture for
        // untextured draws, and an offscreen render target sized to the client.
        commandList = std::make_shared<GraphicsCommandList>(*graphicsDevice);
        // NOTE(review): named samplerPoint but built from CreateLinearWrap —
        // presumably intentional; confirm which filter is wanted.
        samplerPoint = std::make_shared<SamplerState>(graphicsDevice,
            SamplerDescription::CreateLinearWrap());
        texture = std::make_shared<Texture2D>(graphicsDevice,
            1, 1, false, SurfaceFormat::R8G8B8A8_UNorm);
        std::array<std::uint32_t, 1> pixelData = {0xffffffff};
        texture->SetData(pixelData.data());
        renderTarget = std::make_shared<RenderTarget2D>(graphicsDevice,
            clientBounds.Width, clientBounds.Height,
            false, SurfaceFormat::R8G8B8A8_UNorm, DepthFormat::None);
    }
    {
        // Renderers and post-processing (FXAA sized to the client bounds).
        spriteRenderer = std::make_unique<SpriteRenderer>(graphicsDevice, *assets);
        fxaa = std::make_unique<FXAA>(graphicsDevice, *assets);
        fxaa->SetViewport(clientBounds.Width, clientBounds.Height);
        screenQuad = std::make_unique<ScreenQuad>(graphicsDevice);
        polygonBatch = std::make_unique<PolygonBatch>(graphicsContext, graphicsDevice, *assets);
    }
    {
        gameEditor = std::make_unique<SceneEditor::InGameEditor>(gameHost);
        editorBackground = std::make_unique<SceneEditor::EditorBackground>(gameHost);
    }
    {
        // Main camera entity with a 2D transform and camera component.
        mainCamera = gameWorld.CreateObject();
        mainCamera.AddComponent<Transform2D>();
        mainCamera.AddComponent<Camera2D>();
    }
    {
        // Load the Spine character: atlas, skeleton description, texture,
        // bind pose, "Walk"/"Idle" animation clips, skin and sprite tracks,
        // plus a skinned mesh + effect for GPU skinning.
        auto textureAtlas = TexturePacker::TextureAtlasLoader::Load(*assets, "MaidChan2/skeleton.atlas");
        auto skeletonDesc = Spine::SkeletonDescLoader::Load(*assets, "MaidChan2/skeleton.json");
        maidTexture = assets->Load<Texture2D>("MaidChan2/skeleton.png");
        LogTexturePackerInfo(textureAtlas);
        LogSkeletalInfo(skeletonDesc);
        maidSkeleton = std::make_shared<Skeleton>(Spine::CreateSkeleton(skeletonDesc.Bones));
        maidSkeletonPose = std::make_shared<SkeletonPose>(SkeletonPose::CreateBindPose(*maidSkeleton));
        auto animationClip = std::make_shared<AnimationClip>(Spine::CreateAnimationClip(skeletonDesc, "Walk"));
        maidAnimationState = std::make_shared<AnimationState>(animationClip, 1.0f, true);
        maidAnimationClipIdle = std::make_shared<AnimationClip>(Spine::CreateAnimationClip(skeletonDesc, "Idle"));
        maidSkin = Spine::CreateSkin(skeletonDesc, textureAtlas, "default");
        maidSpriteAnimationTracks = Spine::CreateSpriteAnimationTrack(skeletonDesc, textureAtlas, "Walk");
        animationSystem.Add(maidAnimationState, maidSkeleton, maidSkeletonPose);
        maidGlobalPose = SkeletonHelper::ToGlobalPose(*maidSkeleton, *maidSkeletonPose);
        // NOTE: for Skinning
        auto bindPose = SkeletonPose::CreateBindPose(*maidSkeleton);
        maidSkinnedMesh = Spine::CreateSkinnedMesh(*graphicsDevice,
            SkeletonHelper::ToGlobalPose(*maidSkeleton, bindPose),
            skeletonDesc, textureAtlas,
            Vector2(maidTexture->Width(), maidTexture->Height()), "default");
        maidSkinningEffect = std::make_unique<SkinnedEffect>(*graphicsDevice, *assets);
    }
    {
        // Scene panel drives the editor viewport through the main camera.
        scenePanel = std::make_shared<UI::ScenePanel>(clientBounds.Width, clientBounds.Height);
        scenePanel->cameraObject = mainCamera;
        gameEditor->AddView(scenePanel);
    }
    {
        // Debug UI stack: navigator, two sliders, and a play/stop toggle.
        auto stackPanel = std::make_shared<UI::StackPanel>(124, 170);
        stackPanel->Transform(Matrix3x2::CreateTranslation(Vector2{5, 10}));
        gameEditor->AddView(stackPanel);
        {
            auto navigator = std::make_shared<UI::DebugNavigator>(gameHost->Clock());
            stackPanel->AddChild(navigator);
        }
        {
            slider1 = std::make_shared<UI::Slider>(-2.0, 2.0);
            slider1->Value(1.0);
            stackPanel->AddChild(slider1);
        }
        {
            slider2 = std::make_shared<UI::Slider>(0.0, 1.0);
            slider2->Value(1.0);
            stackPanel->AddChild(slider2);
        }
        {
            toggleSwitch1 = std::make_shared<UI::ToggleSwitch>();
            toggleSwitch1->IsOn(true);
            toggleSwitch1->OnContent("Play");
            toggleSwitch1->OffContent("Stop");
//......... part of the code omitted here .........
示例11: Transform
// Scales this ovus by applying the composite TRANSFORM `tr`.
// The Vector3d scale factors themselves are unused here (hence the unnamed
// parameter): scaling is already folded into `tr` by the caller.
void Ovus::Scale(const Vector3d&, const TRANSFORM *tr)
{
    Transform(tr);
}
示例12: Jade
/*
 * Jade: blind source separation via Joint Approximate Diagonalization of
 * Eigen-matrices (JADE).
 *
 * Pipeline: remove the sample mean from X, whiten it, estimate the 4th-order
 * cumulant tensor, jointly diagonalize that tensor, then apply the resulting
 * rotation to both the data and the accumulated separating matrix B.
 * Diagnostic matrices are printed after each stage.
 *
 * X is modified in place; B receives the composite separating transform.
 */
void Jade (
    double *B, /* Output. Separating matrix. nbc*nbc */
    double *X, /* Input. Data set nbc x nbs */
    int nbc, /* Input. Number of sensors */
    int nbs /* Input. Number of samples */
)
{
    /* Joint-diagonalization threshold tightens as the sample count grows. */
    double jd_threshold = RELATIVE_JD_THRESHOLD / sqrt((double)nbs);
    int rotation_count = 1;
    /* Scratch transform (whitener first, diagonalizing rotation later) and
       the nbc^4-entry cumulant tensor. */
    double *transf_mat = (double *) calloc(nbc*nbc, sizeof(double));
    double *cum_tensor = (double *) calloc(nbc*nbc*nbc*nbc, sizeof(double));
    if (transf_mat == NULL || cum_tensor == NULL) OutOfMemory();

    /* Stage 1: initialize B to identity and centre the data. */
    Message0(2, "Init...\n");
    Identity(B, nbc);
    MeanRemoval(X, nbc, nbs);
    printf ("mean\n");
    PrintMat (X, nbc, nbs);
    printf ("\n");

    /* Stage 2: whiten the data; fold the whitener into B as well. */
    Message0(2, "Whitening...\n");
    ComputeWhitener(transf_mat, X, nbc, nbs);
    printf ("Whitener:\n");
    PrintMat (transf_mat, nbc, nbc);
    printf ("\n");
    Transform (X, transf_mat, nbc, nbs);
    printf ("Trans X\n");
    PrintMat (X, nbc, nbs);
    printf ("\n");
    Transform (B, transf_mat, nbc, nbc);

    /* Stage 3: estimate the 4th-order cumulant tensor of the whitened data. */
    Message0(2, "Estimating the cumulant tensor...\n");
    EstCumTens (cum_tensor, X, nbc, nbs);
    printf ("cum tens \n");
    PrintMat (cum_tensor, nbc*nbc, nbc*nbc);
    printf ("\n");

    /* Stage 4: jointly diagonalize the tensor; transf_mat now holds the
       accumulated plane rotations. */
    Message0(2, "Joint diagonalization...\n");
    rotation_count = JointDiago (cum_tensor, transf_mat, nbc, nbc*nbc, jd_threshold);
    MessageI(3, "Total number of plane rotations: %6i.\n", rotation_count);
    MessageF(3, "Size of the total rotation: %10.7e\n", NonIdentity(transf_mat,nbc));
    printf ("Trans mat\n");
    PrintMat (transf_mat, nbc, nbc);
    printf ("\n");

    /* Stage 5: apply the rotation to the data and the separating matrix. */
    Message0(2, "Updating...\n");
    Transform (X, transf_mat, nbc, nbs);
    Transform (B, transf_mat, nbc, nbc);
    printf ("resultant \n");
    PrintMat (X, nbc, nbs);
    printf ("\n");
    printf ("estimated mix \n");
    PrintMat (B, nbc, nbc);
    printf ("\n");

    free(transf_mat);
    free(cum_tensor);
}
示例13: pushTransform
// Pushes a translation-only transform onto the transform stack and folds it
// into the running composite transform used by subsequent drawing calls.
void Painter::pushTransform(iXY translate)
{
    Transform newTransform(translate);
    transforms.push(newTransform);
    currentTransform += transforms.top();
}
示例14: Final
// Finalization of the hash computation: applies the padding scheme
// (a '1' bit, zero bits, a 7-bit-space r_bytes field, and the block counter
// as a length field), digests the padding block(s), runs the output
// transformation, copies the trailing hashbytelen bytes of the chaining
// value into `output`, and zeroises/frees the context buffers.
// NOTE: this excerpt is truncated; the return statement is not shown.
HashReturn Final(hashState* ctx,
BitSequence* output) {
    int i, j = 0, hashbytelen = ctx->hashbitlen/8;
    u8 *s = (BitSequence*)ctx->chaining;
    u64 kbyts=0,kbits; //ADDED: count of zero-padding bytes / bits
    /* pad with '1'-bit and first few '0'-bits */
    if (BILB) {
        // Partial final byte: mask off unused low bits, set the '1' pad bit.
        ctx->buffer[(int)ctx->buf_ptr-1] &= ((1<<BILB)-1)<<(8-BILB);
        ctx->buffer[(int)ctx->buf_ptr-1] ^= 0x1<<(7-BILB);
        BILB = 0;
    }
    else ctx->buffer[(int)ctx->buf_ptr++] = 0x80;
    /* pad with '0'-bits */
    //modified: 1 byte reserved for the r value & 8 bytes for the length
    if (ctx->buf_ptr > ctx->blocksize-LENGTHFIELDLEN-1) { //Modified: 1 byte for r value & 8 bytes for length
        /* padding requires two blocks */
        while (ctx->buf_ptr < ctx->blocksize) {
            ctx->buffer[(int)ctx->buf_ptr++] = 0;
            kbyts++; //ADDED: number of zeros appended
        }
        /* digest first padding block */
        Transform(ctx, ctx->buffer, ctx->blocksize); //Modified
        ctx->buf_ptr = 0;
    }
    while (ctx->buf_ptr < ctx->blocksize-LENGTHFIELDLEN-1) { //Modified
        ctx->buffer[(int)ctx->buf_ptr++] = 0;
        kbyts++; //number of zeros appended
    }
    // NOTE(review): this second zero-padding loop has the same condition as
    // the one above, which has just made the condition false — it appears to
    // be dead code; confirm against the original padding specification.
    //for k zeros
    while (ctx->buf_ptr < ctx->blocksize-LENGTHFIELDLEN-1) {
        ctx->buffer[(int)ctx->buf_ptr++] = 0;
        kbyts++; //number of zeros appended
    }
    //ADDED: compute the r_bytes field (7-bit space plus the pad '1' bit)
    kbits=(kbyts*8)+ctx->bits_in_last_byte;
    kbits+=8; //7 bits as 10000000 & 1 extra bit, as the space is 7 bits for rbytes
    int r_bytes;
    // for 7 bit r_bytes padding
    r_bytes=(952-kbits)%1024;
    while (r_bytes<0)
        {r_bytes+=1024;} // convert negative to the positive mod value
    r_bytes/=8; // convert it into bytes
    //store the 7-bit r_bytes value (written backwards from the field's end)
    while (ctx->buf_ptr >= ctx->blocksize-LENGTHFIELDLEN-1) {
        ctx->buffer[ctx->buf_ptr--] = (u8)r_bytes;
        r_bytes >>= 8;
    }
    //-----
    /* length padding */
    ctx->block_counter++;
    ctx->buf_ptr = ctx->blocksize; //modified
    //ADDED: remember the final block count
    ctx->cnt_block=ctx->block_counter;
    //------
    /* write the block counter into the length field, little end last */
    while (ctx->buf_ptr > ctx->blocksize-LENGTHFIELDLEN) { //modified
        ctx->buffer[(int)--ctx->buf_ptr] = (u8)ctx->block_counter;
        ctx->block_counter >>= 8;
    }
    /* digest final padding block */
    Transform(ctx, ctx->buffer, ctx->blocksize); //modified
    /* perform output transformation */
    OutputTransformation(ctx);
    j=0;
    /* store hash result in output: the trailing hashbytelen bytes of the
       chaining value */
    for (i = ctx->size-hashbytelen; i < ctx->size; i++,j++) {
        output[j] = s[i];
    }
    /* zeroise relevant variables and deallocate memory */
    if (ctx->size == SHORT) {
        memset(ctx->chaining, 0, COLS512*sizeof(u64));
        memset(ctx->buffer, 0, SIZE512);
    }
    else {
        memset(ctx->chaining, 0, COLS1024*sizeof(u64));
        memset(ctx->buffer, 0, SIZE1024);
    }
    free(ctx->chaining);
    free(ctx->buffer);
//......... part of the code omitted here .........
示例15: SetTarget
void
SeekerAI::FindObjective()
{
if (!shot || !target) return;
if (target->Life() == 0) {
if (target != orig_target)
SetTarget(orig_target,0);
else
SetTarget(0,0);
return;
}
Point tloc = target->Location();
tloc = Transform(tloc);
// seeker head limit of 45 degrees:
if (tloc.z < 0 || tloc.z < fabs(tloc.x) || tloc.z < fabs(tloc.y)) {
overshot = true;
SetTarget(0,0);
return;
}
// distance from self to target:
distance = Point(target->Location() - self->Location()).length();
// are we being spoofed?
CheckDecoys(distance);
Point cv = ClosingVelocity();
// time to reach target:
double time = distance / cv.length();
double predict = time;
if (predict > 15)
predict = 15;
// pure pursuit:
if (pursuit == 1 || time < 0.1) {
obj_w = target->Location();
}
// lead pursuit:
else {
// where the target will be when we reach it:
Point run_vec = target->Velocity();
obj_w = target->Location() + (run_vec * predict);
}
// subsystem offset:
if (subtarget) {
Point offset = target->Location() - subtarget->MountLocation();
obj_w -= offset;
}
else if (target->Type() == SimObject::SIM_SHIP) {
Ship* tgt_ship = (Ship*) target;
if (tgt_ship->IsGroundUnit())
obj_w += Point(0,150,0);
}
distance = Point(obj_w - self->Location()).length();
time = distance / cv.length();
// where we will be when the target gets there:
if (predict > 0.1 && predict < 15) {
Point self_dest = self->Location() + cv * predict;
Point err = obj_w - self_dest;
obj_w += err;
}
// transform into camera coords:
objective = Transform(obj_w);
objective.Normalize();
shot->SetEta((int) time);
if (shot->Owner())
((Ship*) shot->Owner())->SetMissileEta(shot->Identity(), (int) time);
}