This article collects typical usage examples of the Matrix3::Set method in C++. If you are wondering what exactly C++'s Matrix3::Set does, how Matrix3::Set is used, or what calling Matrix3::Set looks like in practice, the hand-picked code examples here may help. You can also explore further usage examples of the class the method belongs to, Matrix3.
The following presents 10 code examples of the Matrix3::Set method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
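A note on the call patterns you will see below: Set is used to assign a single element (row, column, value), all nine elements at once (either as nine scalars or as a flat float[9] array), three basis vectors, four row vectors (the 3ds Max SDK's 4x3 Matrix3 in Example 6), or another matrix object (Example 10). The real Matrix3 classes come from different code bases (hand-rolled math classes, cyCodeBase, the 3ds Max SDK, and others), so their exact signatures differ. The sketch below is only a minimal, hypothetical illustration of such overloads for orientation; the helper type Vec3 and the row-major element layout are assumptions made here and are not taken from any of the projects.

// Minimal illustrative sketch only -- not one of the real Matrix3 classes used in the examples.
#include <cstdio>

struct Vec3 { float x, y, z; };   // hypothetical helper type for this sketch

class Matrix3
{
public:
    // Assign a single element at (row i, column j) -- the pattern in Examples 1 and 5.
    void Set(int i, int j, float v) { m[i][j] = v; }

    // Assign all nine elements row by row -- the pattern in Example 2.
    void Set(float m00, float m01, float m02,
             float m10, float m11, float m12,
             float m20, float m21, float m22)
    {
        m[0][0] = m00; m[0][1] = m01; m[0][2] = m02;
        m[1][0] = m10; m[1][1] = m11; m[1][2] = m12;
        m[2][0] = m20; m[2][1] = m21; m[2][2] = m22;
    }

    // Assign from a flat array of nine floats -- the pattern in Examples 4, 7, and 8.
    void Set(const float pts[9])
    {
        for (int k = 0; k < 9; ++k) m[k / 3][k % 3] = pts[k];
    }

    // Assign from three basis vectors -- the pattern in Example 3 (whether they are
    // stored as rows or columns depends on the library; columns are assumed here).
    void Set(const Vec3& c0, const Vec3& c1, const Vec3& c2)
    {
        m[0][0] = c0.x; m[0][1] = c1.x; m[0][2] = c2.x;
        m[1][0] = c0.y; m[1][1] = c1.y; m[1][2] = c2.y;
        m[2][0] = c0.z; m[2][1] = c1.z; m[2][2] = c2.z;
    }

    float Get(int i, int j) const { return m[i][j]; }

private:
    float m[3][3] = {};   // row-major storage, zero-initialized
};

int main()
{
    Matrix3 a;
    a.Set(0, 0, 1.0f); a.Set(1, 1, 1.0f); a.Set(2, 2, 1.0f);   // identity, as in Example 1
    const float pts[9] = { 0.0f, -1.0f, 0.0f,  1.0f, 0.0f, 0.0f,  0.0f, 0.0f, 1.0f };
    Matrix3 b;
    b.Set(pts);                                                // 90-degree rotation about Z
    std::printf("a(0,0)=%g  b(0,1)=%g\n", a.Get(0, 0), b.Get(0, 1));
    return 0;
}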
Example 1:
Matrix3 Matrix3::CreateIdentity()
{
    Matrix3 out;
    out.Set(0, 0, 1);
    out.Set(1, 1, 1);
    out.Set(2, 2, 1);
    return out;
}
Example 2: GetRotationMatrixFromQuaternion
void GetRotationMatrixFromQuaternion(const Vector& quat, Matrix3& mat)
{
    // mat is filled in by reference with the resulting rotation matrix;
    // quat should be a normalized quaternion (a, b, c, d) = (w, x, y, z)
    if(quat.Size() != 4)
    {
        return;
    }
    else
    {
        // see wikipedia - quaternion and spatial rotation
        double a = quat(0), b = quat(1), c = quat(2), d = quat(3);
        double rot11 = a * a + b * b - c * c - d * d;
        double rot12 = 2 * b * c - 2 * a * d;
        double rot13 = 2 * b * d + 2 * a * c;
        double rot21 = 2 * b * c + 2 * a * d;
        double rot22 = a * a - b * b + c * c - d * d;
        double rot23 = 2 * c * d - 2 * a * b;
        double rot31 = 2 * b * d - 2 * a * c;
        double rot32 = 2 * c * d + 2 * a * b;
        double rot33 = a * a - b * b - c * c + d * d;
        mat.Set(rot11, rot12, rot13,
                rot21, rot22, rot23,
                rot31, rot32, rot33);
    }
    return;
}
Example 3: BeginRender
void BeginRender()
{
    cout<<"\nBeginning Render...";
    float alpha = camera.fov;
    float l = 1.0;
    float h = l * tan(alpha/2.0 *(M_PI/180));
    float aspectRatio = (float)camera.imgWidth/camera.imgHeight;
    float s = aspectRatio * h;
    float dx = (2 * s)/camera.imgWidth;
    float dy = -(2 * h)/camera.imgHeight;
    float dxx = dx/2, dyy = dy/2;
    Point3 K(-s,h,-l);
    K.x += dxx;
    K.y += dyy;
    for(int i = 0; i < camera.imgHeight; i++){
        for(int j = 0; j < camera.imgWidth; j++){
            K.x += dx;
            Matrix3 RotMat;
            Point3 dvec = camera.dir - camera.pos;
            Point3 svec = camera.up.Cross(dvec);
            dvec.Normalize();
            svec.Normalize();
            camera.up.Normalize();
            RotMat.Set(svec, camera.up, dvec);
            Ray r(camera.pos, K);
            r.dir = r.dir * RotMat;
            r.dir.Normalize();
            HitInfo hInfo;
            hInfo.Init();
            if(rootNode.GetNumChild() > 0){
                // for(int k=0; k < rootNode.GetNumChild(); ++k){
                //     RayTrace(rootNode.GetChild(k), r, i * camera.imgWidth + j);
                // }
                if(RayTrace_2(r, hInfo))
                {
                    renderImage.PutPixel(i * camera.imgWidth + j, white, hInfo.z);
                }
                else renderImage.PutPixel(i * camera.imgWidth + j, black, BIGFLOAT);
            }
        }
        K.x = -s;
        K.x += dxx;
        K.y += dy;
    }
    cout<<"Render Complete"<<endl;
    renderImage.ComputeZBufferImage();
    renderImage.SaveZImage("/Users/varunk/Desktop/RayTracerProj1/RayTracerProj1/zbuffer.ppm");
    renderImage.SaveImage("/Users/varunk/Desktop/RayTracerProj1/RayTracerProj1/renderimage.ppm");
}
Example 4: PopulateImageParams
void PopulateImageParams()
{
    cout<<"Populating ImageParams..."<<endl;
    float alpha = camera.fov;
    float l = 1.0;
    float h = l * tan(alpha/2.0 *(M_PI/180));
    float aspectRatio = (float)camera.imgWidth/camera.imgHeight;
    float s = aspectRatio * abs(h);
    float dx = (2 * abs(s))/camera.imgWidth;
    float dy = -(2 * abs(h))/camera.imgHeight;
    float dxx = dx/2, dyy = dy/2;
    Point3 K(-s,h,-l);
    K.x += (dxx);
    K.y += (dyy);
    for(int i = 0; i < camera.imgHeight; i++){
        for(int j = 0; j < camera.imgWidth; j++){
            K.x += dx;
            Matrix3 RotMat;
            cyPoint3f f = camera.dir;
            f.Normalize();
            cyPoint3f s = f.Cross(camera.up);
            s.Normalize();
            cyPoint3f u = s.Cross(f);
            const float pts[9] = {s.x,u.x,-f.x, s.y,u.y,-f.y, s.z,u.z,-f.z};
            RotMat.Set(pts);
            Ray r(camera.pos, K);
            r.dir = r.dir * RotMat;
            r.dir.Normalize();
            /* Populating the Struct */
            Point2 pixLoc = Point2(j,i);
            //imageParams.K.push_back(K);
            imageParams.rendered.push_back(false);
            imageParams.PixLocation.push_back(pixLoc);
            imageParams.PixIndex.push_back(i * camera.imgWidth + j);
            imageParams.Ray.push_back(r);
        }
        K.x = -s;
        K.x += dxx;
        K.y += dy;
    }
}
Example 5: Get
Matrix3 Matrix3::operator*(const Matrix3& other) const
{
    Matrix3 out;
    int i, j, k;
    double t;
    for (i = 0; i < 3; i++)
    {
        for (j = 0; j < 3; j++)
        {
            t = 0.0;
            for (k = 0; k < 3; k++)
            {
                t += Get(i, k) * other.Get(k, j);
            }
            out.Set(i, j, t);
        }
    }
    return out;
}
Example 6: ExportNodeTM
void XsiExp::ExportNodeTM( INode * node, int indentLevel)
{
    // dump the full matrix
    Matrix3 matrix = node->GetNodeTM(GetStaticFrame());
    TSTR indent = GetIndent(indentLevel);
    fprintf(pStream,"%s\t%s {\n\n", indent.data(), "FrameTransformMatrix");
    Object * obj = node->EvalWorldState(0).obj;
    BOOL isBone = obj && obj->ClassID() == Class_ID(BONE_CLASS_ID, 0) ? TRUE : FALSE;
    if (node->GetParentNode() && node->GetParentNode()->IsRootNode())
    {
        // bone chains get grafted into the hierarchy tree
        //
        if (!isBone)
        {
            // root mesh
            oTopMatrix = matrix;
            AffineParts ap;
            decomp_affine( matrix, &ap);
            topMatrix.Set( Point3( ap.k.x,0.0f,0.0f), Point3( 0.0f,ap.k.z,0.0f), Point3(0.0f,0.0f,ap.k.y), Point3(0,0,0));
            // root transform is controlled by the engine
            matrix.IdentityMatrix();
        }
    }
    else
    {
        matrix = matrix * Inverse(node->GetParentTM(GetStaticFrame()));
        if (!isBone)
        {
            matrix.SetRow( 3, topMatrix * matrix.GetRow(3));
        }
    }
    // write the matrix values
    DumpMatrix3( &matrix, indentLevel+2);
    // transform close brace
    fprintf(pStream,"%s\t}\n", indent.data());
}
Example 7: PopulateImageParams
void PopulateImageParams()
{
    _f = camera.dir;
    _f.Normalize();
    _s = _f.Cross(camera.up);
    _s.Normalize();
    _u = _s.Cross(_f);
    cout<<"Populating ImageParams..."<<endl;
    float alpha = camera.fov;
    float l = camera.focaldist;
    float h = l * tan(alpha/2.0 *(M_PI/180.0));
    float aspectRatio = (float)camera.imgWidth/camera.imgHeight;
    float s = aspectRatio * abs(h);
    float dx = (2 * abs(s))/camera.imgWidth;
    float dy = -(2 * abs(h))/camera.imgHeight;
    float dxx = dx/2.0, dyy = dy/2.0;
    Point3 K(-s,h,-l);
    K.x += (dxx);
    K.y += (dyy);
    for(int i = 0; i < camera.imgHeight; i++){
        for(int j = 0; j < camera.imgWidth; j++){
            K.x += dx;
            Matrix3 RotMat;
            const float pts[9] = {_s.x,_u.x,-_f.x, _s.y,_u.y,-_f.y, _s.z,_u.z,-_f.z};
            RotMat.Set(pts);
            // K = RotMat*K;
            Cone r = Cone(camera.pos, K);
            r.dir = r.dir * RotMat;
            r.dir.Normalize();
            r.radius = 0.0;
            r.tanAngle = tan(abs(dyy)/(float)l);
            /* Populating the Struct */
            Point2 pixLoc = Point2(j,i);
            imageParams.K.push_back(K);
            imageParams.rendered.push_back(false);
            imageParams.PixLocation.push_back(pixLoc);
            imageParams.PixIndex.push_back(i * camera.imgWidth + j);
            imageParams.Ray.push_back(r);
            Point2 pixDimensions = Point2(dx,dy);
            imageParams.PixParams.push_back(pixDimensions);
            vector<Point3> ConfCirclePts;
            float randAng = rand() / (float) RAND_MAX;
            randAng *= M_PI * 2.0;
            for(int i = 1; i <= MAX_N_SAMPLES; i++){
                float hx = camera.dof * Halton(i, H_BASE_1);
                float hy = Halton(i, H_BASE_2);
                float r = sqrtf(hx);
                float theta = hy * M_PI * 2.0 + randAng;
                float x = r * cosf(theta);
                float y = r * sinf(theta);
                Point3 newCamPos(x, y, 0);
                newCamPos = newCamPos * RotMat;
                newCamPos += camera.pos;
                ConfCirclePts.push_back(newCamPos);
            }
            imageParams.ConfusionCirclePoints.push_back(ConfCirclePts);
        }
        K.x = -s;
        K.x += dxx;
        K.y += dy;
    }
}
Example 8: doRender
void doRender(void* arg){
    RenderParams rarg = *((RenderParams *)arg);
    //cout<<"Do render...."<<endl;
    bool pixelHit = false;
    HitInfo hitInfo;
    hitInfo.Init();
    Point2 pixLoc = rarg.pixLocation;
    Cone r = rarg.ray;
    int PixIndex = rarg.pixIndex;
    Color shade(0,0,0);
    vector<Point2> haltonXY;
    float dx = rarg.PixParams.x;
    float dy = rarg.PixParams.y;
    float x = 0;
    float y = 0;
    _f.Normalize();
    _s.Normalize();
    const float pts[9] = {_s.x,_u.x,-_f.x, _s.y,_u.y,-_f.y, _s.z,_u.z,-_f.z};
    Matrix3 RotMat;
    RotMat.Set(pts);
    for(int i = 0; i < MIN_N_SAMPLES; i++){
        x = dx * Halton(i+1, H_BASE_1);
        y = dy * Halton(i+1, H_BASE_2);
        if(x > dx * 0.5) { x -= dx; }
        if(y < dy * 0.5) { y -= dy; }
        x += rarg.K.x;
        y += rarg.K.y;
        Point2 sampleLoc = Point2(x,y);
        haltonXY.push_back(sampleLoc);
    }
    vector<Color> shades;
    if(rootNode.GetNumChild() > 0){
        for(int i = 0; i < MIN_N_SAMPLES; i++){
            Point3 sampleDir = Point3(haltonXY.at(i).x, haltonXY.at(i).y, rarg.K.z);
            int rindex = rand() % MAX_N_SAMPLES; //rindex = i;
            Point3 randPos = rarg.ConfCirclePts.at(rindex);
            Cone sampleRay = Cone(randPos, sampleDir);
            sampleRay.dir = sampleRay.dir * RotMat;
            sampleRay.dir -= randPos - camera.pos;
            sampleRay.dir.Normalize();
            sampleRay.radius = r.radius;
            sampleRay.tanAngle = r.tanAngle;
            r = sampleRay;
            if(RayTrace_2(r, hitInfo)) {
                pixelHit = true;
                shade = hitInfo.node->GetMaterial()->Shade(r, hitInfo, lights, 8);
                shades.push_back(shade);
            }
            hitInfo.Init();
        }
        if(VarianceOverThreshold(shades)){
            renderImage.SetSampleCountPixel(PixIndex, 255);
            hitInfo.Init();
            for(int i = MIN_N_SAMPLES; i < MAX_N_SAMPLES; i++){
                x = dx * Halton(i+1, H_BASE_1);
                y = dy * Halton(i+1, H_BASE_2);
                if(x > dx * 0.5) { x -= dx; }
                if(y < dy * 0.5) { y -= dy; }
                x += rarg.K.x;
                y += rarg.K.y;
                Point2 sampleLoc = Point2(x,y);
                haltonXY.push_back(sampleLoc);
                Point3 sampleDir = Point3(haltonXY.at(i).x, haltonXY.at(i).y, rarg.K.z);
                int rindex = rand() % MAX_N_SAMPLES; //rindex = i;
                Point3 randPos = rarg.ConfCirclePts.at(rindex);
                Cone sampleRay = Cone(randPos, sampleDir);
                sampleRay.dir = sampleRay.dir * RotMat;
                sampleRay.dir -= randPos - camera.pos;
                sampleRay.dir.Normalize();
                sampleRay.radius = r.radius;
                sampleRay.tanAngle = r.tanAngle;
                r = sampleRay;
                if(RayTrace_2(r, hitInfo)) {
                    pixelHit = true;
                    shade = hitInfo.node->GetMaterial()->Shade(r, hitInfo, lights, 5);
//......... the rest of the code is omitted here .........
Example 9: IGenerateTransform
// Generate local to world from face info (pos, normal, etc)
Matrix3 plDistributor::IGenerateTransform(int iRepNode, int iFace, const Point3& pt, const Point3& bary) const
{
    const float kMinVecLengthSq = 1.e-6f;
    Matrix3 l2w(true);

    // First, set the scale
    Point3 scale;
    switch( fScaleLock )
    {
    case kLockX | kLockY:
        scale.x = fRand.RandRangeF(fScaleLo.x, fScaleHi.x);
        scale.y = scale.x;
        scale.z = fRand.RandRangeF(fScaleLo.z, fScaleHi.z);
        break;
    case kLockX | kLockY | kLockZ:
        scale.x = fRand.RandRangeF(fScaleLo.x, fScaleHi.x);
        scale.y = scale.z = scale.x;
        break;
    default:
        scale.x = fRand.RandRangeF(fScaleLo.x, fScaleHi.x);
        scale.y = fRand.RandRangeF(fScaleLo.y, fScaleHi.y);
        scale.z = fRand.RandRangeF(fScaleLo.z, fScaleHi.z);
        break;
    }
    l2w.Scale(scale);

    // Next up, get the rotation.
    // First we'll randomly rotate about local Z
    float azimRot = fRand.RandMinusOneToOne() * fAzimuthRange;
    Matrix3 azimMat;
    azimMat.SetRotateZ(azimRot);
    l2w = l2w * azimMat;

    // Now align with the surface.
    // Get the interpolated surface normal.
    Point3 surfNorm = IGetSurfaceNormal(iFace, bary);

    Matrix3 repNodeTM = fRepNodes[iRepNode]->GetNodeTM(TimeValue(0));
    Point3 alignVec = repNodeTM.GetRow(2);
    alignVec = alignVec * fWorldToSurfVec;
    alignVec = FNormalize(alignVec);

    Point3 norm = surfNorm + (alignVec - surfNorm) * fAlignWgt;
    // The norm can come out of this zero length, if the surface normal
    // is directly opposite the "natural" up direction and the weight
    // is 50% (for example). In that case, this is just a bad place
    // to drop this replicant.
    if( norm.LengthSquared() < kMinVecLengthSq )
    {
        l2w.IdentityMatrix();
        return l2w;
    }
    norm = norm.Normalize();

    // Randomize through the cone around that.
    Point3 rndNorm = norm;
    Point3 rndDir = IPerpAxis(norm);
    Point3 rndOut = rndDir ^ norm;
    rndDir *= fRand.RandMinusOneToOne();
    float len = sqrt(1.f - rndDir.LengthSquared());
    rndOut *= len;
    if( fRand.RandMinusOneToOne() < 0 )
        rndOut *= -1.f;
    Point3 rndPol = rndDir + rndOut;

    float polScale = fRand.RandZeroToOne() * fTanPolarRange;
    // Combine using the bunching factor
    polScale = polScale * (1.f - fPolarBunch) + polScale * polScale * fPolarBunch;

    rndPol *= polScale;
    rndNorm += rndPol;
    norm = rndNorm.Normalize();

    // Have "up" alignment, now just generate random dir vector perpendicular to up
    Point3 dir = repNodeTM.GetRow(1);
    dir = dir * fWorldToSurfVec;
    Point3 out = dir ^ norm;
    if( out.LengthSquared() < kMinVecLengthSq )
    {
        if( fAzimuthRange < M_PI * 0.5f )
        {
            l2w.IdentityMatrix();
            return l2w;
        }
        else
        {
            dir = IPerpAxis(norm);
            out = dir ^ norm;
        }
    }
    out = FNormalize(out);
    dir = norm ^ out;

    // If our "up" direction points into the surface, return an "up" direction
    // tangent to the surface. Also, make the "dir" direction point out from
//......... the rest of the code is omitted here .........
Example 10: Render
void ObjectRenderer::Render(){
    if(!bDrawObject) return;

    if(mObject!=NULL){
        glPushMatrix();
        glMultMatrixf(mObject->GetReferenceFrame().GetHMatrix().RowOrderForceFloat());
    }
    if(bDrawRef)
        GLT::DrawRef(mRefSize);

    for(int i=0;i<int(mShapes.size());i++){
        glPushMatrix();
        if(bDrawCom){
            GLT::SetColor(mComColor[0],mComColor[1],mComColor[2],mComColor[3]);
            Matrix ine(3,3);
            Vector d(3);
            Matrix eg(3,3);
            Matrix3 egt;
            Matrix3 degt;
            Matrix dd(3,3);
            Matrix3 ddd;
            ine = (mObject->GetSpatialInertia().mInertiaMoment);
            ine.EigenValuesDecomposition(d, eg);
            egt.Set(eg);
            egt.STranspose();
            dd.Diag(d);
            ddd.Set(dd);
            egt.Mult(ddd,degt);
            glPushMatrix();
            Vector3 &com = mObject->GetSpatialInertia().mCenterOfMass;
            glTranslatef(com[0],com[1],com[2]);
            GLT::DrawVector(degt.GetColumn(0),0.1);
            GLT::DrawVector(degt.GetColumn(1),0.1);
            GLT::DrawVector(degt.GetColumn(2),0.1);
            glPopMatrix();
        }
        float col[4];
        //bUseDefaultColor is not needed since providing no color tag automatically sets the color to default
        // if(bUseDefaultColor){
        //     col[0] = mDefaultColor[0];
        //     col[1] = mDefaultColor[1];
        //     col[2] = mDefaultColor[2];
        //     col[3] = mDefaultColor[3];
        // }else{
        col[0] = mShapes[i]->color[0];
        col[1] = mShapes[i]->color[1];
        col[2] = mShapes[i]->color[2];
        col[3] = mShapes[i]->color[3];
        // }
        if(!bUseTransparency)
            col[3] = 1.0;
        glColor4fv(col);
        glMaterialfv(GL_FRONT_AND_BACK, GL_SPECULAR, col);
        if(mShapes[i]->culling)
            glDisable(GL_CULL_FACE);
        if(mShapes[i]->shape){
            mShapes[i]->shape->Render();
        }
        if(mShapes[i]->culling)
            glEnable(GL_CULL_FACE);
        glPopMatrix();
    }
    if(mObject!=NULL){
        glPopMatrix();
    }
    AbstractRenderer::Render();
}