This article collects typical usage examples of the C++ method ossimDpt::makeNan. If you are wondering what exactly ossimDpt::makeNan does, how to call it, or what real code that uses it looks like, the hand-picked examples below should help. You can also explore further usage examples of the containing class, ossimDpt.
The following shows 15 code examples of ossimDpt::makeNan, sorted by popularity by default.
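Before the individual examples, here is a minimal, self-contained sketch of the convention that every snippet below follows: the output ossimDpt is marked invalid up front with makeNan(), filled in only when the computation can actually be performed, and then tested by the caller with hasNans() (or isNan()). The helper function and its arguments are made up for illustration and are not part of OSSIM; only ossimDpt, makeNan(), hasNans() and the include path are assumed from the library, and the header path may differ between OSSIM versions.
#include <ossim/base/ossimDpt.h>   // ossimDpt; path may vary by OSSIM version
#include <iostream>

// Hypothetical helper, not part of OSSIM: mimics the "makeNan first,
// fill in on success" pattern used throughout the examples below.
void lookupPoint(bool haveData, ossimDpt& result)
{
   result.makeNan();                 // start from an invalid (NaN) point
   if (haveData)
   {
      result = ossimDpt(10.0, 20.0); // only assign a real value on success
   }
}

int main()
{
   ossimDpt pt;
   lookupPoint(false, pt);
   if (pt.hasNans())                 // callers test for NaN to detect failure
   {
      std::cout << "lookup failed, point is NaN\n";
   }
   return 0;
}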
Example 1: imageToView
void ossimIvtGeomXform::imageToView(const ossimDpt& ipt, ossimDpt& viewPt)
{
viewPt.makeNan();
if(m_ivt.valid())
{
m_ivt->imageToView(ipt, viewPt);
}
}
Example 2: groundToImage
void ossimIvtGeomXform::groundToImage(const ossimGpt& gpt, ossimDpt& ipt)
{
ipt.makeNan();
if(m_geom.valid())
{
m_geom->worldToLocal(gpt, ipt);
}
}
Example 3: viewToImage
void ossimIvtGeomXform::viewToImage(const ossimDpt& viewPt, ossimDpt& ipt)
{
ipt.makeNan();
if(m_ivt.valid())
{
m_ivt->viewToImage(viewPt, ipt);
}
}
Example 4: worldToLineSample
//*****************************************************************************
// METHOD: ossimRpcProjection::worldToLineSample()
//
// Overrides base class implementation. Directly computes line-sample from
// the polynomials.
//*****************************************************************************
void ossimRpcProjection::worldToLineSample(const ossimGpt& ground_point,
ossimDpt& imgPt) const
{
if (traceExec()) ossimNotify(ossimNotifyLevel_DEBUG) << "DEBUG ossimRpcProjection::worldToLineSample(): entering..." << std::endl;
if(ground_point.isLatNan() ||
ground_point.isLonNan() )
{
imgPt.makeNan();
return;
}
//*
// Normalize the lat, lon, hgt:
//*
double nlat = (ground_point.lat - theLatOffset) / theLatScale;
double nlon = (ground_point.lon - theLonOffset) / theLonScale;
double nhgt;
if(ossim::isnan(ground_point.hgt))
{
nhgt = (theHgtScale - theHgtOffset) / theHgtScale;
}
else
{
nhgt = (ground_point.hgt - theHgtOffset) / theHgtScale;
}
//***
// Compute the adjusted, normalized line (U) and sample (V):
//***
double Pu = polynomial(nlat, nlon, nhgt, theLineNumCoef);
double Qu = polynomial(nlat, nlon, nhgt, theLineDenCoef);
double Pv = polynomial(nlat, nlon, nhgt, theSampNumCoef);
double Qv = polynomial(nlat, nlon, nhgt, theSampDenCoef);
double U_rot = Pu / Qu;
double V_rot = Pv / Qv;
//***
// U, V are normalized quantities. Need now to establish the image file
// line and sample. First, back out the adjustable parameter effects
// starting with rotation:
//***
double U = U_rot*theCosMapRot + V_rot*theSinMapRot;
double V = V_rot*theCosMapRot - U_rot*theSinMapRot;
//***
// Now back out skew, scale, and offset adjustments:
//***
imgPt.line = U*(theLineScale+theIntrackScale) + theLineOffset + theIntrackOffset;
imgPt.samp = V*(theSampScale+theCrtrackScale) + theSampOffset + theCrtrackOffset;
if (traceExec()) ossimNotify(ossimNotifyLevel_DEBUG) << "DEBUG ossimRpcProjection::worldToLineSample(): returning..." << std::endl;
return;
}
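As a reading aid (this note is not taken from the OSSIM sources), the normalization and polynomial-ratio steps above are the standard RPC forward model. Leaving aside the adjustable rotation, skew and offset parameters that this class adds on top, the computation amounts to:
P = (lat - LAT_OFF) / LAT_SCALE,   L = (lon - LON_OFF) / LON_SCALE,   H = (hgt - HGT_OFF) / HGT_SCALE
U = P_lineNum(P, L, H) / P_lineDen(P, L, H),   V = P_sampNum(P, L, H) / P_sampDen(P, L, H)
line = U * LINE_SCALE + LINE_OFF,   samp = V * SAMP_SCALE + SAMP_OFF
where the offset/scale constants correspond to theLatOffset/theLatScale, theLineScale/theLineOffset, etc. in the code, and each P_* is a cubic polynomial in the three normalized ground coordinates.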
Example 5: getDecimationFactor
void ossimImageSourceSequencer::getDecimationFactor(ossim_uint32 resLevel,
ossimDpt& result) const
{
if(theInputConnection)
{
theInputConnection->getDecimationFactor(resLevel, result);
return;
}
// No input connection: flag the result as invalid.
result.makeNan();
}
Example 6: groundToView
void ossimIvtGeomXform::groundToView(const ossimGpt& gpt, ossimDpt& viewPt)
{
ossimDpt ipt;
viewPt.makeNan();
groundToImage(gpt, ipt);
if(!ipt.hasNans())
{
imageToView(ipt, viewPt);
}
}
Example 7: viewToImage
//*****************************************************************************
// Other workhorse of the object. Converts view-space to image-space.
//*****************************************************************************
void ossimImageViewProjectionTransform::viewToImage(const ossimDpt& vp, ossimDpt& ip) const
{
// Check for same geometries on input and output (this includes NULL geoms):
if (m_imageGeometry == m_viewGeometry)
{
ip = vp;
return;
}
// Otherwise we need access to good geoms. Check for a bad geometry object:
if (!m_imageGeometry || !m_viewGeometry)
{
ip.makeNan();
return;
}
// Check for same projection on input and output sides to save projection to ground:
const ossimProjection* iproj = m_imageGeometry->getProjection();
const ossimProjection* vproj = m_viewGeometry->getProjection();
if ((iproj && vproj && iproj->isEqualTo(*vproj)) || (iproj == vproj))
{
// Check for possible same 2D transforms as well:
const ossim2dTo2dTransform* ixform = m_imageGeometry->getTransform();
const ossim2dTo2dTransform* vxform = m_viewGeometry->getTransform();
if (((ixform && vxform && ixform->isEqualTo(*vxform)) || (ixform == vxform)) &&
(m_imageGeometry->decimationFactor(0) == m_viewGeometry->decimationFactor(0)))
{
ip = vp;
return;
}
// Not the same 2D transform, so just perform local-image -> full-image -> local-view:
ossimDpt fp;
m_viewGeometry->rnToFull(vp, 0, fp);
m_imageGeometry->fullToRn(fp, 0, ip);
return;
}
//---
// Completely different left and right side geoms (typical situation).
// Need to project to ground.
//---
ossimGpt gp;
m_viewGeometry->localToWorld(vp, gp);
m_imageGeometry->worldToLocal(gp, ip);
#if 0 /* Please leave for debug. */
cout <<"DEBUG ossimImageViewProjectionTransform::viewToImage:"
<<"\n vp: "<<vp
<<"\n gp: "<<gp
<<"\n ip: "<<ip
<<std::endl;
#endif
}
Example 8: getDecimationFactor
void ossimMrSidReader::getDecimationFactor(ossim_uint32 resLevel,
ossimDpt& result) const
{
if (theGeometry.valid())
{
theGeometry->decimationFactor(resLevel, result);
}
else
{
result.makeNan();
}
}
Example 9: worldToLineSample
void ossimBuckeyeSensor::worldToLineSample(const ossimGpt& world_point,
ossimDpt& image_point) const
{
if (traceDebug()) ossimNotify(ossimNotifyLevel_DEBUG) << "DEBUG ossimBuckeyeSensor::worldToLineSample: entering..." << std::endl;
if((theBoundGndPolygon.getNumberOfVertices() > 0)&&
(!theBoundGndPolygon.hasNans()))
{
if (!(theBoundGndPolygon.pointWithin(world_point)))
{
image_point.makeNan();
return;
}
}
ossimEcefPoint g_ecf(world_point);
ossimEcefVector ecfRayDir(g_ecf - theAdjEcefPlatformPosition);
ossimColumnVector3d camRayDir (theCompositeMatrixInverse*ecfRayDir.data());
double scale = -theFocalLength/camRayDir[2];
ossimDpt film (scale*camRayDir[0], scale*camRayDir[1]);
if (theLensDistortion.valid())
{
ossimDpt filmOut;
theLensDistortion->distort(film, filmOut);
film = filmOut;
}
ossimDpt f1(film + thePrincipalPoint);
ossimDpt p1(f1.x/thePixelSize.x, -f1.y/thePixelSize.y);
ossimDpt p0(p1.x + theRefImgPt.x, p1.y + theRefImgPt.y);
image_point = p0;
if (traceDebug()) ossimNotify(ossimNotifyLevel_DEBUG) << "DEBUG ossimBuckeyeSensor::worldToLineSample: returning..." << std::endl;
}
Example 10: worldToLineSample
void ossimTileMapModel::worldToLineSample(const ossimGpt& ground_point,
ossimDpt& img_pt) const
{
// if (traceExec()) ossimNotify(ossimNotifyLevel_DEBUG) << "DEBUG ossimTileMapModel::worldToLineSample(): entering..." << std::endl;
if(ground_point.isLatNan() || ground_point.isLonNan() )
{
img_pt.makeNan();
return;
}
double x = (180.0 + ground_point.lon) / 360.0;
double y = - ground_point.lat * M_PI / 180; // convert to radians
y = 0.5 * log((1+sin(y)) / (1 - sin(y)));
y *= 1.0/(2 * M_PI); // scale factor from radians to normalized
y += 0.5; // and make y range from 0 - 1
img_pt.samp = floor(x*pow(2.,static_cast<double>(qDepth))*256);
img_pt.line = floor(y*pow(2.,static_cast<double>(qDepth))*256);
return;
}
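For orientation (this note is not from the OSSIM sources), the arithmetic above is the usual Web Mercator "slippy map" tiling formula. With longitude lon and latitude lat in degrees and qDepth the zoom level, the example computes:
x = (lon + 180) / 360
y = 0.5 - atanh(sin(lat * pi / 180)) / (2 * pi)
samp = floor(x * 2^qDepth * 256),   line = floor(y * 2^qDepth * 256)
so that (x, y) range over [0, 1] and zoom level qDepth covers 2^qDepth by 2^qDepth tiles of 256 by 256 pixels. The 0.5*log((1+sin(y))/(1-sin(y))) term in the code is exactly atanh(sin(y)).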
Example 11: getScaleChangeViewToImage
void ossimImageViewTransform::getScaleChangeViewToImage(ossimDpt& result,
const ossimDrect& viewRect)
{
result.makeNan();
if(!viewRect.hasNans())
{
ossimDpt iul;
ossimDpt iur;
ossimDpt ilr;
ossimDpt ill;
viewToImage(viewRect.ul(), iul);
viewToImage(viewRect.ur(), iur);
viewToImage(viewRect.lr(), ilr);
viewToImage(viewRect.ll(), ill);
if(!iul.hasNans()&&
!iur.hasNans()&&
!ilr.hasNans()&&
!ill.hasNans())
{
double deltaTop = (iul - iur).length();
double deltaBottom = (ill - ilr).length();
double deltaRight = (iur - ilr).length();
double deltaLeft = (iul - ill).length();
double w = viewRect.width();
double h = viewRect.height();
result.x = (deltaTop/w + deltaBottom/w)*.5;
result.y = (deltaRight/h + deltaLeft/h)*.5;
}
}
}
Example 12: getScaleChangeImageToView
void ossimImageViewTransform::getScaleChangeImageToView(ossimDpt& result,
const ossimDrect& imageRect)
{
result.makeNan();
if(!imageRect.hasNans())
{
ossimDpt vul;
ossimDpt vur;
ossimDpt vlr;
ossimDpt vll;
imageToView(imageRect.ul(), vul);
imageToView(imageRect.ur(), vur);
imageToView(imageRect.lr(), vlr);
imageToView(imageRect.ll(), vll);
if(!vul.hasNans()&&
!vur.hasNans()&&
!vlr.hasNans()&&
!vll.hasNans())
{
double deltaTop = (vul - vur).length();
double deltaBottom = (vll - vlr).length();
double deltaRight = (vur - vlr).length();
double deltaLeft = (vul - vll).length();
double w = imageRect.width();
double h = imageRect.height();
result.x = (deltaTop/w + deltaBottom/w)*.5;
result.y = (deltaRight/h + deltaLeft/h)*.5;
}
}
}
Example 13: worldToLineSample
void ossimEquDistCylProjection::worldToLineSample(const ossimGpt &worldPoint,
ossimDpt& lineSample)const
{
if(theModelTransformUnitType != OSSIM_UNIT_UNKNOWN)
{
ossimMapProjection::worldToLineSample(worldPoint, lineSample);
return;
}
else
{
// make sure our tie point is good and world point
// is good.
//
if(theUlEastingNorthing.isNan()||
worldPoint.isLatNan() || worldPoint.isLonNan())
{
lineSample.makeNan();
return;
}
// initialize line sample
// lineSample = ossimDpt(0,0);
// I am commenting this code out because I am going to
// move it to the ossimImageViewProjectionTransform.
//
// see if we have a datum set and if so
// shift the world to our datum. If not then
// find the easting northing value for the world
// point.
if(theDatum)
{
ossimGpt gpt = worldPoint;
gpt.changeDatum(theDatum);
// lineSample is currently in easting northing
// and will need to be converted to line sample.
lineSample = forward(gpt);
}
else
{
// lineSample is currently in easting northing
// and will need to be converted to line sample.
lineSample = forward(worldPoint);
}
// check the final result to make sure there were no
// problems.
//
if(!lineSample.isNan())
{
// if(!isIdentityMatrix())
// {
// ossimDpt temp = lineSample;
// lineSample.x = theInverseTrans[0][0]*temp.x+
// theInverseTrans[0][1]*temp.y+
// theInverseTrans[0][2];
// lineSample.y = theInverseTrans[1][0]*temp.x+
// theInverseTrans[1][1]*temp.y+
// theInverseTrans[1][2];
// }
// else
{
lineSample.x = ((lineSample.x - theUlEastingNorthing.x)/theMetersPerPixel.x);
// We must remember that the Northing is negative since the positive
// axis for an image is assumed to go down since it's image space.
lineSample.y = (-(lineSample.y - theUlEastingNorthing.y)/theMetersPerPixel.y);
}
}
}
}
Example 14: worldToLineSample
//*****************************************************************************
// METHOD: ossimRpcModel::worldToLineSample()
//
// Overrides base class implementation. Directly computes line-sample from
// the polynomials.
//*****************************************************************************
void ossimRpcModel::worldToLineSample(const ossimGpt& ground_point,
ossimDpt& img_pt) const
{
// if (traceExec()) ossimNotify(ossimNotifyLevel_DEBUG) << "DEBUG ossimRpcModel::worldToLineSample(): entering..." << std::endl;
if(ground_point.isLatNan() || ground_point.isLonNan() )
{
img_pt.makeNan();
return;
}
//***
// First check if the world point is inside bounding rectangle:
//***
//ossimDpt wdp (ground_point);
//if (!(theBoundGndPolygon.pointWithin(ground_point)))
// {
//img_pt = extrapolate(ground_point);
//if (traceExec()) CLOG << "returning..." << endl;
// img_pt.makeNan();
// return;
// }
//***
// Normalize the lat, lon, hgt:
//***
double nlat = (ground_point.lat - theLatOffset) / theLatScale;
double nlon = (ground_point.lon - theLonOffset) / theLonScale;
double nhgt;
if( ground_point.isHgtNan() )
{
// nhgt = (theHgtScale - theHgtOffset) / theHgtScale;
nhgt = ( - theHgtOffset) / theHgtScale;
}
else
{
nhgt = (ground_point.hgt - theHgtOffset) / theHgtScale;
}
//***
// Compute the adjusted, normalized line (U) and sample (V):
//***
double Pu = polynomial(nlat, nlon, nhgt, theLineNumCoef);
double Qu = polynomial(nlat, nlon, nhgt, theLineDenCoef);
double Pv = polynomial(nlat, nlon, nhgt, theSampNumCoef);
double Qv = polynomial(nlat, nlon, nhgt, theSampDenCoef);
double U_rot = Pu / Qu;
double V_rot = Pv / Qv;
//***
// U, V are normalized quantities. Need now to establish the image file
// line and sample. First, back out the adjustable parameter effects
// starting with rotation:
//***
double U = U_rot*theCosMapRot + V_rot*theSinMapRot;
double V = V_rot*theCosMapRot - U_rot*theSinMapRot;
//***
// Now back out skew, scale, and offset adjustments:
//***
img_pt.line = U*(theLineScale+theIntrackScale) + theLineOffset + theIntrackOffset;
img_pt.samp = V*(theSampScale+theCrtrackScale) + theSampOffset + theCrtrackOffset;
// if (traceExec()) ossimNotify(ossimNotifyLevel_DEBUG) << "DEBUG ossimRpcModel::worldToLineSample(): returning..." << std::endl;
return;
}
Example 15: worldToLineSample
//---------------------------------------------------------------------------**
// METHOD: ossimSensorModel::worldToLineSample()
//
// Performs forward projection of ground point to image space.
//
//---------------------------------------------------------------------------**
void ossimH5GridModel::worldToLineSample(const ossimGpt& worldPoint,
ossimDpt& ip) const
{
static const double PIXEL_THRESHOLD = .1; // acceptable pixel error
static const int MAX_NUM_ITERATIONS = 20;
if(worldPoint.isLatNan() || worldPoint.isLonNan())
{
ip.makeNan();
return;
}
//---
// First check if the world point is inside bounding rectangle:
//---
int iters = 0;
ossimDpt wdp (worldPoint);
if ( m_crossesDateline && (wdp.x < 0.0) )
{
// Longitude points stored between 0 and 360.
wdp.x += 360.0;
}
if( (m_boundGndPolygon.getNumberOfVertices() > 0) &&
(!m_boundGndPolygon.hasNans()) )
{
if (!(m_boundGndPolygon.pointWithin(wdp)))
{
// if(theSeedFunction.valid())
// {
// theSeedFunction->worldToLineSample(worldPoint, ip);
// }
// else if(!theExtrapolateGroundFlag) // if I am not already in the extrapolation routine
// {
// ip = extrapolate(worldPoint);
// }
ip.makeNan();
return;
}
}
//---
// Substitute zero for null elevation if present:
//---
double height = worldPoint.hgt;
if ( ossim::isnan(height) )
{
height = 0.0;
}
//---
// Utilize iterative scheme for arriving at image point. Begin with guess
// at image center:
//---
if(theSeedFunction.valid())
{
theSeedFunction->worldToLineSample(worldPoint, ip);
}
else
{
ip.u = theRefImgPt.u;
ip.v = theRefImgPt.v;
}
ossimDpt ip_du;
ossimDpt ip_dv;
ossimGpt gp, gp_du, gp_dv;
double dlat_du, dlat_dv, dlon_du, dlon_dv;
double delta_lat, delta_lon, delta_u, delta_v;
double inverse_norm;
bool done = false;
//---
// Begin iterations:
//---
do
{
//---
// establish perturbed image points about the guessed point:
//---
ip_du.u = ip.u + 1.0;
ip_du.v = ip.v;
ip_dv.u = ip.u;
ip_dv.v = ip.v + 1.0;
//---
// Compute numerical partials at current guessed point:
//---
lineSampleHeightToWorld(ip, height, gp);
lineSampleHeightToWorld(ip_du, height, gp_du);
lineSampleHeightToWorld(ip_dv, height, gp_dv);
if(gp.isLatNan() || gp.isLonNan())
//......... the remainder of this example has been omitted .........