This article collects typical usage examples of the C++ LINFO function. If you are wondering what exactly LINFO does, how to call it, or where to find real code that uses it, the hand-picked examples below may help.
A total of 15 code examples of the LINFO function are shown below, sorted by popularity by default.
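Note that LINFO is not part of the C++ standard library; each of the codebases excerpted below defines it as its own informational logging macro, and the calling convention varies (some examples use a printf-style LINFO("fmt", args...), others a stream-style LINFO(msg << value), and the Linux USB gadget example passes the LUN as the first argument). As a rough orientation only, here is a minimal sketch of a printf-style variant; the helper name log_emit, the main() usage, and the output format are illustrative assumptions, not the definition used by any of these projects.

#include <cstdarg>
#include <cstdio>

// Hypothetical backend: prefixes the message with a level tag and writes it to stderr.
static void log_emit(const char* level, const char* fmt, ...)
{
    std::va_list ap;
    va_start(ap, fmt);
    std::fprintf(stderr, "[%s] ", level);
    std::vfprintf(stderr, fmt, ap);
    std::fprintf(stderr, "\n");
    va_end(ap);
}

// Minimal stand-ins for the logging macros seen in the examples below.
#define LINFO(...)  log_emit("INFO",  __VA_ARGS__)
#define LERROR(...) log_emit("ERROR", __VA_ARGS__)

int main()
{
    const char* path = "/tmp/frames";  // hypothetical value, for illustration only
    int nframes = 42;
    LINFO("Loaded %d frames from %s", nframes, path);   // prints: [INFO] Loaded 42 frames from /tmp/frames
    LERROR("Could not open %s -- IGNORED", path);       // prints: [ERROR] Could not open /tmp/frames -- IGNORED
    return 0;
}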
Example 1: imx175_i2c_add_driver
static int __init imx175_i2c_add_driver(
void)
{
LINFO("%s called\n", __func__);
return i2c_add_driver(imx175_act_t.i2c_driver);
}
Example 2: GVX_TRACE
// ######################################################################
Image<int> IntegerSimpleChannel::getOutputInt()
{
GVX_TRACE(__PRETTY_FUNCTION__);
if (!this->hasInput())
// if you think this LFATAL() has been triggered incorrectly, then
// first make sure that somebody has called setInputDims()
CLFATAL("Oops! can't get output -- I don't even have any input yet");
if (!this->outputAvailable())
{
// it's possible that we have input but don't yet have output in
// the case of a channel that requires several input frames
// before it can start generating output (such as a flicker or
// motion channel); in that case we just return an empty image
// of the appropriate size
LERROR("No %s channel yet! -- IGNORING.", this->tagName().c_str());
return Image<int>(this->getMapDims(), ZEROS);
}
if (!itsOutputCache.initialized())
{
itsOutputCache = Image<int>(getMapDims(), ZEROS);
// compute max-normalized weighted sum of center-surround at all levels:
for (uint idx = 0; idx < itsLevelSpec.getVal().maxIndex(); ++idx)
{
const Image<int> submap = getSubmapInt(idx); // get the unweighted map
// add submap to our sum
itsOutputCache += (submap / int(itsLevelSpec.getVal().maxIndex()));
if (MYLOGVERB >= LOG_DEBUG)
{
uint clev = 0, slev = 0;
itsLevelSpec.getVal().indexToCS(idx, clev, slev);
LDEBUG("%s(%d,%d): weight %f", tagName().c_str(), clev, slev, 1.0f);
}
}
// apply max-normalization on the output as needed:
if (itsNormalizeOutput.getVal())
{
LDEBUG("%s: Normalizing output: %s(%d .. %d)", tagName().c_str(),
maxNormTypeName(itsNormType.getVal()), itsOutputRangeMin.getVal(),
itsOutputRangeMax.getVal());
itsOutputCache =
intgMaxNormalize(itsOutputCache, itsOutputRangeMin.getVal(),
itsOutputRangeMax.getVal(), itsNormType.getVal());
}
// print some debug info if in debug mode:
if (MYLOGVERB >= LOG_DEBUG)
{
int mi, ma; getMinMax(itsOutputCache, mi, ma);
LDEBUG("%s: final range [%d .. %d]", tagName().c_str(), mi, ma);
}
LINFO("Computed %s Conspicuity Map", descriptiveName().c_str());
}
return itsOutputCache;
}
Example 3: LINFO
void OTBSpectralAngleDistanceImageFilterProcessor::process() {
try
{
//Detect the number of spectral bands the input image has.
nbBands = inPort_.getData()->GetNumberOfComponentsPerPixel();
LINFO("Number of Bands detected: " << nbBands);
updateBands(nbBands);
MultiSpectralImageType::PixelType pixelRef;
//Pass the parameters to filter
//depending on input image's spectral bands.
switch (nbBands) {
case 1: {
pixelRef.SetSize(1);
pixelRef[0] = refPixel0_.get();
break;
}
case 2: {
pixelRef.SetSize(2);
pixelRef[0] = refPixel0_.get();
pixelRef[1] = refPixel1_.get();
break;
}
case 3: {
pixelRef.SetSize(3);
pixelRef[0] = refPixel0_.get();
pixelRef[1] = refPixel1_.get();
pixelRef[2] = refPixel2_.get();
break;
}
case 4: {
pixelRef.SetSize(4);
pixelRef[0] = refPixel0_.get();
pixelRef[1] = refPixel1_.get();
pixelRef[2] = refPixel2_.get();
pixelRef[3] = refPixel3_.get();
break;
}
case 5: {
pixelRef.SetSize(5);
pixelRef[0] = refPixel0_.get();
pixelRef[1] = refPixel1_.get();
pixelRef[2] = refPixel2_.get();
pixelRef[3] = refPixel3_.get();
pixelRef[4] = refPixel4_.get();
break;
}
case 6: {
pixelRef.SetSize(6);
pixelRef[0] = refPixel0_.get();
pixelRef[1] = refPixel1_.get();
pixelRef[2] = refPixel2_.get();
pixelRef[3] = refPixel3_.get();
pixelRef[4] = refPixel4_.get();
pixelRef[5] = refPixel5_.get();
break;
}
case 7: {
pixelRef.SetSize(7);
pixelRef[0] = refPixel0_.get();
pixelRef[1] = refPixel1_.get();
pixelRef[2] = refPixel2_.get();
pixelRef[3] = refPixel3_.get();
pixelRef[4] = refPixel4_.get();
pixelRef[5] = refPixel5_.get();
pixelRef[6] = refPixel6_.get();
break;
}
case 8: {
pixelRef.SetSize(8);
pixelRef[0] = refPixel0_.get();
pixelRef[1] = refPixel1_.get();
pixelRef[2] = refPixel2_.get();
pixelRef[3] = refPixel3_.get();
pixelRef[4] = refPixel4_.get();
pixelRef[5] = refPixel5_.get();
pixelRef[6] = refPixel6_.get();
pixelRef[7] = refPixel7_.get();
break;
}
}
filter->SetInput(inPort_.getData());
filter->SetReferencePixel(pixelRef);
filter->UpdateLargestPossibleRegion();
filter->Update();
outPort_.setData(filter->GetOutput());
LINFO("Spectral Angle Distance Image Filter Connected!");
}
catch (int e)
{
LERROR("Error in Spectral Angle Distance Image Filter");
return;
}
}
Example 4: LINFO
void FilteringForwarder::forward(const RuntimeStatus & s){
LINFO("RC") << "forward status" << LE;
wrappedForwarder->forward(s);
}
Example 5: JEVOIS_TRACE
// ##############################################################################################################
void jevois::Camera::setFormat(jevois::VideoMapping const & m)
{
JEVOIS_TRACE(2);
JEVOIS_TIMED_LOCK(itsMtx);
// Get current format:
itsFormat.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
XIOCTL(itsFd, VIDIOC_G_FMT, &itsFormat);
// Set desired format:
itsFormat.fmt.pix.width = m.cw;
itsFormat.fmt.pix.height = m.ch;
itsFormat.fmt.pix.pixelformat = m.cfmt;
itsFormat.fmt.pix.field = V4L2_FIELD_NONE;
itsFps = m.cfps;
LDEBUG("Requesting video format " << itsFormat.fmt.pix.width << 'x' << itsFormat.fmt.pix.height << ' ' <<
jevois::fccstr(itsFormat.fmt.pix.pixelformat));
XIOCTL(itsFd, VIDIOC_S_FMT, &itsFormat);
// Get the format back as the driver may have adjusted some sizes, etc:
XIOCTL(itsFd, VIDIOC_G_FMT, &itsFormat);
// The driver returns a different format code, may be the mbus code instead of the v4l2 fcc...
itsFormat.fmt.pix.pixelformat = v4l2sunxiFix(itsFormat.fmt.pix.pixelformat);
LINFO("Camera set video format to " << itsFormat.fmt.pix.width << 'x' << itsFormat.fmt.pix.height << ' ' <<
jevois::fccstr(itsFormat.fmt.pix.pixelformat));
// Because modules may rely on the exact format that they request, throw if the camera modified it:
if (itsFormat.fmt.pix.width != m.cw || itsFormat.fmt.pix.height != m.ch || itsFormat.fmt.pix.pixelformat != m.cfmt)
LFATAL("Camera did not accept the requested video format as specified");
// Reset cropping parameters. NOTE: just open()'ing the device does not reset it, according to the unix toolchain
// philosophy. Hence, although here we do not provide support for cropping, we still need to ensure that it is
// properly reset. Note that some cameras do not support this so here we swallow that exception:
try
{
struct v4l2_cropcap cropcap = { };
cropcap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
XIOCTL_QUIET(itsFd, VIDIOC_CROPCAP, &cropcap);
struct v4l2_crop crop = { };
crop.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; crop.c = cropcap.defrect;
XIOCTL_QUIET(itsFd, VIDIOC_S_CROP, &crop);
LDEBUG("Set cropping rectangle to " << cropcap.defrect.width << 'x' << cropcap.defrect.height << " @ ("
<< cropcap.defrect.left << ", " << cropcap.defrect.top << ')');
}
catch (...) { LDEBUG("Querying/setting crop rectangle not supported"); }
// Set frame rate:
try
{
struct v4l2_streamparm parms = { };
parms.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
parms.parm.capture.timeperframe = jevois::VideoMapping::fpsToV4l2(m.cfps);
parms.parm.capture.capturemode = 2; // V4L2_MODE_VIDEO not defined in our headers? its value is 2.
XIOCTL(itsFd, VIDIOC_S_PARM, &parms);
LDEBUG("Set framerate to " << m.cfps << " fps");
}
catch (...) { LERROR("Setting frame rate to " << m.cfps << " fps failed -- IGNORED"); }
}
Example 6: setupCases
// ######################################################################
// open a testing file containing images and corresponding ground truth
void setupCases(std::string folder, std::string fname, bool equalize)
{
char comment[200];
FILE *fp;
char inLine[1000]; // must be at least as large as the size passed to fgets() below
// open a file that lists the sample with ground truth
std::string name = folder + fname;
if((fp = fopen(name.c_str(),"rb")) == NULL)
{
LINFO("samples file: %s not found", name.c_str());
// input and output vector
out.resize(0);
in.resize(0);
nSamples = 0;
return;
}
LINFO("tName: %s",name.c_str());
// get number of samples
if (fgets(inLine, 1000, fp) == NULL) LFATAL("fgets failed");
sscanf(inLine, "%d %s", &nSamples, comment);
// the number of categories -> has to agree with the training file
uint tNout;
if (fgets(inLine, 1000, fp) == NULL) LFATAL("fgets failed");
sscanf(inLine, "%d %s", &tNout, comment);
if(tNout != info->nOutput)
LFATAL("Num categories differ: %d != %d", tNout, info->nOutput);
// get the type of ground truth
char gtOpt[100];
int gtType = -1;
if (fgets(inLine, 1000, fp) == NULL) LFATAL("fgets failed");
sscanf(inLine, "%s %s", gtOpt, comment);
if(strcmp(gtOpt,"ABSOLUTE") == 0)
gtType = ABSOLUTE;
else if(strcmp(gtOpt,"MIXTURE" ) == 0)
gtType = MIXTURE;
else
LFATAL("unknown ground truth type %s",gtOpt);
// set up the size input and output vector
out.resize(nSamples);
in.resize(nSamples);
// skip column headers
if (fgets(inLine, 1000, fp) == NULL) LFATAL("fgets failed");
char cName[100];
char sName[100];
char iName[100];
char ext[100];
int cStart, cNum;
int gTruth;
FILE *ifp;
int count = 0;
int tSamples = 0;
std::vector<uint> nSamples;
while(fgets(inLine, 1000, fp) != NULL)
{
if(gtType == ABSOLUTE)
{
// get the files in this category and ground truth
sscanf(inLine, "%s %d %d %d %s", cName, &cStart, &cNum, &gTruth, ext);
sprintf(sName,"%s%s", folder.c_str(), cName);
printf(" sName: %s %d %d %d %s\n",sName, cStart, cNum, gTruth, ext);
}
else if(gtType == MIXTURE)
{
// get the files in this category and ground truth
//char tStr[300];
//sscanf(inLine, "%s %d %d %s %s", cName, &cStart, &cNum, tStr, ext);
//sprintf(sName,"%s%s", folder, cName);
//printf(" sName: %s %d %d %d %s\n",sName, cStart, cNum, gTruth, ext);
// change to mixture values
LFATAL("MIXTURE ground truth type not yet implemented");
}
else LFATAL("unknown ground truth type %s",gtOpt);
nSamples.push_back(cNum);
// go through every sample
for(int j = cStart; j < cStart+cNum; j++)
{
tSamples++;
// get the corresponding vector file (if exist)
sprintf(iName,"%s%06d%s", sName,j,ext);
// open the file
if((ifp = fopen(iName,"rb")) != NULL)
{
Image<double> tData(1,info->oriFeatSize, NO_INIT);
Image<double>::iterator aptr = tData.beginw();
//......... (part of the code omitted here) .........
Example 7: main
//......... (part of the code omitted here) .........
// do post-command-line configs:
nub::soft_ref<FrameIstream> gb = gbc->getFrameGrabber();
if (gb.isInvalid())
LFATAL("You need to select a frame grabber type via the "
"--fg-type=XX command-line option for this program "
"to be useful");
int width = gb->getWidth(), height = gb->getHeight();
float delay = 0;
// let's get all our ModelComponent instances started:
manager.start();
XWindow wini(Dims(width, height), 0, 0, "test-input window");
XWindow wino1(Dims(width/4, height/4), 0, 0, "test-output window 1");
XWindow wino2(Dims(width/4, height/4), 0, 0, "test-output window 2");
XWindow winAux1(Dims(100, 450), 0, 0, "HSV levels 1");
XWindow winAux2(Dims(100, 450), 0, 0, "HSV levels 2");
Timer tim; Image< PixRGB<byte> > ima; Image< PixRGB<float> > fima;
Image< PixRGB<byte> > display;
Timer camPause; // to pause the move command
camPause.reset();
uint64 t[NAVG]; int frame = 0;
segmentImageMerge segmenter(2);
// set up tracking parameters
//segmenter.setTrackColor(10,10,0.15,0.20,150,150,0,true,15);
segmenter.setTrackColor(13,7,0.17,0.3,156,30,0,true,15);
//segmenter.setTrackColor(10,10,0.15,0.20,150,150,1,false,15);
segmenter.setTrackColor(270,10,0.18,0.25,60,60,1,true,15);
segmenter.setAdaptBound(20,5,.30,.15,170,100,0);
//segmenter.setAdaptBound(15,5,.30,.25,140,100,0);
segmenter.setAdaptBound(285,265,.25,.15,80,40,1);
segmenter.setFrame(0,0,width/4,height/4,width/4,height/4,0);
segmenter.setFrame(0,0,width/4,height/4,width/4,height/4,1);
segmenter.setCircleColor(0,255,0,0);
segmenter.setCircleColor(0,0,255,1);
segmenter.setBoxColor(255,255,0,0);
segmenter.setBoxColor(255,0,255,1);
segmenter.setAdapt(3,true,3,true,3,true,0);
segmenter.setAdapt(3,true,3,true,3,true,1);
while(1) {
tim.reset();
ima = gb->readRGB();
uint64 t0 = tim.get(); // to measure display time
Image<PixRGB<byte> > Aux1;
Image<PixRGB<byte> > Aux2;
Aux1.resize(100,450,true);
Aux2.resize(100,450,true);
Image<byte> outputI1;
Image<byte> outputI2;
display = ima;
segmenter.trackImage(ima,&display,0,&Aux1);
segmenter.trackImage(ima,&display,1,&Aux2);
segmenter.mergeImages(&display);
if(camPause.get() > delay)
{
int modi,modj;
segmenter.getImageTrackXY(&modi,&modj,0);
//segmenter.getImageTrackXYMerge(&modi,&modj);
modi = modi*8;
modj = 480-modj*8;
if(modi > 0 && modi < 640 && modj > 0 && modj < 480)
{
if(segmenter.returnLOT(0) == false)
{
camPause.reset();
delay = camera->moveCamXYFrame(modi,modj);
}
}
}
Image<byte> temp1 = segmenter.returnCandidateImage(0);
Image<byte> temp2 = segmenter.returnCandidateImage(1);
wini.drawImage(display);
//wino1.drawImage(outputI1);
wino1.drawImage(temp1);
wino2.drawImage(temp2);
winAux1.drawImage(Aux1);
winAux2.drawImage(Aux2);
t[frame % NAVG] = tim.get();
t0 = t[frame % NAVG] - t0;
if (t0 > 28) LINFO("Display took %llums", t0);
// compute and show framerate over the last NAVG frames:
if (frame % NAVG == 0 && frame > 0)
{
uint64 avg = 0; for (int i = 0; i < NAVG; i ++) avg += t[i];
float avg2 = 1000.0 / (float)avg * NAVG;
printf("Framerate: %.1f fps\n", avg2);
}
frame ++;
}
manager.stop();
return 0;
}
Example 8: glViewport
void CanvasRenderer::process() {
if (!canvas_)
return;
canvas_->getGLFocus();
glViewport(0, 0, canvas_->getSize().x, canvas_->getSize().y);
if (inport_.isReady()) {
// render inport to image, if renderToImage flag has been set
if (renderToImage_) {
try {
renderInportToImage(renderToImageFilename_);
LINFO("Saved rendering with dimensions " << inport_.getSize() << " to file: " << tgt::FileSystem::cleanupPath(renderToImageFilename_));
}
catch (std::bad_alloc& /*e*/) {
LERROR("Exception in CanvasRenderer::renderInportToImage(): bad allocation (" << getID() << ")");
renderToImageError_ = "Not enough system memory (bad allocation)";
}
catch (VoreenException& e) {
LERROR(e.what());
renderToImageError_ = std::string(e.what());
}
catch (std::exception& e) {
LERROR("Exception in CanvasRenderer::renderInportToImage(): " << e.what() << " (" << getID() << ")");
renderToImageError_ = std::string(e.what());
}
renderToImage_ = false;
}
// map texture of input target onto a screen-aligned quad
else {
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
// activate shader
shader_->activate();
// set common uniforms
setGlobalShaderParameters(shader_);
// manually pass the viewport dimensions to the shader,
// since setGlobalShaderParameters() expects a render outport, which we do not have
shader_->setIgnoreUniformLocationError(true);
shader_->setUniform("screenDim_", tgt::vec2(canvas_->getSize()));
shader_->setUniform("screenDimRCP_", 1.f / tgt::vec2(canvas_->getSize()));
shader_->setIgnoreUniformLocationError(false);
// bind input textures
inport_.bindTextures(GL_TEXTURE0, GL_TEXTURE1);
// pass texture parameters to the shader
shader_->setUniform("colorTex_", 0);
shader_->setUniform("depthTex_", 1);
inport_.setTextureParameters(shader_, "texParams_");
LGL_ERROR;
// execute the shader
renderQuad();
shader_->deactivate();
LGL_ERROR;
}
}
else {
// render error texture
if (!errorTex_) {
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
return;
}
glClear(GL_DEPTH_BUFFER_BIT);
glActiveTexture(GL_TEXTURE0);
errorTex_->bind();
errorTex_->enable();
glColor3f(1.f, 1.f, 1.f);
renderQuad();
errorTex_->disable();
}
glActiveTexture(GL_TEXTURE0);
LGL_ERROR;
}
Example 9: LFATAL
// ######################################################################
void XCgrabberFlex::start1()
{
#ifndef HAVE_XCLIB
LFATAL("you must have XC support and the xclib library in order to use XCgrabberFlex");
#else
// open the XC cameralink imaging board
int i;
if(!strcmp(itsFormatFile.getVal().c_str(),"noFile"))
{
LINFO("use default setup configure format");
char* format =(char*)("default");
i = xclib::xclib_open(&itsXclib, NULL,NULL, format, NULL);
}
else
{
LINFO("use input format file as configure file");
char* formatFile = (char*)(itsFormatFile.getVal().c_str());
i = xclib::xclib_open(&itsXclib, NULL, NULL, NULL, formatFile);
}
if(i != 0)
{
LINFO("error code %d\n", i);
LFATAL("can not open the XC camera");
}
switch(itsGrabMode.getVal())
{
case VIDFMT_BAYER_GB12: itsBitDepth = 12; break;
case VIDFMT_BAYER_GR12: itsBitDepth = 12; break;
case VIDFMT_BAYER_RG12: itsBitDepth = 12; break;
case VIDFMT_BAYER_BG12: itsBitDepth = 12; break;
case VIDFMT_BAYER_GB: itsBitDepth = 8; break;
case VIDFMT_BAYER_GR: itsBitDepth = 8; break;
case VIDFMT_BAYER_RG: itsBitDepth = 8; break;
case VIDFMT_BAYER_BG: itsBitDepth = 8; break;
default: LFATAL("ERROR in specify the xc grab mode");
}
// list basic camera info
struct xclib::pxdevinfo pxinfo;
memset(&pxinfo, 0, sizeof(pxinfo));
pxinfo.ddch.len = sizeof(pxinfo);
pxinfo.ddch.mos = PXMOS_DEVINFO;
itsXclib.pxdev.getDevInfo(&(itsXclib.pxdev), UNITMAP, 0, &pxinfo);
LINFO("find %d baords, frame buffer memory = %.4f Kbytes",
pxinfo.nunits,(double)pxinfo.memsize/1024);
//white balance
WhiteBalance();
struct xclib::pxlibservice pxlib = itsXclib.pxlib;
struct xclib::xcdevservice xcdev = itsXclib.xcdev;
// initialize pxvidstate
i = pxlib.allocStateCopy(&pxlib, 0, 0, &itsStatep);
LINFO("allocate state copy (video state), result code: %d", i);
i = pxlib.initStateCopy(&pxlib, 0, 0,
itsStatep, &pxinfo, (char*)("default"), PXMODE_DIGI);
LINFO("init state copy (video state), result code: %d",i);
pxlib.defineState(&pxlib, 0, itsStateid, itsStatep);
// itsStatep->vidres->x.vidoffset = 640;//1920/2-itsDims.getVal().w()/2;
//itsStatep->vidres->x.vidoffsend = 1920; //1920/2+itsDims.getVal().w()/2;
LINFO("pxvidimage dims = %d,%d\n",itsStatep->vidres->x.vidoffset,
itsStatep->vidres->x.vidoffsend);
//! show some info of pxvidstate structure
/*
LINFO("the pxvidimage bayerpattern: %d, %d, %d, %d, %d, %d\n",
itsStatep->vidimage->bp.order,
itsStatep->vidimage->bp.mode,
itsStatep->vidimage->bp.arg[0],
itsStatep->vidimage->bp.arg[1],
itsStatep->vidimage->bp.arg[2],
itsStatep->vidimage->bp.arg[3]);
LINFO("the pxvidimage colorspace: %d, %d, %d",
itsStatep->vidimage->cs.order,
itsStatep->vidimage->cs.mode,
(int)itsStatep->vidimage->cs.scale);
LINFO("the pxvid image whitebalance: %d, %d, %d",
itsStatep->vidimage->wb.order,
itsStatep->vidimage->wb.mode,
(int)itsStatep->vidimage->wb.gamma[0][0]);
LINFO("the pxvid image sharp :%d, %d, %d, %d, %d",
itsStatep->vidimage->sh.order,
itsStatep->vidimage->sh.mode,
itsStatep->vidimage->sh.scale,
itsStatep->vidimage->sh.into[0],
itsStatep->vidimage->sh.from[0]);
for(int i=0; i<6; i++)
for(int j=0; j<4; j++)
{
//......... (part of the code omitted here) .........
Example 10: downscaleFancy
Image<PixRGB<T> > downscaleFancy(const Image<PixRGB<T> >& src,
int width, int height, int weighting_slope,
bool no_weight_black)
{
GVX_TRACE(__PRETTY_FUNCTION__);
PixRGB<T> pix(0);
Image<PixRGB<T> > buffer;
Image<PixRGB<T> > out;
Image<T> bufferWeight;
buffer.resize(width,height,true);
out.resize(width,height,true);
bufferWeight.resize(width,height,true);
T scalex = (T)width / (T)src.getWidth();
T scaley = (T)height / (T)src.getHeight();
T dx, dy, weight, xweight, yweight, xfrac, yfrac;
typename Image<PixRGB<T> >::iterator bufelem;
typename Image<T>::iterator bufw;
dx = (T)0.0;
dy = (T)0.0;
for (int sy = (int)0; sy < (int)src.getHeight();
sy++, dy += scaley)
{
// outer loop for Y axis
yfrac = dy - (T)floor(dy);
switch (weighting_slope)
{
case 5: yweight = yfrac < 0.5 ? 0 : 1;
break;
case 4: yweight = (T)(0.5 + 0.5*tanh(15*(yfrac - 0.5)));
break;
case 3: yweight = (T)(0.5 + 0.5*tanh(8*(yfrac - 0.5)));
break;
case 2: yweight = (T)(0.5 + 0.5*tanh(5*(yfrac - 0.5)));
break;
case 1: yweight = (T)(0.5 - 0.5*cos(yfrac*M_PI));
break;
case 0: yweight = yfrac;
break;
default : LERROR("illegal weighting slope");
yweight = yfrac;
}
// inner loop for X axis
dx = (T)0;
for (int sx = (int)0; sx < (int)src.getWidth();
sx++, dx += scalex)
{
//LINFO("X %d",sx);
xfrac = dx - (T)floor(dx);
switch (weighting_slope)
{
case 5: xweight = xfrac < 0.5 ? 0 : 1;
break;
case 4: xweight = (T)(0.5 + 0.5*tanh(15*(xfrac - 0.5)));
break;
case 3: xweight = (T)(0.5 + 0.5*tanh(8*(xfrac - 0.5)));
break;
case 2: xweight = (T)(0.5 + 0.5*tanh(5*(xfrac - 0.5)));
break;
case 1: xweight = (T)(0.5 - 0.5*cos(xfrac*M_PI));
/*almost same as tanh(4*x)*/
break;
case 0: xweight = xfrac;
break;
default : LINFO("illegal weighting slope");
xweight = xfrac;
}
//LINFO("XWEIGHT %f",xweight);
int floordx = (int)floor((T)dx);
int floordy = (int)floor((T)dy);
const PixRGB<T> *in_sy_sx = &src.getVal(sx,sy);
if (no_weight_black)
if (in_sy_sx->red() == 0 &&
in_sy_sx->green() == 0 &&
in_sy_sx->blue() == 0)
continue;
bufelem = buffer.beginw()
+ buffer.getWidth()*floordy + floordx;
bufw = bufferWeight.beginw()
+ bufferWeight.getWidth()*floordy + floordx;
ADD_RGB(bufelem, ((T)1.0-xweight)*((T)1.0-yweight), bufw, in_sy_sx);
if (dx < width - 1)
{
bufelem++; bufw++;
ADD_RGB(bufelem, xweight*((T)1.0-yweight), bufw, in_sy_sx);
}
if (dy < height - 1)
{
bufelem = buffer.beginw()
+ buffer.getWidth()*(floordy+1) + floordx;
//......... (part of the code omitted here) .........
Example 11: LINFO
// ######################################################################
void GistEstimatorGen::getFeatureVector(rutz::shared_ptr<ChannelMaps> chanMaps)
{
//! first get the gist feature size and allocate the gist vector size
int sz = 0, sz_cs=0, sz_nocs = 0;
if(itsUseCS.getVal() == 1 || itsUseCS.getVal() == 2)
sz_cs += chanMaps->numSubmaps();
// sz_nocs is the number of how many raw pyramid types
if(itsUseCS.getVal() == 0 || itsUseCS.getVal() == 2)
for(uint i=0; i < chanMaps->numSubchans(); i++)
{
rutz::shared_ptr<ChannelMaps> currChan = chanMaps->subChanMaps(i);
if(currChan->numSubchans() == 0)
sz_nocs++;
else
sz_nocs += currChan->numSubchans();
}
sz_nocs *= PYR_LEVEL;
sz = sz_cs + sz_nocs;
LINFO("there are in total %4d gist feature chans", sz);
itsGistVector.resize(1,NUM_GIST_FEAT * sz, NO_INIT);
int count = 0;
//! get the center-surround feature values
if(itsUseCS.getVal() == 1 || itsUseCS.getVal() == 2)
for(int i = 0; i<sz_cs; i++)
{
inplacePaste(itsGistVector,getSubSumGen(chanMaps->getRawCSmap(i)),
Point2D<int>(0, count*NUM_GIST_FEAT));
count++;
}
//! get the non center-surround feature values
if(itsUseCS.getVal() == 0 || itsUseCS.getVal() == 2)
for(uint i=0; i<chanMaps->numSubchans(); i++)
{
rutz::shared_ptr<ChannelMaps> currChan = chanMaps->subChanMaps(i);
if(currChan->numSubchans() == 0)
{
ASSERT(currChan->hasPyramid());
for(uint j=0; j<PYR_LEVEL; j++)
{
inplacePaste(itsGistVector,getSubSumGen
(currChan->getPyramid().getImage(j)),
Point2D<int>(0,count*NUM_GIST_FEAT));
count++;
}
}
else
{
for(uint i=0; i<currChan->numSubchans(); i++)
{
rutz::shared_ptr<ChannelMaps> currSubChan = currChan->subChanMaps(i);
ASSERT(currSubChan->hasPyramid());
for(uint j=0; j<PYR_LEVEL; j++)
{
inplacePaste(itsGistVector,getSubSumGen
(currSubChan->getPyramid().getImage(j)),
Point2D<int>(0,count*NUM_GIST_FEAT));
count++;
}
}
}
}
ASSERT(count == sz);
itsGistSize = sz;
}
Example 12: fsg_lun_open
static int fsg_lun_open(struct fsg_lun *curlun, const char *filename)
{
int ro;
struct file *filp = NULL;
int rc = -EINVAL;
struct inode *inode = NULL;
struct backing_dev_info *bdi;
loff_t size;
loff_t num_sectors;
loff_t min_sectors;
unsigned int blkbits;
unsigned int blksize;
/* R/W if we can, R/O if we must */
ro = curlun->initially_ro;
if (!ro) {
filp = filp_open(filename, O_RDWR | O_LARGEFILE, 0);
if (PTR_ERR(filp) == -EROFS || PTR_ERR(filp) == -EACCES)
ro = 1;
}
if (ro)
filp = filp_open(filename, O_RDONLY | O_LARGEFILE, 0);
if (IS_ERR(filp)) {
LINFO(curlun, "unable to open backing file: %s\n", filename);
return PTR_ERR(filp);
}
if (!(filp->f_mode & FMODE_WRITE))
ro = 1;
inode = file_inode(filp);
if ((!S_ISREG(inode->i_mode) && !S_ISBLK(inode->i_mode))) {
LINFO(curlun, "invalid file type: %s\n", filename);
goto out;
}
/*
* If we can't read the file, it's no good.
* If we can't write the file, use it read-only.
*/
if (!(filp->f_op->read || filp->f_op->aio_read)) {
LINFO(curlun, "file not readable: %s\n", filename);
goto out;
}
if (!(filp->f_op->write || filp->f_op->aio_write))
ro = 1;
size = i_size_read(inode->i_mapping->host);
if (size < 0) {
LINFO(curlun, "unable to find file size: %s\n", filename);
rc = (int) size;
goto out;
}
if (curlun->cdrom) {
blksize = 2048;
blkbits = 11;
} else if (inode->i_bdev) {
blksize = bdev_logical_block_size(inode->i_bdev);
blkbits = blksize_bits(blksize);
bdi = &inode->i_bdev->bd_queue->backing_dev_info;
if (bdi->capabilities & BDI_CAP_STRICTLIMIT) {
curlun->max_ratio = bdi->max_ratio;
curlun->nofua = 1;
if (bdi_set_max_ratio(bdi, uicc_ums_max_ratio))
pr_debug("%s, error in setting max_ratio\n",
__func__);
}
} else {
blksize = 512;
blkbits = 9;
}
num_sectors = size >> blkbits; /* File size in logic-block-size blocks */
min_sectors = 1;
if (curlun->cdrom) {
min_sectors = 300; /* Smallest track is 300 frames */
if (num_sectors >= 256*60*75) {
num_sectors = 256*60*75 - 1;
LINFO(curlun, "file too big: %s\n", filename);
LINFO(curlun, "using only first %d blocks\n",
(int) num_sectors);
}
}
if (num_sectors < min_sectors) {
LINFO(curlun, "file too small: %s\n", filename);
rc = -ETOOSMALL;
goto out;
}
if (fsg_lun_is_open(curlun))
fsg_lun_close(curlun);
curlun->blksize = blksize;
curlun->blkbits = blkbits;
curlun->ro = ro;
curlun->filp = filp;
curlun->file_length = size;
//......... (part of the code omitted here) .........
Example 13: clear
bool SceneGraph::loadFromFile(const std::string& sceneDescription) {
clear(); // Move this to a later stage to retain a proper scenegraph when the loading fails ---abock
std::string absSceneFile = absPath(sceneDescription);
// See if scene file exists
if (!FileSys.fileExists(absSceneFile, true)) {
LERROR("Could not load scene file '" << absSceneFile << "'. " <<
"File not found");
return false;
}
LINFO("Loading SceneGraph from file '" << absSceneFile << "'");
// Load dictionary
ghoul::Dictionary sceneDictionary;
try {
ghoul::lua::loadDictionaryFromFile(absSceneFile, sceneDictionary);
}
catch (...) {
return false;
}
std::string sceneDescriptionDirectory =
ghoul::filesystem::File(absSceneFile, true).directoryName();
std::string sceneDirectory(".");
sceneDictionary.getValue(KeyPathScene, sceneDirectory);
// The scene path could either be an absolute or relative path to the description
// paths directory
std::string relativeCandidate = sceneDescriptionDirectory +
ghoul::filesystem::FileSystem::PathSeparator + sceneDirectory;
std::string absoluteCandidate = absPath(sceneDirectory);
if (FileSys.directoryExists(relativeCandidate))
sceneDirectory = relativeCandidate;
else if (FileSys.directoryExists(absoluteCandidate))
sceneDirectory = absoluteCandidate;
else {
LERROR("The '" << KeyPathScene << "' pointed to a "
"path '" << sceneDirectory << "' that did not exist");
return false;
}
ghoul::Dictionary moduleDictionary;
bool success = sceneDictionary.getValue(KeyModules, moduleDictionary);
if (!success)
// There are no modules that are loaded
return true;
lua_State* state = ghoul::lua::createNewLuaState();
OsEng.scriptEngine().initializeLuaState(state);
// Get the common directory
bool commonFolderSpecified = sceneDictionary.hasKey(KeyCommonFolder);
bool commonFolderCorrectType = sceneDictionary.hasKeyAndValue<std::string>(KeyCommonFolder);
if (commonFolderSpecified) {
if (commonFolderCorrectType) {
std::string commonFolder = sceneDictionary.value<std::string>(KeyCommonFolder);
std::string fullCommonFolder = FileSys.pathByAppendingComponent(
sceneDirectory,
commonFolder
);
if (!FileSys.directoryExists(fullCommonFolder))
LERROR("Specified common folder '" << fullCommonFolder << "' did not exist");
else {
if (!commonFolder.empty()) {
FileSys.registerPathToken(_commonModuleToken, commonFolder);
size_t nKeys = moduleDictionary.size();
moduleDictionary.setValue(std::to_string(nKeys + 1), commonFolder);
}
}
}
else
LERROR("Specification for 'common' folder has invalid type");
}
std::vector<std::string> keys = moduleDictionary.keys();
std::map<std::string, std::vector<std::string>> dependencies;
std::map<std::string, std::string> parents;
_rootNode = new SceneGraphNode;
_rootNode->setName(SceneGraphNode::RootNodeName);
SceneGraphNodeInternal* internalRoot = new SceneGraphNodeInternal;
internalRoot->node = _rootNode;
_nodes.push_back(internalRoot);
std::sort(keys.begin(), keys.end());
ghoul::filesystem::Directory oldDirectory = FileSys.currentDirectory();
for (const std::string& key : keys) {
std::string moduleName = moduleDictionary.value<std::string>(key);
std::string modulePath = FileSys.pathByAppendingComponent(sceneDirectory, moduleName);
if (!FileSys.directoryExists(modulePath)) {
LERROR("Could not load module '" << moduleName << "'. Directory did not exist");
continue;
}
std::string moduleFile = FileSys.pathByAppendingComponent(
//......... (part of the code omitted here) .........
Example 14: LINFO
// ######################################################################
void SimulationViewerSurpCont::saveResults(const nub::ref<FrameOstream>& ofs)
{
// update our internal time:
double msecs = itsCurrTime.msecs();
LINFO("Running Surprise Control on Sample Input time %f ms",msecs);
LFATAL("FIXME");
//// itsScaleSurpriseControl.SSCprocessFrame(itsBrain);
LINFO("Saving Surprise Control Output");
Image<PixRGB<byte> > bimage;
Image<PixRGB<float> > outImage = itsScaleSurpriseControl.SSCgetFrame();
bimage = outImage;
ofs->writeRGB(bimage, "SSC", FrameInfo("ScaleSurpriseControl final image",
SRC_POS));
Image<PixRGB<float> > diffImage =
itsScaleSurpriseControl.SSCgetDiffImage(false);
bimage = diffImage;
ofs->writeRGB(bimage, "SSC-diff",
FrameInfo("ScaleSurpriseControl diff image",SRC_POS));
diffImage = itsScaleSurpriseControl.SSCgetDiffImage(true);
bimage = diffImage;
ofs->writeRGB(bimage, "SSC-diff-norm",
FrameInfo("ScaleSurpriseControl diff image normalized",SRC_POS));
if(itsDrawDiffParts.getVal())
{
std::vector<Image<PixRGB<float> > > diffParts =
itsScaleSurpriseControl.SSCgetDiffParts();
std::vector<Image<PixRGB<float> > >::const_iterator diffPartsItr =
diffParts.begin();
ushort type = 0;
while(diffPartsItr != diffParts.end())
{
bimage = *diffPartsItr;
char name[100];
if(type == 0)
sprintf(name,"SSC-diffParts-H1-");
else if(type == 1)
sprintf(name,"SSC-diffParts-H2-");
else if(type == 2)
sprintf(name,"SSC-diffParts-S-");
else if(type == 3)
sprintf(name,"SSC-diffParts-V-");
else
sprintf(name,"SSC-diffParts-%d-",type);
std::string prefix = name;
std::string frameInfo = "ScaleSurpriseControl difference ";
frameInfo = frameInfo + prefix;
ofs->writeRGB(bimage, prefix, FrameInfo(frameInfo,SRC_POS));
++diffPartsItr; type++;
}
}
if(itsDrawBetaParts.getVal())
{
std::vector<Image<float> > betaParts =
itsScaleSurpriseControl.SSCgetBetaParts(false);
std::vector<Image<float> >::const_iterator betaPartsItr =
betaParts.begin();
ushort type = 0;
while(betaPartsItr != betaParts.end())
{
bimage = *betaPartsItr;
char name[100];
sprintf(name,"SSC-betaParts-%s-",sc_channel_name_abv[type].c_str());
std::string prefix = name;
std::string frameInfo = "ScaleSurpriseControl beta ";
frameInfo = frameInfo + prefix;
ofs->writeRGB(bimage, prefix, FrameInfo(frameInfo,SRC_POS));
++betaPartsItr; type++;
}
betaParts = itsScaleSurpriseControl.SSCgetBetaParts(true);
betaPartsItr = betaParts.begin();
type = 0;
while(betaPartsItr != betaParts.end())
{
bimage = *betaPartsItr;
char name[100];
sprintf(name,"SSC-betaParts-norm-%s-",sc_channel_name_abv[type].c_str());
std::string prefix = name;
std::string frameInfo = "ScaleSurpriseControl beta norm";
frameInfo = frameInfo + prefix;
ofs->writeRGB(bimage, prefix, FrameInfo(frameInfo,SRC_POS));
++betaPartsItr; type++;
}
}
if(itsDrawBiasParts.getVal())
//......... (part of the code omitted here) .........
Example 15: LINFO
bool LocalErrorHistogramManager::buildHistograms(int numBins) {
LINFO("Build histograms with " << numBins << " bins each");
_numBins = numBins;
_file = &(_tsp->file());
if (!_file->is_open()) {
return false;
}
_minBin = 0.0; // Should be calculated from tsp file
_maxBin = 1.0; // Should be calculated from tsp file as (maxValue - minValue)
unsigned int numOtLevels = _tsp->numOTLevels();
unsigned int numOtLeaves = pow(8, numOtLevels - 1);
unsigned int numBstLeaves = pow(2, _tsp->numBSTLevels() - 1);
_numInnerNodes = _tsp->numTotalNodes() - numOtLeaves * numBstLeaves;
_spatialHistograms = std::vector<Histogram>(_numInnerNodes);
_temporalHistograms = std::vector<Histogram>(_numInnerNodes);
for (unsigned int i = 0; i < _numInnerNodes; i++) {
_spatialHistograms[i] = Histogram(_minBin, _maxBin, numBins);
_temporalHistograms[i] = Histogram(_minBin, _maxBin, numBins);
}
// All TSP Leaves
int numOtNodes = _tsp->numOTNodes();
int otOffset = (pow(8, numOtLevels - 1) - 1) / 7;
int numBstNodes = _tsp->numBSTNodes();
int bstOffset = numBstNodes / 2;
int numberOfLeaves = numOtLeaves * numBstLeaves;
LINFO("Building spatial histograms");
ProgressBar pb1(numberOfLeaves);
int processedLeaves = 0;
pb1.print(processedLeaves);
bool success = true;
for (int bst = bstOffset; bst < numBstNodes; bst++) {
for (int ot = otOffset; ot < numOtNodes; ot++) {
success &= buildFromOctreeChild(bst, ot);
if (!success) LERROR("Failed in buildFromOctreeChild");
if (!success) return false;
pb1.print(processedLeaves++);
}
}
//pb1.stop();
LINFO("Building temporal histograms");
ProgressBar pb2(numberOfLeaves);
processedLeaves = 0;
pb2.print(processedLeaves);
for (int ot = otOffset; ot < numOtNodes; ot++) {
for (int bst = bstOffset; bst < numBstNodes; bst++) {
success &= buildFromBstChild(bst, ot);
if (!success) LERROR("Failed in buildFromBstChild");
if (!success) return false;
pb2.print(processedLeaves++);
}
}
//pb2.stop();
return success;
}