This article collects and summarizes typical usage examples of the C++ ImageOf class. If you have been wondering what exactly ImageOf is for, how to use it, or what using it looks like in practice, the curated class code examples below may help.
The following presents 15 code examples of the ImageOf class, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
Example 1: readDepth
bool KinectDriverOpenNI::readDepth(ImageOf<PixelMono16> &depth, double &timestamp)
{
const XnDepthPixel* pDepthMap = depthGenerator.GetDepthMap();
int ts=(int)depthGenerator.GetTimestamp();
timestamp=(double)ts/1000.0;
SceneMetaData smd;
userGenerator.GetUserPixels(0,smd);
for (int y=0; y<this->def_depth_height; y++)
{
for(int x=0;x<this->def_depth_width;x++)
{
int player=(smd[y * this->def_depth_width + x]);
int depth=(pDepthMap[y * this->def_depth_width + x]);
int finalValue=0;
finalValue|=((depth<<3)&0XFFF8);
finalValue|=(player&0X0007);
//if (x==320 && y==240)
// fprintf(stdout, "%d %d\n", ((finalValue&0XFFF8)>>3), finalValue&0X0007);
//We associate the depth to the first 13 bits, using the last 3 for the player identification
depthMat->data.s[y * this->def_depth_width + x ]=finalValue;
}
}
if (device==KINECT_TAGS_DEVICE_KINECT)
{
cvGetImage(depthMat,depthTmp);
resizeImage(depthTmp,depthImage);
}
else
cvGetImage(depthMat,depthImage);
depth.wrapIplImage(depthImage);
return true;
}
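The comment inside readDepth() explains the packing scheme: the 13 most significant bits of each PixelMono16 value hold the depth, and the 3 least significant bits hold the player ID. As a minimal sketch, a consumer of this image could unpack the two fields as shown below (the helper name is illustrative, not part of the driver API):
#include <cstdint>
#include <utility>
// Hypothetical helper mirroring the encoding used above:
// finalValue = ((depth<<3)&0xFFF8) | (player&0x0007)
inline std::pair<int,int> unpackDepthPixel(uint16_t packed)
{
    int depth  = (packed & 0xFFF8) >> 3;   // upper 13 bits: depth value
    int player = (packed & 0x0007);        // lower 3 bits: player ID
    return std::make_pair(depth, player);
}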
Example 2: ImageReadMono
static bool ImageReadMono(ImageOf<PixelMono> &img, const char *filename)
{
int width, height, color, num;
FILE *fp=0;
fp = fopen(filename, "rb");
if(fp==0)
{
fprintf(stderr, "Error opening %s, check if file exists.\n", filename);
return false;
}
if (!ReadHeader(fp, &height, &width, &color))
{
fclose (fp);
fprintf(stderr, "Error reading header, is file a valid ppm/pgm?\n");
return false;
}
if (color)
{
fclose(fp);
fprintf(stderr, "File is color, conversion not yet supported\n");
return false;
}
img.resize(width,height);
const int w = img.width() * img.getPixelSize();
const int h = img.height();
const int pad = img.getRowSize();
unsigned char *dst = img.getRawImage ();
num = 0;
for (int i = 0; i < h; i++)
{
num += (int)fread((void *) dst, 1, (size_t) w, fp);
dst += pad;
}
fclose(fp);
return true;
}
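A minimal usage sketch for the reader above; the filename is a placeholder:
ImageOf<PixelMono> gray;
if (ImageReadMono(gray, "frame.pgm"))   // "frame.pgm" is a hypothetical path
    printf("loaded %dx%d grayscale image\n", (int)gray.width(), (int)gray.height());
else
    fprintf(stderr, "could not load image\n");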
Example 3: testCopy
void testCopy() {
report(0,"testing image copying...");
ImageOf<PixelRgb> img1;
img1.resize(128,64);
for (int x=0; x<img1.width(); x++) {
for (int y=0; y<img1.height(); y++) {
PixelRgb& pixel = img1.pixel(x,y);
pixel.r = x;
pixel.g = y;
pixel.b = 42;
}
}
ImageOf<PixelRgb> result;
result.copy(img1);
checkEqual(img1.width(),result.width(),"width check");
checkEqual(img1.height(),result.height(),"height check");
if (img1.width()==result.width() &&
img1.height()==result.height()) {
int mismatch = 0;
for (int x=0; x<img1.width(); x++) {
for (int y=0; y<img1.height(); y++) {
PixelRgb& pix0 = img1.pixel(x,y);
PixelRgb& pix1 = result.pixel(x,y);
if (pix0.r!=pix1.r ||
pix0.g!=pix1.g ||
pix0.b!=pix1.b) {
mismatch++;
}
}
}
checkTrue(mismatch==0,"pixel match check");
}
}
Example 4: testCast
void testCast() {
report(0,"testing image casting...");
ImageOf<PixelRgb> img1;
img1.resize(128,64);
for (int x=0; x<img1.width(); x++) {
for (int y=0; y<img1.height(); y++) {
PixelRgb& pixel = img1.pixel(x,y);
unsigned char v = x%30;
pixel.r = v;
pixel.g = v;
pixel.b = v;
}
}
ImageOf<PixelMono> result;
result.copy(img1);
checkEqual(img1.width(),result.width(),"width check");
checkEqual(img1.height(),result.height(),"height check");
if (img1.width()==result.width() &&
img1.height()==result.height()) {
int mismatch = 0;
for (int x=0; x<img1.width(); x++) {
for (int y=0; y<img1.height(); y++) {
PixelRgb& pix0 = img1.pixel(x,y);
PixelMono& pix1 = result.pixel(x,y);
if (pix0.r>pix1+1 || pix0.r<pix1-1) {
mismatch++;
}
}
}
checkTrue(mismatch==0,"pixel match check");
}
}
Example 5: glReadPixels
bool OdeSdlSimulation::getImage(ImageOf<PixelRgb>& target) {
int w = cameraSizeWidth;
int h = cameraSizeHeight;
int p = 3;
char *buf=new char[w * h * p];
glReadPixels( 0, 0, w, h, GL_RGB, GL_UNSIGNED_BYTE, buf);
ImageOf<PixelRgb> img;
img.setQuantum(1);
img.setExternal(buf,w,h);
// inefficient flip!
target.resize(img);
int ww = img.width();
int hh = img.height();
for (int x=0; x<ww; x++) {
for (int y=0; y<hh; y++) {
target(x,y) = img(x,hh-1-y);
}
}
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
delete[] buf;
return true;
}
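The per-pixel loop above is flagged as inefficient; since only the row order changes, the flip can be done one row at a time. A sketch of the same flip using row-wise copies, assuming the tightly packed RGB buffer (quantum 1) set up above; getRow() is YARP's row accessor and memcpy needs <cstring>:
// row-wise vertical flip: copy each source row into its mirrored destination row
target.resize(w, h);
const int rowBytes = w * p;                            // 3 bytes per RGB pixel, quantum 1
for (int y = 0; y < h; y++) {
    const char *srcRow = buf + (h - 1 - y) * rowBytes;
    memcpy(target.getRow(y), srcRow, (size_t)rowBytes);
}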
Example 6: onRead
void CalibModule::onRead(ImageOf<PixelMono> &imgIn)
{
mutex.lock();
Vector kinPoint,o;
iarm->getPose(kinPoint,o);
Vector c,tipl(2,0.0),tipr(2,0.0);
igaze->get2DPixel(0,kinPoint,c);
ImageOf<PixelBgr> imgOut;
cv::Rect rect=extractFingerTip(imgIn,imgOut,c,tipl);
bool holdImg=false;
if (motorExplorationState==motorExplorationStateLog)
{
cv::Scalar color=cv::Scalar(0,0,255);
string tag="discarded";
if (enabled && (depthRpcPort.getOutputCount()>0))
{
Vector depthPoint;
if (getDepthAveraged(tipl,depthPoint,tipr))
{
// consistency check
double dist=norm(depthPoint-kinPoint);
if (dist<max_dist)
{
color=cv::Scalar(0,255,0);
tag="matched";
if (exp_depth2kin)
{
calibrator->addPoints(depthPoint,kinPoint);
yInfo("collecting calibration points: p_depth=(%s); p_kin=(%s);",
depthPoint.toString(3,3).c_str(),kinPoint.toString(3,3).c_str());
tag="acquired";
}
if (exp_aligneyes)
{
Vector kinPoint4=kinPoint;
kinPoint4.push_back(1.0);
Vector xe,oe,kinPoint_e;
Matrix He;
igaze->getLeftEyePose(xe,oe);
He=axis2dcm(oe);
xe.push_back(1.0);
He.setCol(3,xe);
kinPoint_e=SE3inv(He)*kinPoint4;
aligner.addPoints(tipl,kinPoint_e);
yInfo("collecting points for aligning eye: tip=(%s); p_kin=(%s);",
tipl.toString(3,3).c_str(),kinPoint_e.toString(3,3).c_str());
tag="acquired";
}
}
else
yInfo("discarding calibration points: p_depth=(%s); p_kin=(%s); (|p_depth-p_kin|=%g)>%g",
depthPoint.toString(3,3).c_str(),kinPoint.toString(3,3).c_str(),dist,max_dist);
}
else
{
yInfo("unavailable depth; discarding...");
tag="no-depth";
}
}
cv::Mat img=cv::cvarrToMat((IplImage*)imgOut.getIplImage());
cv::putText(img,tag.c_str(),cv::Point(rect.x+5,rect.y+15),CV_FONT_HERSHEY_SIMPLEX,0.5,color);
motorExplorationState=motorExplorationStateTrigger;
holdImg=true;
}
if (depthOutPort.getOutputCount()>0)
{
depthOutPort.prepare()=imgOut;
depthOutPort.write();
if (holdImg)
Time::delay(0.5);
}
mutex.unlock();
}
Example 7: SendPictureOCVNative
/*!
* Sends a picture in the OpenCV native format IplImage
*/
void SamgarModule::SendPictureOCVNative( IplImage* Image ) {
ImageOf<PixelBgr> yarpReturnImage;
yarpReturnImage.wrapIplImage( Image );
SendPictureYarpNative( yarpReturnImage );
}
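wrapIplImage() typically makes the YARP image refer to the IplImage data rather than copying it (a conversion only happens when the pixel formats differ), so the IplImage must stay valid while the wrapped image is in use. A hedged usage sketch with the old OpenCV C capture API; 'module' stands for an existing SamgarModule instance and the camera index is an assumption:
CvCapture *cap = cvCaptureFromCAM(0);            // assumes a default camera is available
if (cap != NULL) {
    IplImage *frame = cvQueryFrame(cap);         // owned by the capture, do not release
    if (frame != NULL)
        module.SendPictureOCVNative(frame);
    cvReleaseCapture(&cap);
}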
Example 8: main
int main() {
Network yarp; // set up yarp
BufferedPort<ImageOf<PixelRgb> > imagePort; // creates an instance (the imagePort port) of the BufferedPort class, which sends/receives an RGB image in the background without blocking the process.
BufferedPort<Vector> targetPort; // creates an instance (the targetPort port) of the BufferedPort class, which sends/receives a vector
imagePort.open("/tutorial/image/in"); // give the port a name
targetPort.open("/tutorial/target/out");
//Network::connect("/icubSim/cam/left","/tutorial/image/in");
Network::connect("/icub/cam/left","/tutorial/image/in");//la porta /icub/cam/left è aperta dal modulo camera
while (1) { // repeat forever
ImageOf<PixelRgb> *image = imagePort.read(); // read an image: creates a pointer 'image' of type ImageOf<PixelRgb> and assigns it whatever is read from the imagePort port.
if (image!=NULL) { // check we actually got something
printf("We got an image of size %dx%d\n", image->width(), image->height());
//pramod added nov 15
IplImage *frame_temp = (IplImage*)image->getIplImage();
IplImage *frame1 = cvCreateImage( cvSize(frame_temp->width, frame_temp->height), frame_temp->depth, 3 );
cvCvtColor(frame_temp,frame1, CV_RGB2BGR);
frame1->origin=0;
cvNamedWindow("cName",CV_WINDOW_AUTOSIZE);
cvShowImage("cName",frame1);
// cvWaitKey(0);
//pramod added ends
int nl= frame1->height; // number of lines
int nc= frame1->width * frame1->nChannels; // total number of element per line
int step= frame1->widthStep; // effective width
// get the pointer to the image buffer
unsigned char *data= reinterpret_cast<unsigned char*>(frame1->imageData);
int ct=0;double xmean=0.0;double ymean=0.0;
for (int i=1; i<nl; i++) {
for (int j=0; j<nc; j+= frame1->nChannels) {
// finding blue pixels ---------------------
//printf("%d %d %d \n",(int)data[j],(int)data[j+1],(int)data[j+2]);
if((int)data[j]>(int)data[j+1]&&(int)data[j]>(int)data[j+2])
{
// printf("%d %d %d \n",(int)data[j],(int)data[j+1],(int)data[j+2]);
//getchar();
ct++;
xmean += (double)j/3.0;
ymean += (double)i;
// data[j]=data[j+1]=data[j+2]=0;
data[j]=255;
}
// end of pixel processing ----------------
} // end of line
data+= step; // next line
}
cvNamedWindow("cName1",CV_WINDOW_AUTOSIZE);
cvShowImage("cName1",frame1);
char c = cvWaitKey(33);
if(c==27) break;
if (ct>0) {
xmean /= (double)ct;
ymean /= (double)ct;
}
printf("current value of ct %d \n",ct);
if (ct>(frame1->width/20)*(frame1->height/20))
{
printf("Best guess at blue target: %f %f \n", xmean, ymean);
cvCircle(frame1,cvPoint((int)xmean,(int)ymean),5,cvScalar(0,0,255,0));
cvNamedWindow("cName1",CV_WINDOW_AUTOSIZE);
cvShowImage("cName1",frame1);
char c = cvWaitKey(33);
if(c==27) break;
Vector& target = targetPort.prepare();
target.resize(3);
target[0] = xmean;
target[1] = ymean;
target[2] = 1;
targetPort.write();
} else {
Vector& target = targetPort.prepare();
target.resize(3);
target[0] = 0;
target[1] = 0;
target[2] = 0;
targetPort.write();
}
/* double xMean = 0;
double yMean = 0;
int ct = 0;
for (int x=0; x<image->width(); x++) {
for (int y=0; y<image->height(); y++) {
PixelRgb& pixel = image->pixel(x,y); // define the variable 'pixel' (a reference) as an instance of the PixelRgb class. Like a pointer, a reference refers to the memory location of another variable (here the pixel(x,y) of 'image'), but, like an ordinary variable, it needs no explicit dereferencing operator.
// very simple test for blueishness
// make sure blue level exceeds red and green by a factor of 2
if (pixel.b>pixel.g){ //&& pixel.b>pixel.g){//*1.2+10) {
// there's a blueish pixel at (x,y)!
// let's find the average location of these pixels
xMean += x;
yMean += y;
//......... (part of the code is omitted here) .........
Example 9: shallowCopyImages
void RGBDSensorWrapper::shallowCopyImages(const ImageOf<PixelFloat>& src, ImageOf<PixelFloat>& dest)
{
dest.setQuantum(src.getQuantum());
dest.setExternal(src.getRawImage(), src.width(), src.height());
}
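Because setExternal() only points dest at src's pixel buffer, no pixels are copied and dest remains valid only as long as src's buffer does. A minimal sketch contrasting the shallow copy with a deep copy (the call is written as if the helper were free-standing):
ImageOf<PixelFloat> depth, shallow, deep;
depth.resize(320, 240);
shallowCopyImages(depth, shallow);   // shares depth's buffer: cheap, but tied to depth's lifetime
deep.copy(depth);                    // allocates its own buffer and duplicates the pixels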
Example 10: main
int main()
{
Network yarp; // set up yarp
BufferedPort<ImageOf<PixelRgb> > imagePort;
BufferedPort<Vector> targetPort;
imagePort.open("/tutorial/image/in");
targetPort.open("/tutorial/target/out");
//Network::connect("/icubSim/cam/left","/tutorial/image/in");
Network::connect("/icub/cam/left","/tutorial/image/in");
IplImage* object_color;
while (1)
{ // repeat forever
ImageOf<PixelRgb> *image = imagePort.read();
if (image!=NULL)
{
printf("We got an image of size %dx%d\n", image->width(), image->height());
IplImage *frame_temp = (IplImage*)image->getIplImage();
IplImage *frames = cvCreateImage( cvSize(frame_temp->width, frame_temp->height), frame_temp->depth, 3 );
cvCvtColor(frame_temp,frames, CV_RGB2BGR);
frames->origin=0;
cvNamedWindow("cNameselect",CV_WINDOW_AUTOSIZE);
cvShowImage("cNameselect",frames);
char c = cvWaitKey(33);
if(c==27)
{
// copy the selected frame before releasing it; frame_temp comes from
// image->getIplImage() and is owned by the YARP image, so it is not released here
object_color = cvCreateImage( cvSize(frames->width, frames->height), frames->depth, 3 );
cvCopy(frames,object_color);
cvReleaseImage(&frames);
break;
}
else
{
cvReleaseImage(&frames);
}
}
}
cvNamedWindow("cNameoriginal",CV_WINDOW_AUTOSIZE);
cvShowImage("cNameoriginal",object_color);
while (1)
{
ImageOf<PixelRgb> *image = imagePort.read();
if (image!=NULL)
{
printf("We got an image of size %dx%d\n", image->width(), image->height());
IplImage *frame_temp = (IplImage*)image->getIplImage(); // no separate allocation needed: getIplImage() returns a view of the YARP image
IplImage *frame1 = cvCreateImage( cvSize(frame_temp->width, frame_temp->height), IPL_DEPTH_8U, 1 );
cvCvtColor(frame_temp,frame1, CV_RGB2GRAY);
//send values to the lookatlocation
// NOTE: xmean and ymean are not computed in this excerpt; the blob-detection code
// that fills them has apparently been omitted, so placeholders are declared here
// only to keep the fragment compilable.
double xmean=0.0, ymean=0.0;
Vector& target = targetPort.prepare();
target.resize(3);
target[0] = xmean;
target[1] = ymean;
target[2] = 1;
targetPort.write();
}
}
return 0;
}
Example 11: ImageWriteMono
static bool ImageWriteMono(ImageOf<PixelMono>& img, const char *filename)
{
return SavePGM((char*)img.getRawImage(), filename, img.height(), img.width(), img.getRowSize());
}
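A round-trip sketch pairing this writer with the ImageReadMono() shown in Example 2; the filenames are placeholders and SavePGM is assumed to be the PGM helper used by the surrounding code:
ImageOf<PixelMono> img;
if (ImageReadMono(img, "input.pgm"))    // hypothetical input file
    ImageWriteMono(img, "copy.pgm");    // writes the same pixels back out as a PGM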
Example 12: printf
bool SCSPMClassifier::train(Bottle *locations, Bottle &reply)
{
if(locations==NULL)
return false;
string object_name=locations->get(0).asList()->get(0).asString().c_str();
if(burst)
currObject=object_name.c_str();
// Save Features
if(doTrain)
{
Bottle cmdClass;
cmdClass.addString("save");
cmdClass.addString(object_name.c_str());
Bottle classReply;
printf("Sending training request: %s\n",cmdClass.toString().c_str());
rpcClassifier.write(cmdClass,classReply);
printf("Received reply: %s\n",classReply.toString().c_str());
}
// Read Image and Locations
ImageOf<PixelRgb> *image = imgInput.read(true);
if(image==NULL)
return false;
IplImage* img= (IplImage*) image->getIplImage();
Bottle* bb=locations->get(0).asList()->get(1).asList();
int x_min=bb->get(0).asInt();
int y_min=bb->get(1).asInt();
int x_max=bb->get(2).asInt();
int y_max=bb->get(3).asInt();
if(x_min>5)
x_min=x_min-5;
if(y_min>5)
y_min=y_min-5;
if((x_max+5)<img->width)    // expand the box by 5 px, clamped to the image width
x_max=x_max+5;
if((y_max+5)<img->height)   // ... and to the image height
y_max=y_max+5;
int blobW=x_max-x_min;
int blobH=y_max-y_min;
//Crop Image
cvSetImageROI(img,cvRect(x_min,y_min,blobW,blobH));
IplImage* croppedImg=cvCreateImage(cvGetSize(img), img->depth, img->nChannels);
cvCopy(img, croppedImg);
cvResetImageROI(img);
/*cvShowImage("blob", croppedImg);
cvShowImage("all", img);
cvWaitKey(0);*/
//Send Image to SC
ImageOf<PixelBgr>& outim=imgOutput.prepare();
outim.wrapIplImage(croppedImg);
imgOutput.write();
cvReleaseImage(&croppedImg);
//Read Coded Feature
Bottle fea;
featureInput.read(fea);
if(fea.size()==0)
return false;
//Send Feature to Classifier
if(!burst)
{
featureOutput.write(fea);
yarp::os::Time::delay(0.01);
}
else
{
trainingFeature.push_back(fea);
}
// get negatives
/*if(burst)
{
int offW=-3*blobW;
for (int w=0; w<4; w++)
{
int offH=-3*blobH;
for (int h=0; h<4; h++)
//......... (part of the code is omitted here) .........
Example 13: main
int main() {
Network yarp; // set up yarp
BufferedPort<ImageOf<PixelRgb> > imagePort; // creates an instance (the imagePort port) of the BufferedPort class, which sends/receives an RGB image in the background without blocking the process.
BufferedPort<Vector> targetPort; // creates an instance (the targetPort port) of the BufferedPort class, which sends/receives a vector
imagePort.open("/tutorial/image/in"); // give the port a name
targetPort.open("/tutorial/target/out");
//Network::connect("/icubSim/cam/left","/tutorial/image/in");
Network::connect("/icub/cam/left","/tutorial/image/in");//la porta /icub/cam/left è aperta dal modulo camera
//training part
while (1) { // repeat forever
ImageOf<PixelRgb> *image = imagePort.read(); // read an image: creates a pointer 'image' of type ImageOf<PixelRgb> and assigns it whatever is read from the imagePort port.
if (image!=NULL) { // check we actually got something
printf("We got an image of size %dx%d\n", image->width(), image->height());
//pramod added nov 15
IplImage *frame_temp = (IplImage*)image->getIplImage(); // no separate allocation needed: getIplImage() returns a view of the YARP image
IplImage *frame1 = cvCreateImage( cvSize(frame_temp->width, frame_temp->height), IPL_DEPTH_8U, 1 );
cvCvtColor(frame_temp,frame1, CV_RGB2GRAY);
cvSaveImage("object.pgm",frame1);
//frame1->origin=0;
// cvNamedWindow("cNameoriginal",CV_WINDOW_AUTOSIZE);
// cvShowImage("cNameoriginal",frame_temp);
//cvSaveImage("save_first.jpg",image1);
//cvReleaseImage(&image1);
cvNamedWindow("cNameselect",CV_WINDOW_AUTOSIZE);
cvShowImage("cNameselect",frame1);
cvReleaseImage(&frame1);
char c = cvWaitKey(33);
if(c==27) break;
}
}
printf("breaked");
system("/home/gabriele/siftWin32.exe <object.pgm >tmp1.txt");
IplImage *image1 = cvLoadImage("object.pgm");
FILE* fp1=fopen("tmp1.txt","rb");
float *arr1x; float *arr1y; int *arr2;float scale,ori;int temp1, temp2;
fscanf(fp1,"%d %d \n",&temp1, &temp2);
//printf("%d %d \n",temp1,temp2);
//getchar();
arr1x=(float*)malloc(sizeof(float)*temp1);
arr1y=(float*)malloc(sizeof(float)*temp1);
arr2=(int*)malloc(sizeof(int)*temp2*temp1);
for(int i2=1;i2<=temp1;i2++)
{
//printf("temp1 %d \n",i2);
fscanf(fp1,"%f %f %f %f \n", &arr1x[i2-1], &arr1y[i2-1], &scale,&ori);
//printf("%f %f %f %f \n", arr1x[i2-1], arr1y[i2-1], scale,ori);
//getchar();
for(int i3=1;i3<=temp2;i3++)
{
fscanf(fp1,"%d ", &arr2[(i2-1)*temp2+(i3-1)]);
//printf("%d ", arr2[(i2-1)*temp2+(i3-1)]);
}fscanf(fp1,"\n");printf("\n");
cvCircle(image1,cvPoint((int)arr1x[i2-1],(int)arr1y[i2-1]),3,cvScalar(0,255,255,255),1);
//getchar();
}
cvNamedWindow("cNameobject",CV_WINDOW_AUTOSIZE);
cvShowImage("cNameobject",image1);
printf("first one finished \n");
fclose(fp1);
//training part ends
while (1) { // repeat forever
ImageOf<PixelRgb> *image = imagePort.read(); // read an image: creates a pointer 'image' of type ImageOf<PixelRgb> and assigns it whatever is read from the imagePort port.
if (image!=NULL) { // check we actually got something
printf("We got an image of size %dx%d\n", image->width(), image->height());
//pramod added nov 15
IplImage *frame_temp = (IplImage*)image->getIplImage(); // no separate allocation needed: getIplImage() returns a view of the YARP image
IplImage *frame1 = cvCreateImage( cvSize(frame_temp->width, frame_temp->height), IPL_DEPTH_8U, 1 );
cvCvtColor(frame_temp,frame1, CV_RGB2GRAY);
cvSaveImage("object_test.pgm",frame1);
//frame1->origin=0;
cvNamedWindow("cNameoriginal",CV_WINDOW_AUTOSIZE);
cvShowImage("cNameoriginal",frame_temp);
// cvWaitKey(0);
//pramod added ends
system("/home/gabriele/siftWin32.exe <object_test.pgm >tmp2.txt");
//......... (part of the code is omitted here) .........
Example 14: updateModule
bool updateModule()
{
ImageOf<PixelBgr> *img=imgInPort.read();
if (img==NULL)
return true;
mutex.lock();
ImageOf<PixelMono> imgMono;
imgMono.resize(img->width(),img->height());
cv::Mat imgMat=cv::cvarrToMat((IplImage*)img->getIplImage());
cv::Mat imgMonoMat=cv::cvarrToMat((IplImage*)imgMono.getIplImage());
cv::cvtColor(imgMat,imgMonoMat,CV_BGR2GRAY);
if (initBoundingBox)
{
tracker->initialise(imgMonoMat,tl,br);
initBoundingBox=false;
}
if (doCMT)
{
if (tracker->processFrame(imgMonoMat))
{
if (dataOutPort.getOutputCount()>0)
{
Bottle &data=dataOutPort.prepare();
data.clear();
data.addInt((int)tracker->topLeft.x);
data.addInt((int)tracker->topLeft.y);
data.addInt((int)tracker->topRight.x);
data.addInt((int)tracker->topRight.y);
data.addInt((int)tracker->bottomRight.x);
data.addInt((int)tracker->bottomRight.y);
data.addInt((int)tracker->bottomLeft.x);
data.addInt((int)tracker->bottomLeft.y);
dataOutPort.write();
}
if (imgOutPort.getOutputCount()>0)
{
cv::line(imgMat,tracker->topLeft,tracker->topRight,cv::Scalar(255,0,0),2);
cv::line(imgMat,tracker->topRight,tracker->bottomRight,cv::Scalar(255,0,0),2);
cv::line(imgMat,tracker->bottomRight,tracker->bottomLeft,cv::Scalar(255,0,0),2);
cv::line(imgMat,tracker->bottomLeft,tracker->topLeft,cv::Scalar(255,0,0),2);
for (size_t i=0; i<tracker->trackedKeypoints.size(); i++)
cv::circle(imgMat,tracker->trackedKeypoints[i].first.pt,3,cv::Scalar(0,255,0));
}
}
}
if (imgOutPort.getOutputCount()>0)
{
imgOutPort.prepare()=*img;
imgOutPort.write();
}
mutex.unlock();
return true;
}
Example 15: yWarning
void CamCalibPort::onRead(ImageOf<PixelRgb> &yrpImgIn)
{
double t=Time::now();
yarp::os::Stamp s;
this->getEnvelope(s);
double time = s.getTime();
if (!updatePose(time)) {
return;
}
unsigned char *pixel = yrpImgIn.getPixelAddress(0, 0);
double *stamp = reinterpret_cast<double*>(pixel);
double backdoorTime = stamp[0];
double backdoorRoll = stamp[1];
double backdoorPitch = stamp[2];
double backdoorYaw = stamp[3];
if (time != backdoorTime) {
yWarning() << "Backdoor time:" << backdoorTime << "Imu time:" << time << "diff:" << (backdoorTime - time);
}
Bottle& b = rpyPort.prepare();
b.clear();
b.addDouble(roll);
b.addDouble(pitch);
b.addDouble(yaw);
b.addDouble(backdoorRoll);
b.addDouble(backdoorPitch);
b.addDouble(backdoorYaw);
b.addDouble(backdoorRoll - roll);
b.addDouble(backdoorPitch - pitch);
b.addDouble(backdoorYaw - yaw);
rpyPort.write();
// execute calibration
if (portImgOut!=NULL) {
yarp::sig::ImageOf<PixelRgb> &yrpImgOut=portImgOut->prepare();
if (verbose) {
yDebug("received input image after %g [s] ... ",t-t0);
}
double t1=Time::now();
if (calibTool!=NULL) {
calibTool->apply(yrpImgIn,yrpImgOut);
for (int r =0; r <yrpImgOut.height(); r++) {
for (int c=0; c<yrpImgOut.width(); c++) {
unsigned char *pixel = yrpImgOut.getPixelAddress(c,r);
double mean = (1.0/3.0)*(pixel[0]+pixel[1]+pixel[2]);
for(int i=0; i<3; i++) {
double s=pixel[i]-mean;
double sn=currSat*s;
sn+=mean;
if(sn<0.0)
sn=0.0;
else if(sn>255.0)
sn=255.0;
pixel[i]=(unsigned char)sn;
}
}
}
if (verbose)
yDebug("calibrated in %g [s]\n",Time::now()-t1);
} else {
yrpImgOut=yrpImgIn;
if (verbose)
yDebug("just copied in %g [s]\n",Time::now()-t1);
}
m.lock();
//timestamp propagation
//yarp::os::Stamp stamp;
//BufferedPort<ImageOf<PixelRgb> >::getEnvelope(stamp);
//portImgOut->setEnvelope(stamp);
Bottle pose;
pose.addDouble(roll);
pose.addDouble(pitch);
pose.addDouble(yaw);
portImgOut->setEnvelope(pose);
portImgOut->writeStrict();
m.unlock();
}
t0=t;
}
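The inner loop above adjusts saturation by scaling each channel's distance from the per-pixel mean by currSat and clamping the result to [0,255]. A minimal standalone sketch of the same per-pixel rule (the function name is illustrative):
// hypothetical helper equivalent to the per-pixel saturation step above
inline void applySaturation(unsigned char *pixel, double sat)
{
    double mean = (pixel[0] + pixel[1] + pixel[2]) / 3.0;
    for (int i = 0; i < 3; i++) {
        double v = mean + sat * (pixel[i] - mean);
        if (v < 0.0)   v = 0.0;
        if (v > 255.0) v = 255.0;
        pixel[i] = (unsigned char)v;
    }
}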