本文整理汇总了C++中DJI_event类的典型用法代码示例。如果您正苦于以下问题:C++ DJI_event类的具体用法?C++ DJI_event怎么用?C++ DJI_event使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了DJI_event类的10个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: guidance_callback
//.........这里部分代码省略.........
for(size_t i=0;i<nextrightpts.size();i++)
{
if(nextrightpts[i].x>=0 && nextrightpts[i].x<=WIDTH && nextrightpts[i].y>=0 && nextrightpts[i].y<=HEIGHT)
{
CvScalar line_color; line_color = CV_RGB(255,0,0);
//int line_thickness = 1;
CvPoint p,q;
p.x = (int) prevrightpts[i].x;
p.y = (int) prevrightpts[i].y;
q.x = (int) nextrightpts[i].x;
q.y = (int) nextrightpts[i].y;
// double angle; angle = atan2( (double) p.y - q.y, (double) p.x - q.x );
// double hypotenuse; hypotenuse = sqrt( pow((p.y - q.y),2) + pow((p.x - q.x),2) );
//
//// //scale the line by a factor of 3
// q.x = (int) (p.x - 2 * hypotenuse * cos(angle));
// q.y = (int) (p.y - 2 * hypotenuse * sin(angle));
//
// cvLine( &prevrightimg, p, q, line_color, line_thickness, CV_AA, 0 );
//
// p.x = (int) (q.x + 9 * cos(angle + pi / 4));
// p.y = (int) (q.y + 9 * sin(angle + pi / 4));
// cvLine( &prevrightimg, p, q, line_color, line_thickness, CV_AA, 0 );
// p.x = (int) (q.x + 9 * cos(angle - pi / 4));
// p.y = (int) (q.y + 9 * sin(angle - pi / 4));
// cvLine( &prevrightimg, p, q, line_color, line_thickness, CV_AA, 0 );
//
arrowedLine(right_mask, p, q, line_color);
circle(right_mask, nextrightpts[i], 2, Scalar(0,255,0),-1);
kpts.push_back(nextrightpts[i]);
}
}
imshow("right", right_mask);
moveWindow("right", 500, 0);
}
if(frame_num>0)
{
if(nextrightpts.size()<=MIN_FEATURES)
{
goodFeaturesToTrack(g_greyscale_image_right, prevrightpts, MAX_FEATURES, QUALITY_EIGVALUE, MIN_DISTANCE);
}
else
{
prevrightpts.assign(kpts.begin(), kpts.end());
}
}
g_greyscale_image_right.copyTo(prevrightimg);
frame_num++;
}
//Stero matching
// Mat g_l,g_r;
//
// cvtColor(g_greyscale_image_left, g_l, CV_BGR2GRAY);
// cvtColor(g_greyscale_image_right,g_r, CV_BGR2GRAY);
Mat imgDisparity16S = Mat( g_greyscale_image_left.rows, g_greyscale_image_left.cols, CV_16S );
Mat imgDisparity8U = Mat( g_greyscale_image_left.rows, g_greyscale_image_left.cols, CV_8UC1 );
int ndisparities = 16*4;
int SADWindowSize = 7;
Ptr<StereoBM> sbm = StereoBM::create( ndisparities, SADWindowSize );
sbm->compute( g_greyscale_image_left, g_greyscale_image_right, imgDisparity16S );
double minVal; double maxVal;
minMaxLoc( imgDisparity16S, &minVal, &maxVal );
printf("Min disp: %f Max value: %f \n", minVal, maxVal);
imgDisparity16S.convertTo( imgDisparity8U, CV_8UC1, 255/(maxVal - minVal));
namedWindow( "windowDisparity", WINDOW_AUTOSIZE );
imshow( "windowDisparity", imgDisparity8U );
moveWindow("windowDisparity", 500, 500);
}
waitKey(1);
}
g_lock.leave();
g_event.set_DJI_event();
return 0;
}
示例2: my_callback
int my_callback(int data_type, int data_len, char *content)
{
printf("enter callback..\n");
g_lock.enter();
if (e_image == data_type && NULL != content)
{
printf("callback: type is image..\n");
image_data data;
memcpy((char*)&data, content, sizeof(data));
memcpy(g_imleft.data, data.m_greyscale_image_left[selected_vbus], IMAGE_SIZE);
memcpy(g_imright.data, data.m_greyscale_image_right[selected_vbus], IMAGE_SIZE);
memcpy(g_depth.data, data.m_depth_image[selected_vbus], IMAGE_SIZE * 2);
}
g_lock.leave();
g_event.set_event();
return 0;
}
示例3: main
// Demo entry point: configures the Guidance unit, pumps 1000 data packets
// through my_callback, then shuts the transfer down cleanly.
int main(int argc, const char** argv)
{
    reset_config();
    int err_code = init_transfer();
    RETURN_IF_ERR( err_code );
#if !USE_GUIDANCE_ASSISTANT_CONFIG
    // Manual data subscription: left/right greyscale and depth are checked
    // for errors; the auxiliary stream selections below return no code we use.
    err_code = select_greyscale_image( sensor_id, true );
    RETURN_IF_ERR( err_code );
    err_code = select_greyscale_image( sensor_id, false );
    RETURN_IF_ERR( err_code );
    err_code = select_depth_image( sensor_id );
    RETURN_IF_ERR( err_code );
    select_imu();
    select_ultrasonic();
    select_obstacle_distance();
    select_velocity();
#endif
    err_code = set_sdk_event_handler( my_callback );
    RETURN_IF_ERR( err_code );
    err_code = start_transfer();
    RETURN_IF_ERR( err_code );
    // Block on the event 1000 times; each set_event() from the callback
    // corresponds to one processed data packet.
    int packets_remaining = 1000;
    while ( packets_remaining-- > 0 )
    {
        g_event.wait_event();
    }
    err_code = stop_transfer();
    RETURN_IF_ERR( err_code );
    // make sure the ack packet from GUIDANCE is received
    // NOTE(review): the unit of sleep() here is unclear (seconds vs a
    // platform shim taking microseconds) -- confirm 1000000 is intended.
    sleep( 1000000 );
    err_code = release_transfer();
    RETURN_IF_ERR( err_code );
    return 0;
}
示例4: my_callback
int my_callback(int data_type, int data_len, char *content)
{
g_lock.enter();
/* image data */
/* if (e_image == data_type && NULL != content)
{
image_data* data = (image_data*)content;
if ( data->m_greyscale_image_left[CAMERA_ID] ){
memcpy(g_greyscale_image_left.data, data->m_greyscale_image_left[CAMERA_ID], IMAGE_SIZE);
imshow("left", g_greyscale_image_left);
// publish left greyscale image
cv_bridge::CvImage left_8;
g_greyscale_image_left.copyTo(left_8.image);
left_8.header.frame_id = "guidance";
left_8.header.stamp = ros::Time::now();
left_8.encoding = sensor_msgs::image_encodings::MONO8;
left_image_pub.publish(left_8.toImageMsg());
}
if ( data->m_greyscale_image_right[CAMERA_ID] ){
memcpy(g_greyscale_image_right.data, data->m_greyscale_image_right[CAMERA_ID], IMAGE_SIZE);
imshow("right", g_greyscale_image_right);
// publish right greyscale image
cv_bridge::CvImage right_8;
g_greyscale_image_right.copyTo(right_8.image);
right_8.header.frame_id = "guidance";
right_8.header.stamp = ros::Time::now();
right_8.encoding = sensor_msgs::image_encodings::MONO8;
right_image_pub.publish(right_8.toImageMsg());
}
if ( data->m_depth_image[CAMERA_ID] ){
memcpy(g_depth.data, data->m_depth_image[CAMERA_ID], IMAGE_SIZE * 2);
g_depth.convertTo(depth8, CV_8UC1);
imshow("depth", depth8);
//publish depth image
cv_bridge::CvImage depth_16;
g_depth.copyTo(depth_16.image);
depth_16.header.frame_id = "guidance";
depth_16.header.stamp = ros::Time::now();
depth_16.encoding = sensor_msgs::image_encodings::MONO16;
depth_image_pub.publish(depth_16.toImageMsg());
}
key = waitKey(1);
}
/* imu */
/*
if ( e_imu == data_type && NULL != content )
{
imu *imu_data = (imu*)content;
printf( "frame index: %d, stamp: %d\n", imu_data->frame_index, imu_data->time_stamp );
printf( "imu: [%f %f %f %f %f %f %f]\n", imu_data->acc_x, imu_data->acc_y, imu_data->acc_z, imu_data->q[0], imu_data->q[1], imu_data->q[2], imu_data->q[3] );
// publish imu data
geometry_msgs::TransformStamped g_imu;
g_imu.header.frame_id = "guidance";
g_imu.header.stamp = ros::Time::now();
g_imu.transform.translation.x = imu_data->acc_x;
g_imu.transform.translation.y = imu_data->acc_y;
g_imu.transform.translation.z = imu_data->acc_z;
g_imu.transform.rotation.w = imu_data->q[0];
g_imu.transform.rotation.x = imu_data->q[1];
g_imu.transform.rotation.y = imu_data->q[2];
g_imu.transform.rotation.z = imu_data->q[3];
imu_pub.publish(g_imu);
}
*/
/* velocity */
/*
if ( e_velocity == data_type && NULL != content )
{
velocity *vo = (velocity*)content;
printf( "frame index: %d, stamp: %d\n", vo->frame_index, vo->time_stamp );
printf( "vx:%f vy:%f vz:%f\n", 0.001f * vo->vx, 0.001f * vo->vy, 0.001f * vo->vz );
// publish velocity
geometry_msgs::Vector3Stamped g_vo;
g_vo.header.frame_id = "guidance";
g_vo.header.stamp = ros::Time::now();
g_vo.vector.x = 0.001f * vo->vx;
g_vo.vector.y = 0.001f * vo->vy;
g_vo.vector.z = 0.001f * vo->vz;
velocity_pub.publish(g_vo);
}
*/
/* obstacle distance */
if ( e_obstacle_distance == data_type && NULL != content )
{
obstacle_distance *oa = (obstacle_distance*)content;
printf( "frame index: %d, stamp: %d\n", oa->frame_index, oa->time_stamp );
printf( "obstacle distance:" );
for ( int i = 0; i < CAMERA_PAIR_NUM; ++i )
{
printf( " %f ", 0.01f * oa->distance[i] );
}
printf( "\n" );
// publish obstacle distance
sensor_msgs::LaserScan g_oa;
//.........这里部分代码省略.........
示例5: main
int main(int argc, char** argv)
{
/* if(argc>1){
printf("This is demo program showing data from Guidance.\n\t"
" 'a','d','w','s','x' to select sensor direction.\n\t"
" 'j','k' to change the exposure parameters.\n\t"
" 'm' to switch between AEC and constant exposure modes.\n\t"
" 'n' to return to default exposure mode and parameters.\n\t"
" 'q' to quit.");
return 0;
}
/* initialize ros */
ros::init(argc, argv, "GuidanceNode");
ros::NodeHandle my_node;
// depth_image_pub = my_node.advertise<sensor_msgs::Image>("/guidance/depth_image",1);
// left_image_pub = my_node.advertise<sensor_msgs::Image>("/guidance/left_image",1);
// right_image_pub = my_node.advertise<sensor_msgs::Image>("/guidance/right_image",1);
//imu_pub = my_node.advertise<geometry_msgs::TransformStamped>("/guidance/imu",1);
//velocity_pub = my_node.advertise<geometry_msgs::Vector3Stamped>("/guidance/velocity",1);
obstacle_distance_pub = my_node.advertise<sensor_msgs::LaserScan>("/guidance/obstacle_distance",1);
//ultrasonic_pub = my_node.advertise<sensor_msgs::LaserScan>("/guidance/ultrasonic",1);
/* initialize guidance */
reset_config();
int err_code = init_transfer();
RETURN_IF_ERR(err_code);
int online_status[CAMERA_PAIR_NUM];
err_code = get_online_status(online_status);
RETURN_IF_ERR(err_code);
std::cout<<"Sensor online status: ";
for (int i=0; i<CAMERA_PAIR_NUM; i++)
std::cout<<online_status[i]<<" ";
std::cout<<std::endl;
// get cali param
stereo_cali cali[CAMERA_PAIR_NUM];
err_code = get_stereo_cali(cali);
RETURN_IF_ERR(err_code);
std::cout<<"cu\tcv\tfocal\tbaseline\n";
for (int i=0; i<CAMERA_PAIR_NUM; i++)
{
std::cout<<cali[i].cu<<"\t"<<cali[i].cv<<"\t"<<cali[i].focal<<"\t"<<cali[i].baseline<<std::endl;
}
/* select data */
err_code = select_greyscale_image(CAMERA_ID, true);
RETURN_IF_ERR(err_code);
err_code = select_greyscale_image(CAMERA_ID, false);
RETURN_IF_ERR(err_code);
err_code = select_depth_image(CAMERA_ID);
RETURN_IF_ERR(err_code);
select_imu();
select_ultrasonic();
select_obstacle_distance();
select_velocity();
/* start data transfer */
err_code = set_sdk_event_handler(my_callback);
RETURN_IF_ERR(err_code);
err_code = start_transfer();
RETURN_IF_ERR(err_code);
// for setting exposure
exposure_param para;
para.m_is_auto_exposure = 1;
para.m_step = 10;
para.m_expected_brightness = 120;
para.m_camera_pair_index = CAMERA_ID;
std::cout << "start_transfer" << std::endl;
while (ros::ok())
{
g_event.wait_event();
if (key > 0)
// set exposure parameters
if(key=='j' || key=='k' || key=='m' || key=='n'){
if(key=='j')
if(para.m_is_auto_exposure) para.m_expected_brightness += 20;
else para.m_exposure_time += 3;
else if(key=='k')
if(para.m_is_auto_exposure) para.m_expected_brightness -= 20;
else para.m_exposure_time -= 3;
else if(key=='m'){
para.m_is_auto_exposure = !para.m_is_auto_exposure;
std::cout<<"exposure is "<<para.m_is_auto_exposure<<std::endl;
}
else if(key=='n'){//return to default
para.m_expected_brightness = para.m_exposure_time = 0;
}
std::cout<<"Setting exposure parameters....SensorId="<<CAMERA_ID<<std::endl;
para.m_camera_pair_index = CAMERA_ID;
set_exposure_param(¶);
key = 0;
}
else {// switch image direction
err_code = stop_transfer();
RETURN_IF_ERR(err_code);
//.........这里部分代码省略.........
示例6: my_callback
int my_callback(int data_type, int data_len, char *content)
{
g_lock.enter();
/* image data */
if (e_image == data_type && NULL != content)
{
image_data* data = (image_data*)content;
dji_guidance::multi_image msg;
// forward facing guidance sensor is disabled for now...
//msg.images.push_back(create_image_message(data, e_vbus1));
msg.images.push_back(create_image_message(data, e_vbus2));
msg.images.push_back(create_image_message(data, e_vbus3));
msg.images.push_back(create_image_message(data, e_vbus4));
//msg.images.push_back(create_image_message(data, e_vbus5));
image_pub.publish(msg);
std::cout << "published " << msg.images.size() << " images" << std::endl;
}
/* imu */
if ( e_imu == data_type && NULL != content )
{
imu *imu_data = (imu*)content;
// printf( "frame index: %d, stamp: %d\n", imu_data->frame_index, imu_data->time_stamp );
// printf( "imu: [%f %f %f %f %f %f %f]\n", imu_data->acc_x, imu_data->acc_y, imu_data->acc_z, imu_data->q[0], imu_data->q[1], imu_data->q[2], imu_data->q[3] );
// publish imu data
geometry_msgs::TransformStamped g_imu;
g_imu.header.frame_id = "guidance";
g_imu.header.stamp = ros::Time::now();
g_imu.transform.translation.x = imu_data->acc_x;
g_imu.transform.translation.y = imu_data->acc_y;
g_imu.transform.translation.z = imu_data->acc_z;
g_imu.transform.rotation.w = imu_data->q[0];
g_imu.transform.rotation.x = imu_data->q[1];
g_imu.transform.rotation.y = imu_data->q[2];
g_imu.transform.rotation.z = imu_data->q[3];
imu_pub.publish(g_imu);
}
/* velocity */
if ( e_velocity == data_type && NULL != content )
{
velocity *vo = (velocity*)content;
// printf( "frame index: %d, stamp: %d\n", vo->frame_index, vo->time_stamp );
// printf( "vx:%f vy:%f vz:%f\n", 0.001f * vo->vx, 0.001f * vo->vy, 0.001f * vo->vz );
// publish velocity
geometry_msgs::Vector3Stamped g_vo;
g_vo.header.frame_id = "guidance";
g_vo.header.stamp = ros::Time::now();
g_vo.vector.x = 0.001f * vo->vx;
g_vo.vector.y = 0.001f * vo->vy;
g_vo.vector.z = 0.001f * vo->vz;
velocity_pub.publish(g_vo);
}
g_lock.leave();
g_event.set_event();
return 0;
}
示例7: my_callback
int my_callback(int data_type, int data_len, char *content)
{
g_lock.enter();
/* image data
if (e_image == data_type && NULL != content)
{
image_data data;
memcpy((char*)&data, content, sizeof(data));
memcpy(g_greyscale_image_left.data, data.m_greyscale_image_left[CAMERA_ID], IMAGE_SIZE);
memcpy(g_greyscale_image_right.data, data.m_greyscale_image_right[CAMERA_ID], IMAGE_SIZE);
memcpy(g_depth.data, data.m_depth_image[CAMERA_ID], IMAGE_SIZE * 2);
Mat depth8(HEIGHT, WIDTH, CV_8UC1);
g_depth.convertTo(depth8, CV_8UC1);
imshow("left", g_greyscale_image_left);
imshow("right", g_greyscale_image_right);
imshow("depth", depth8);
key = waitKey(1);
//publish depth image
cv_bridge::CvImage depth_16;
g_depth.copyTo(depth_16.image);
depth_16.header.frame_id = "guidance";
depth_16.header.stamp = ros::Time::now();
depth_16.encoding = sensor_msgs::image_encodings::MONO16;
depth_image_pub.publish(depth_16.toImageMsg());
// publish left greyscale image
cv_bridge::CvImage left_8;
g_greyscale_image_left.copyTo(left_8.image);
left_8.header.frame_id = "guidance";
left_8.header.stamp = ros::Time::now();
left_8.encoding = sensor_msgs::image_encodings::MONO8;
left_image_pub.publish(left_8.toImageMsg());
// publish right greyscale image
cv_bridge::CvImage right_8;
g_greyscale_image_left.copyTo(right_8.image);
right_8.header.frame_id = "guidance";
right_8.header.stamp = ros::Time::now();
right_8.encoding = sensor_msgs::image_encodings::MONO8;
right_image_pub.publish(right_8.toImageMsg());
}*/
/* imu */
if ( e_imu == data_type && NULL != content )
{
imu *imu_data = (imu*)content;
/* printf( "frame index: %d, stamp: %d\n", imu_data->frame_index, imu_data->time_stamp );
printf( "imu: [%f %f %f %f %f %f %f]\n", imu_data->acc_x, imu_data->acc_y, imu_data->acc_z, imu_data->q[0], imu_data->q[1], imu_data->q[2], imu_data->q[3] );
// publish imu data
geometry_msgs::TransformStamped g_imu;
g_imu.header.frame_id = "guidance";
g_imu.header.stamp = ros::Time::now();
g_imu.transform.translation.x = imu_data->acc_x;
g_imu.transform.translation.y = imu_data->acc_y;
g_imu.transform.translation.z = imu_data->acc_z;
g_imu.transform.rotation.x = imu_data->q[0];
g_imu.transform.rotation.y = imu_data->q[1];
g_imu.transform.rotation.z = imu_data->q[2];
g_imu.transform.rotation.w = imu_data->q[3];
imu_pub.publish(g_imu);
}
*/
imu_p.header.stamp = ros::Time::now();
imu_p.header.frame_id = "imu";
imu_p.orientation.x = imu_data->q[0];
imu_p.orientation.y = imu_data->q[1];
imu_p.orientation.z = imu_data->q[2];
imu_p.orientation.w = imu_data->q[3];
imu_p.linear_acceleration.x = imu_data->acc_x;
imu_p.linear_acceleration.y = imu_data->acc_y;
imu_p.linear_acceleration.z = imu_data->acc_z;
double roll, pitch, yaw;
tf::Quaternion orientation;
tf::quaternionMsgToTF(imu_p.orientation, orientation);
tf::Matrix3x3(orientation).getRPY(roll, pitch, yaw);
//printf( "roll:%f pitch:%f yaw:%f\n", roll, pitch, yaw );
//printf( "imu_data->acc_x:%f imu_data->acc_y:%f imu_data->acc_z:%f\n", imu_data->acc_x, imu_data->acc_y, imu_data->acc_z);
/*
if (se=="abc") orientation.setRPY(roll, pitch, yaw);
else
{
if(se=="acb") orientation.setRPY(roll, yaw, pitch);
else if(se=="bac") orientation.setRPY(pitch, roll, yaw);
else if(se=="bca") orientation.setRPY(pitch, yaw, roll);
else if(se=="cab") orientation.setRPY(yaw, roll, pitch);
else if(se=="cba") orientation.setRPY(yaw, pitch, roll);
//.........这里部分代码省略.........
示例8: main
int main(int argc, const char** argv)
{
if (argc>1)
help();
// Create the KCFTracker object with one of the available options
// HOG FIXEDWINDOW MULTISCALE LAB
KCFTracker tracker(false, true, true, false);
VideoWriter vWriter("result.avi", CV_FOURCC('M', 'J', 'P', 'G'), 25, Size(WIDTH, HEIGHT), false);
Rect trackWindow;
int hsize[] = { 16, 16 };
float hranges[] = { 0, 255 };
float dranges[] = { 0, 255 };
const float* phranges[] = { hranges, dranges };
selection = Rect(10, 10, 100, 100);
trackObject = 0;
namedWindow(winDemoName, CV_WINDOW_AUTOSIZE);
setMouseCallback(winDemoName, onMouse, 0);
bool paused = false;
// Connect to Guidance and subscribe data
reset_config();
int err_code = init_transfer();
RETURN_IF_ERR(err_code);
err_code = select_greyscale_image(selected_vbus, true);
RELEASE_IF_ERR(err_code);
err_code = select_greyscale_image(selected_vbus, false);
RELEASE_IF_ERR(err_code);
err_code = select_depth_image(selected_vbus);
RELEASE_IF_ERR(err_code);
err_code = set_sdk_event_handler(my_callback);
RELEASE_IF_ERR(err_code);
err_code = start_transfer();
RELEASE_IF_ERR(err_code);
Mat depth(HEIGHT, WIDTH, CV_8UC1);
for (int times = 0; times < 30000; ++times) {
g_event.wait_event();
// filter depth image
filterSpeckles(g_depth, -16, 50, 20);
// convert 16 bit depth image to 8 bit
g_depth.convertTo(depth, CV_8UC1);
imshow("depth", depth);
waitKey(1);
g_imleft.copyTo(image);
im.clear();
for (int i = 0; i < 3; i++)
im.push_back(image);
merge(im, image);
Rect new_rect;
if (!paused) {
if (trackObject) {
if (select_frame_count > 0) {
new_rect = tracker.update(image);
rectangle(image, new_rect, Scalar(255), 3);
}
else {
tracker.init(selection, image);
select_frame_count++;
}
trackObject = 1;
}
else if (trackObject < 0) {
paused = false;
}
}
if (selectObject && selection.width > 0 && selection.height > 0) {
rectangle(image, selection, Scalar(255), 3);
}
imshow(winDemoName, image);
vWriter << image;
char c = (char)waitKey(10);
if (c == 27 || c == 'q') {
break;
}
switch (c) {
case 'c':
trackObject = 0;
break;
case 'p':
case ' ':
paused = !paused;
//.........这里部分代码省略.........
示例9: my_callback
int my_callback(int data_type, int data_len, char *content)
{
g_lock.enter();
/* image data */
if (e_image == data_type && NULL != content)
{
ros::Time time_in_this_loop = ros::Time::now();
image_data* data = (image_data*)content;
if ( data->m_greyscale_image_left[CAMERA_ID] ) {
memcpy(g_greyscale_image_left.data, data->m_greyscale_image_left[CAMERA_ID], IMAGE_SIZE);
imshow("left", g_greyscale_image_left);
// publish left greyscale image
cv_bridge::CvImage left_8;
g_greyscale_image_left.copyTo(left_8.image);
left_8.header.frame_id = "guidance";
left_8.header.stamp = time_in_this_loop;
left_8.encoding = sensor_msgs::image_encodings::MONO8;
left_image_pub.publish(left_8.toImageMsg());
sensor_msgs::CameraInfo g_cam_info_left;
g_cam_info_left.header.stamp = time_in_this_loop;
g_cam_info_left.header.frame_id = "guidance";
try {
read_params_from_yaml_and_fill_cam_info_msg(camera_params_left, g_cam_info_left);
cam_info_left_pub.publish(g_cam_info_left);
} catch(...) {
// if yaml fails to read data, don't try to publish
}
}
if ( data->m_greyscale_image_right[CAMERA_ID] ) {
memcpy(g_greyscale_image_right.data, data->m_greyscale_image_right[CAMERA_ID], IMAGE_SIZE);
imshow("right", g_greyscale_image_right);
// publish right greyscale image
cv_bridge::CvImage right_8;
g_greyscale_image_right.copyTo(right_8.image);
right_8.header.frame_id = "guidance";
right_8.header.stamp = time_in_this_loop;
right_8.encoding = sensor_msgs::image_encodings::MONO8;
right_image_pub.publish(right_8.toImageMsg());
sensor_msgs::CameraInfo g_cam_info_right;
g_cam_info_right.header.stamp = time_in_this_loop;
g_cam_info_right.header.frame_id = "guidance";
try {
read_params_from_yaml_and_fill_cam_info_msg(camera_params_right, g_cam_info_right);
cam_info_right_pub.publish(g_cam_info_right);
} catch(...) {
// if yaml fails to read data, don't try to publish
}
}
if ( data->m_depth_image[CAMERA_ID] ) {
memcpy(g_depth.data, data->m_depth_image[CAMERA_ID], IMAGE_SIZE * 2);
g_depth.convertTo(depth8, CV_8UC1);
imshow("depth", depth8);
//publish depth image
cv_bridge::CvImage depth_16;
g_depth.copyTo(depth_16.image);
depth_16.header.frame_id = "guidance";
depth_16.header.stamp = ros::Time::now();
depth_16.encoding = sensor_msgs::image_encodings::MONO16;
depth_image_pub.publish(depth_16.toImageMsg());
}
//left image pair(ID=2)
if ( data->m_greyscale_image_left[CAMERA_ID_2] ) {
memcpy(g_greyscale_image_left_2.data, data->m_greyscale_image_left[CAMERA_ID_2], IMAGE_SIZE);
imshow("left_2", g_greyscale_image_left_2);
// publish left greyscale image
cv_bridge::CvImage left_8_2;
g_greyscale_image_left_2.copyTo(left_8_2.image);
left_8_2.header.frame_id = "guidance2";
left_8_2.header.stamp = time_in_this_loop;
left_8_2.encoding = sensor_msgs::image_encodings::MONO8;
left_image_pub_2.publish(left_8_2.toImageMsg());
sensor_msgs::CameraInfo g_cam_info_left;
g_cam_info_left.header.stamp = time_in_this_loop;
g_cam_info_left.header.frame_id = "guidance2";
try {
read_params_from_yaml_and_fill_cam_info_msg(camera2_params_left, g_cam_info_left);
cam2_info_left_pub.publish(g_cam_info_left);
} catch(...) {
// if yaml fails to read data, don't try to publish
}
}
if ( data->m_greyscale_image_right[CAMERA_ID_2] ) {
memcpy(g_greyscale_image_right_2.data, data->m_greyscale_image_right[CAMERA_ID_2], IMAGE_SIZE);
imshow("right_2", g_greyscale_image_right_2);
// publish right greyscale image
cv_bridge::CvImage right_8_2;
g_greyscale_image_right_2.copyTo(right_8_2.image);
right_8_2.header.frame_id = "guidance2";
right_8_2.header.stamp = time_in_this_loop;
right_8_2.encoding = sensor_msgs::image_encodings::MONO8;
right_image_pub_2.publish(right_8_2.toImageMsg());
//.........这里部分代码省略.........
示例10: my_callback
// SDK data-transfer callback for the console demo: prints every subscribed
// Guidance stream (images, IMU, velocity, obstacle distance, ultrasonic)
// to stdout and, when OpenCV is available, shows image previews.
// The whole body runs under g_lock; g_event is signalled before returning.
int my_callback(int data_type, int data_len, char *content)
{
g_lock.enter();
if (e_image == data_type && NULL != content)
{
// Copy the packed payload into a local struct so the per-camera
// pointers inside it can be dereferenced safely.
image_data data;
memcpy( (char*)&data, content, sizeof(data) );
printf( "frame index:%d,stamp:%d\n", data.frame_index, data.time_stamp );
#if !USE_GUIDANCE_ASSISTANT_CONFIG
#ifdef HAVE_OPENCV
// Manual-config build: only the single compile-time sensor pair is shown.
memcpy( g_greyscale_image_left.data, data.m_greyscale_image_left[sensor_id], IMAGE_SIZE );
memcpy( g_greyscale_image_right.data, data.m_greyscale_image_right[sensor_id], IMAGE_SIZE );
// Depth image is 16 bits per pixel, hence twice the greyscale size.
memcpy( g_depth.data, data.m_depth_image[sensor_id], IMAGE_SIZE * 2 );
imshow("left", g_greyscale_image_left);
imshow("right", g_greyscale_image_right);
#endif
#else
// Assistant-configured build: any camera pair may be active, so probe all
// of them and open one preview window per active stream.
for ( int d = 0; d < CAMERA_PAIR_NUM; ++d )
{
string stmps;
if ( data.m_greyscale_image_left[d] )
{
#ifdef HAVE_OPENCV
memcpy( g_greyscale_image_left.data, data.m_greyscale_image_left[d], IMAGE_SIZE );//maybe select too many image so just overwrite it
stmps = "left";
stmps = stmps + (char)('0'+d);  // window name e.g. "left0", "left1", ...
imshow(stmps.c_str(), g_greyscale_image_left);
#endif
}
if ( data.m_greyscale_image_right[d] )
{
#ifdef HAVE_OPENCV
memcpy( g_greyscale_image_right.data, data.m_greyscale_image_right[d], IMAGE_SIZE );
stmps = "right";
stmps = stmps + (char)('0'+d);
imshow(stmps, g_greyscale_image_right);
#endif
}
if ( data.m_depth_image[d] )
{
#ifdef HAVE_OPENCV
// Convert the 16-bit depth map to 8-bit for display only.
Mat depthmap(HEIGHT, WIDTH, CV_16SC1);
Mat depthmap8(HEIGHT, WIDTH, CV_8UC1);
memcpy( depthmap.data, data.m_depth_image[d], IMAGE_SIZE * 2 );
depthmap.convertTo(depthmap8, CV_8UC1);
stmps = "depthmap";
stmps = stmps + (char)('0'+d);
imshow(stmps, depthmap8);
#endif
}
}
#endif
#ifdef HAVE_OPENCV
// Give HighGUI a chance to repaint the windows.
waitKey(1);
#endif
}
if ( e_imu == data_type && NULL != content )
{
// Accelerometer (acc_x/y/z) and attitude quaternion q[0..3].
imu *imu_data = (imu*)content;
printf( "imu:%f %f %f,%f %f %f %f\n", imu_data->acc_x, imu_data->acc_y, imu_data->acc_z, imu_data->q[0], imu_data->q[1], imu_data->q[2], imu_data->q[3] );
printf( "frame index:%d,stamp:%d\n", imu_data->frame_index, imu_data->time_stamp );
}
if ( e_velocity == data_type && NULL != content )
{
// Raw components are scaled by 0.001 for printing (presumably mm/s -> m/s;
// confirm against the Guidance SDK documentation).
velocity *vo = (velocity*)content;
printf( "vx:%f vy:%f vz:%f\n", 0.001f * vo->vx, 0.001f * vo->vy, 0.001f * vo->vz );
printf( "frame index:%d,stamp:%d\n", vo->frame_index, vo->time_stamp );
}
if ( e_obstacle_distance == data_type && NULL != content )
{
// One distance value per camera pair, scaled by 0.01 for printing
// (presumably cm -> m; confirm against the SDK documentation).
obstacle_distance *oa = (obstacle_distance*)content;
printf( "obstacle distance:" );
for ( int i = 0; i < CAMERA_PAIR_NUM; ++i )
{
printf( " %f ", 0.01f * oa->distance[i] );
}
printf( "\n" );
printf( "frame index:%d,stamp:%d\n", oa->frame_index, oa->time_stamp );
}
if ( e_ultrasonic == data_type && NULL != content )
{
// Ultrasonic range per pair (scaled by 0.001 for printing) plus a
// per-reading reliability flag.
ultrasonic_data *ultrasonic = (ultrasonic_data*)content;
for ( int d = 0; d < CAMERA_PAIR_NUM; ++d )
{
printf( "ultrasonic distance:%f,reliability:%d\n", ultrasonic->ultrasonic[d] * 0.001f, (int)ultrasonic->reliability[d] );
}
printf( "frame index:%d,stamp:%d\n", ultrasonic->frame_index, ultrasonic->time_stamp );
}
g_lock.leave();
g_event.set_event();
return 0;
}