This article collects typical usage examples of the C++ function cvCreateImageHeader. If you have been wondering how cvCreateImageHeader is used in C++, how to call it, or what real code that uses it looks like, the hand-picked examples below should help.
The following presents 15 code examples of cvCreateImageHeader, sorted by popularity by default.
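All fifteen examples share one pattern, so it is worth spelling out up front: cvCreateImageHeader allocates only the IplImage header, not the pixel memory; the header is then pointed at an existing buffer (directly through imageData, or through cvSetData), and cvReleaseImageHeader later frees the header while leaving the buffer to its real owner. A minimal sketch, assuming an 8-bit single-channel buffer (width, height, and buffer are placeholder names, not part of any example below):

// Wrap an existing buffer in an IplImage header without copying the pixels.
IplImage *header = cvCreateImageHeader(cvSize(width, height), IPL_DEPTH_8U, 1);
cvSetData(header, buffer, width);   // step = bytes per row (== width for tightly packed 8U/C1 data)
// ... pass header to any cv* function that reads or writes an IplImage ...
cvReleaseImageHeader(&header);      // frees the header only; buffer still belongs to the caller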
Example 1: printf
void rspfOpenCVSmoothFilter::runUcharTransformation(rspfImageData* tile)
{
   IplImage *input;
   IplImage *output;
   char* bSrc;
   char* bDst;

   int nChannels = tile->getNumberOfBands();

   for(int k=0; k<nChannels; k++) {
      printf("Channel %d\n",k);
      // Wrap the source and destination band buffers in IplImage headers (no pixel copy)
      input = cvCreateImageHeader(cvSize(tile->getWidth(),tile->getHeight()),8,1);
      output = cvCreateImageHeader(cvSize(tile->getWidth(),tile->getHeight()),8,1);
      bSrc = static_cast<char*>(tile->getBuf(k));
      input->imageData = bSrc;
      bDst = static_cast<char*>(theTile->getBuf(k));
      output->imageData = bDst;

      cvSmooth(input,output,theSmoothType,theParam1,theParam2,theParam3,theParam4);

      // Release only the headers; the tiles own the pixel data
      cvReleaseImageHeader(&input);
      cvReleaseImageHeader(&output);
   }
   theTile->validate();
}
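Note the pairing used here (and repeated in Examples 2 and 4): because each header is only a view onto tile->getBuf(k), it is released with cvReleaseImageHeader rather than cvReleaseImage, which would also try to free pixel memory the filter does not own.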
Example 2: printf
void rspfOpenCVSobelFilter::runUcharTransformation(rspfImageData* tile) {
   IplImage *input;
   IplImage *output;
   char* bSrc;
   char* bDst;

   int nChannels = tile->getNumberOfBands();

   for(int k=0; k<nChannels; k++) {
      printf("Channel %d\n",k);
      input = cvCreateImageHeader(cvSize(tile->getWidth(),tile->getHeight()),8,1);
      output = cvCreateImageHeader(cvSize(tile->getWidth(),tile->getHeight()),8,1);
      bSrc = static_cast<char*>(tile->getBuf(k));
      input->imageData = bSrc;
      bDst = static_cast<char*>(theTile->getBuf(k));
      output->imageData = bDst;

      // Sobel output needs a signed 16-bit image to hold negative gradients
      IplImage * tmp = cvCreateImage(cvSize(tile->getWidth(),tile->getHeight()),IPL_DEPTH_16S,1);
      cvSobel(input,tmp,theXOrder,theYOrder,theApertureSize);
      // Convert the 16S result back to the 8-bit output band
      cvConvertScale(tmp,output);

      cvReleaseImageHeader(&input);
      cvReleaseImageHeader(&output);
      cvReleaseImage(&tmp);
   }
   theTile->validate();
}
Example 3: radial_sample
void radial_sample(int width, int height, char* data, IplImage *unwrapped, int slice)
{
   // Wrap the raw slice buffer in an IplImage header (no pixel copy)
   IplImage *cvcast = cvCreateImageHeader(cvSize(width, height),
                                          IPL_DEPTH_8U, 1);
   cvcast->imageData = data;
   // cvSaveImage("slice.png",cvcast);

   CvPoint center = cvPoint(cx,cy);
   unsigned char* linebuf;
   for(int sample = 0; sample < RADIAL_SAMPLES; sample++) {
      float theta = ((float)sample)*((2.0*PI)/(float)RADIAL_SAMPLES);
      CvPoint outer = calc_ray_outer(theta, center);
      // printf("%g:\t%d,%d\n", theta*(180.0/PI), outer.x, outer.y);

      cvClipLine(cvSize(width, height), &outer, &center);
      int linesize = abs(center.x-outer.x)+abs(center.y-outer.y)+1;
      linebuf = (unsigned char*)malloc(linesize);
      cvSampleLine(cvcast,outer,center,linebuf,4);

      // Wrap the sampled ray as a 1-row image so cvSobel can run on it
      IplImage *castline = cvCreateImageHeader(cvSize(linesize,1), IPL_DEPTH_8U, 1);
      castline->imageData = (char*)linebuf;

      IplImage *sobel = cvCreateImage(cvSize(linesize,1), IPL_DEPTH_8U, 1);
      cvSobel(castline, sobel, 1, 0, 3);

      int layer = 0;
      for(int i = 0; (i < linesize) && (layer < MAX_LAYERS); i++) {
         // printf(" %d,", (int)cvGetReal1D(sobel,i));
         if((int)cvGetReal1D(sobel,i) > SOBEL_THRESH) {
            // Walk forward to the local maximum of this edge response
            int max = 0, max_i = 0;
            for(; i < linesize; i++) {
               int curval = (int)cvGetReal1D(sobel,i);
               if(curval == 0) break;
               if(curval > max) {
                  max = curval;
                  max_i = i;
               }
            }
            cvSetReal2D(unwrapped,slice,(layer*RADIAL_SAMPLES)+sample,cvGetReal1D(castline,max_i));
            // printf("%d\t",max);
            layer++;
         }
      }
      // printf("\n");
      /*
      char filename[] = "line000.png";
      sprintf(filename,"line%03d.png",(int)(theta*(180.0/PI)));
      cvSaveImage(filename,sobel);
      */
      cvReleaseImageHeader(&castline);
      cvReleaseImage(&sobel);
      free(linebuf);
   }
   cvReleaseImageHeader(&cvcast); // release the slice header as well (leaked in the original)
}
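A detail worth calling out: the buffer size abs(dx) + abs(dy) + 1 is exactly the number of points cvSampleLine visits when walking a 4-connected line, which is why the call passes connectivity 4. The default 8-connected walk would visit only max(|dx|, |dy|) + 1 points, so the buffer sizing and the connectivity argument have to agree.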
Example 4: printf
void rspfOpenCVErodeFilter::runUcharTransformation(rspfImageData* tile)
{
   IplImage *input;
   IplImage *output;
   char* bSrc;
   char* bDst;

   int nChannels = tile->getNumberOfBands();

   for(int k=0; k<nChannels; k++) {
      printf("Channel %d\n",k);
      input = cvCreateImageHeader(cvSize(tile->getWidth(),tile->getHeight()),8,1);
      output = cvCreateImageHeader(cvSize(tile->getWidth(),tile->getHeight()),8,1);
      bSrc = static_cast<char*>(tile->getBuf(k));
      input->imageData = bSrc;
      bDst = static_cast<char*>(theTile->getBuf(k));
      output->imageData = bDst;

      cvErode(input,output,NULL,theIterations); // a 3x3 rectangular structuring element is used

      cvReleaseImageHeader(&input);
      cvReleaseImageHeader(&output);
   }
   theTile->validate();
}
Example 5: while
void BleWindowsCaptureSource::run()
{
    // TODO: allow selecting which screen to capture
    // QGuiApplication::screens();
    while (!m_stop) {
        QElapsedTimer elapsedTimer;
        elapsedTimer.start();

        QScreen *screen = QGuiApplication::primaryScreen();
        if (screen) {
            QPixmap pixmap = screen->grabWindow(m_wid, m_x, m_y, m_width, m_height);
#if 1
            // Draw the cursor into the captured image
            QRect desktopRect = QRect(QPoint(0, 0), screen->size());
            if (desktopRect.contains(QCursor::pos())) {
                drawCursor(&pixmap);
            }
#endif
            QImage image = pixmap.toImage();

            m_modifyMutex.lock();       // Start lock

            BleImage be;
            be.width = image.width();
            be.height = image.height();

            int imageSize = be.width * be.height * 3;
            be.data = new char[imageSize];

            // Wrap the QImage (BGRA) and the destination buffer (BGR) in IplImage headers
            IplImage *oriImage = cvCreateImageHeader(cvSize(image.width(), image.height()), IPL_DEPTH_8U, 4);
            cvSetData(oriImage, image.bits(), image.bytesPerLine());

            IplImage *dstImage = cvCreateImageHeader(cvSize(image.width(), image.height()), IPL_DEPTH_8U, 3);
            cvSetData(dstImage, be.data, be.width * 3);

            cvCvtColor(oriImage, dstImage, CV_BGRA2BGR);

            be.dataSize = imageSize;
            be.format = BleImage_Format_BGR24;

            m_image = be;

            cvReleaseImageHeader(&oriImage);
            cvReleaseImageHeader(&dstImage);

            m_modifyMutex.unlock();     // End lock
        }

        int elapsedMs = elapsedTimer.elapsed();
        int needSleepMs = m_interval - elapsedMs;
        if (needSleepMs < 0) {
            needSleepMs = 0;
        }
        msleep(needSleepMs);
    }

    log_trace("BleWindowsCaptureSource exit normally.");
}
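This example uses cvSetData instead of assigning imageData directly: cvSetData fills in both the data pointer and the row stride in a single call, and passing image.bytesPerLine() keeps the header correct even if QImage pads its scanlines.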
Example 6: Q_UNUSED
void BleImageProcess::paintEvent(QPaintEvent *event)
{
    Q_UNUSED(event);

    QPainter p(this);
    p.setRenderHint(QPainter::SmoothPixmapTransform);

    // background
    p.fillRect(rect(), QBrush(QColor(48, 48, 48)));

    // draw elements
    for (int i = 0; i < m_sources.size(); ++i) {
        const SourcePair & pair = m_sources.at(i);
        BleSourceAbstract *s = pair.source;

        // TODO image data may be used by other thread
        BleImage image = s->getImage();
        if (image.dataSize <= 0) continue;

        QImage qimage;
        if (image.format == BleImage_Format_BGR24) {
            // Convert BGR to RGB in place: both headers share the same pixel buffer
            IplImage *oriImage = cvCreateImageHeader(cvSize(image.width, image.height), IPL_DEPTH_8U, 3);
            cvSetData(oriImage, image.data, image.width*3);

            IplImage *dstImage = cvCreateImageHeader(cvSize(image.width, image.height), IPL_DEPTH_8U, 3);
            cvSetData(dstImage, image.data, image.width*3);

            cvCvtColor(oriImage, dstImage, CV_BGR2RGB);

            cvReleaseImageHeader(&oriImage);
            cvReleaseImageHeader(&dstImage);
        }
        qimage = QImage((uchar*)image.data, image.width, image.height, QImage::Format_RGB888);

        p.drawPixmap(pair.rect, QPixmap::fromImage(qimage));
        // p.drawImage(pair.rect, qimage);
    }

    if (m_activePair && m_activePair->rect.isValid()) {
        QPen pen(Qt::SolidLine);
        pen.setColor(Qt::white);
        pen.setWidth(2);
        pen.setStyle(Qt::DotLine);
        p.setPen(pen);
        p.drawRect(m_activePair->rect);

        QRect topLeftRect(m_activePair->rect.x(), m_activePair->rect.y(), 8, 8);
        p.fillRect(topLeftRect, QBrush(Qt::red));

        QRect bottomRightRect(m_activePair->rect.bottomRight().x(), m_activePair->rect.bottomRight().y(), -8, -8);
        p.fillRect(bottomRightRect, QBrush(Qt::red));
    }
}
Example 7: gst_opencv_video_filter_set_caps
static gboolean
gst_opencv_video_filter_set_caps (GstBaseTransform * trans, GstCaps * incaps,
    GstCaps * outcaps)
{
  GstOpencvVideoFilter *transform = GST_OPENCV_VIDEO_FILTER (trans);
  GstOpencvVideoFilterClass *klass =
      GST_OPENCV_VIDEO_FILTER_GET_CLASS (transform);
  gint in_width, in_height;
  gint in_depth, in_channels;
  gint out_width, out_height;
  gint out_depth, out_channels;
  GError *in_err = NULL;
  GError *out_err = NULL;

  if (!gst_opencv_parse_iplimage_params_from_caps (incaps, &in_width,
          &in_height, &in_depth, &in_channels, &in_err)) {
    GST_WARNING_OBJECT (transform, "Failed to parse input caps: %s",
        in_err->message);
    g_error_free (in_err);
    return FALSE;
  }
  if (!gst_opencv_parse_iplimage_params_from_caps (outcaps, &out_width,
          &out_height, &out_depth, &out_channels, &out_err)) {
    GST_WARNING_OBJECT (transform, "Failed to parse output caps: %s",
        out_err->message);
    g_error_free (out_err);
    return FALSE;
  }

  if (klass->cv_set_caps) {
    if (!klass->cv_set_caps (transform, in_width, in_height, in_depth,
            in_channels, out_width, out_height, out_depth, out_channels))
      return FALSE;
  }

  if (transform->cvImage) {
    cvReleaseImage (&transform->cvImage);
  }
  if (transform->out_cvImage) {
    cvReleaseImage (&transform->out_cvImage);
  }

  transform->cvImage =
      cvCreateImageHeader (cvSize (in_width, in_height), in_depth, in_channels);
  transform->out_cvImage =
      cvCreateImageHeader (cvSize (out_width, out_height), out_depth,
      out_channels);

  gst_base_transform_set_in_place (GST_BASE_TRANSFORM (transform),
      transform->in_place);
  return TRUE;
}
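Here the headers live in the element's state rather than on the stack: the old images are released whenever caps are renegotiated, then recreated with the new width, height, depth, and channel count, presumably so the transform function can simply re-point them at each incoming buffer.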
Example 8: haarwrapper_flip
guint32 haarwrapper_flip(t_haarwrapper *hc, t_haarwrapper_image* im, t_haarwrapper_image* im2)
{
  // Wrap both frames; widthStep is overridden because the caller's rows may be padded
  IplImage *img = cvCreateImageHeader(cvSize(im->width,im->height), IPL_DEPTH_8U, 3);
  img->widthStep = im->rowbytes;
  img->imageData = (char*)im->data[0];

  IplImage *img2 = cvCreateImageHeader(cvSize(im2->width,im2->height), IPL_DEPTH_8U, 3);
  img2->widthStep = im2->rowbytes;
  img2->imageData = (char*)im2->data[0];

  cvFlip(img, img2, 1); // flip around the vertical axis (mirror)

  // Release the headers only; the caller owns the pixel buffers (leaked in the original)
  cvReleaseImageHeader(&img);
  cvReleaseImageHeader(&img2);
  return(0);
}
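This example also sets widthStep by hand: cvCreateImageHeader initializes the step from the width and channel count, so it must be overridden with the caller's real stride (im->rowbytes here) whenever rows carry padding.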
Example 9: main
int main (int argc,char* argv[]){
   if (argc != 2 && argc != 3){
      printf("usage:\n %s /path/to/recording/filename.oni\n",argv[0]);
      return 0;
   }

   Xn_sensor sensor(WIDTH,HEIGHT);
   sensor.play(argv[1],false);

   cvNamedWindow( "Model Extractor Viewer", 1 );
   IplImage* rgb_image = cvCreateImageHeader(cvSize(WIDTH,HEIGHT), 8, 3);
   IplImage* test = cvCreateImageHeader(cvSize(WIDTH,HEIGHT), 8, 3);
   IplImage* gray = cvCreateImage(cvSize(WIDTH,HEIGHT), 8, 1);
   Mat img;

   pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZRGB>);
   pcl::PointCloud<pcl::PointXYZRGB>::Ptr model (new pcl::PointCloud<pcl::PointXYZRGB>);
   //pcl::visualization::CloudViewer viewer("Model Extractor Viewer");

   //Read Fiducial from file
   Fiducial fiducial("fiducial.yml");
   Pose pose;

   while(/*!viewer.wasStopped() && */!sensor.endPlaying()){
      //Get the frame
      sensor.update();
      sensor.getPCL(cloud);
      cvSetData(rgb_image,sensor.rgb,rgb_image->widthStep);

      //Estimate Camera Pose from fiducial
      cvCvtColor(rgb_image,gray,CV_BGR2GRAY);
      if (fiducial.find(gray,true)){
         pose.estimate(gray,fiducial);
         //fiducial.draw(rgb_image);
      }
      if (pose.isFound()){
         printf("Rotation");
         printMat<double>(pose.getR());
         printf("Translation");
         printMat<double>(pose.getT());
         //Segment volume around the fiducial
         boxFilter(cloud,pose);
         //Create 3D model
         buildModel(cloud,model);
      }
      //viewer.showCloud (model);
   }

   pcl::io::savePCDFileBinary ("model.pcd", *model);
   sensor.shutdown();
   return 0;
}
Example 10: main
int main(int argc, char* argv[])
{
    printf("DUOLib Version: v%s\n", GetLibVersion());

    // Open DUO camera and start capturing
    if(!OpenDUOCamera(WIDTH, HEIGHT, FPS))
    {
        printf("Could not open DUO camera\n");
        return 0;
    }

    // Create OpenCV windows
    cvNamedWindow("Left");
    cvNamedWindow("Right");

    // Set exposure and LED brightness
    SetExposure(50);
    SetLed(25);

    // Create image headers for left & right frames
    IplImage *left = cvCreateImageHeader(cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 1);
    IplImage *right = cvCreateImageHeader(cvSize(WIDTH, HEIGHT), IPL_DEPTH_8U, 1);

    // Run capture loop until <Esc> key is pressed
    while((cvWaitKey(1) & 0xff) != 27)
    {
        // Capture DUO frame
        PDUOFrame pFrameData = GetDUOFrame();
        if(pFrameData == NULL) continue;

        // Set the image data
        left->imageData = (char*)pFrameData->leftData;
        right->imageData = (char*)pFrameData->rightData;

        // Process images here (optional)

        // Display images
        cvShowImage("Left", left);
        cvShowImage("Right", right);
    }

    // Release image headers
    cvReleaseImageHeader(&left);
    cvReleaseImageHeader(&right);

    // Close DUO camera
    CloseDUOCamera();
    return 0;
}
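Note the division of labor: the two headers are created once before the loop, and only their imageData pointers are re-targeted at every captured frame, so no per-frame allocation takes place; the headers are released after the loop exits.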
Example 11: gst_buffer_get_caps
//
// decode buffer
//
IplImage * CvCapture_GStreamer::retrieveFrame(int)
{
    if(!buffer)
        return 0;

    if(!frame) {
        gint height, width;
        GstCaps *buff_caps = gst_buffer_get_caps(buffer);
        assert(gst_caps_get_size(buff_caps) == 1);
        GstStructure* structure = gst_caps_get_structure(buff_caps, 0);

        if(!gst_structure_get_int(structure, "width", &width) ||
           !gst_structure_get_int(structure, "height", &height))
            return 0;

        frame = cvCreateImageHeader(cvSize(width, height), IPL_DEPTH_8U, 3);
        gst_caps_unref(buff_caps);
    }

    // no need to memcpy, just use gstreamer's buffer :-)
    frame->imageData = (char *)GST_BUFFER_DATA(buffer);
    //memcpy (frame->imageData, GST_BUFFER_DATA(buffer), GST_BUFFER_SIZE (buffer));
    //gst_buffer_unref(buffer);
    //buffer = 0;
    return frame;
}
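The zero-copy shortcut has a lifetime cost: frame->imageData is only valid while the GStreamer buffer is held, which is why the commented-out memcpy/unref variant is kept around as the safer (but copying) alternative.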
Example 12: lk
bool HiwrCameraControllerNodelet::copyRead(){
    std::unique_lock<std::mutex> lk(mutex_);
    if(!lk.owns_lock()){
        try {
            lk.lock();
        } catch(const std::system_error& e) {
            std::cout << "coin2 Caught system_error with code " << e.code()
                      << " meaning " << e.what() << '\n';
        }
    }

    if(!new_frame_){
        waiter_.wait(lk);
    }

    image_ipl_ = cvCreateImageHeader(cvSize(config_width_, config_height_), 8, 1);
    dealMemory();

    const int total = config_width_*config_height_*2;
    if(total!=bytes_used_)
        return false;

    // Keep every other byte of the 2-bytes-per-pixel frame
    // (e.g. the luma plane of a YUYV stream) for the 8-bit grayscale header
    int j=0;
    for(int i=0; i< total; j++){
        final_[j]=frame_[i];
        i+=2;
    }

    new_frame_=false;
    lk.unlock();
    return true;
}
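One thing to watch in this example: image_ipl_ is recreated on every call, so unless dealMemory() releases the previous header, one header would be leaked per frame; dealMemory() is not shown here, so that is only a presumption.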
Example 13: cvCreateImageHeader
bool JpegSaver::saveNUimageAsJpeg(NUimage* image, const std::string& pFileName)
{
    unsigned char r,g,b;
    char* bgrBuffer = new char[image->width() * image->height() * 3];
    int bufferIndex = 0;
    pixels::Pixel temp;

    // Convert each YCbCr pixel to RGB, storing it in BGR byte order for OpenCV
    for(int y = 0; y < image->height(); y++)
    {
        for (int x = 0; x < image->width(); x++)
        {
            temp = image->image[y][x];
            ColorModelConversions::fromYCbCrToRGB(temp.y,temp.cb,temp.cr,r,g,b);
            bgrBuffer[bufferIndex++] = b;
            bgrBuffer[bufferIndex++] = g;
            bgrBuffer[bufferIndex++] = r;
        }
    }

    IplImage* fIplImageHeader;
    fIplImageHeader = cvCreateImageHeader(cvSize(image->width(), image->height()), 8, 3);
    fIplImageHeader->imageData = bgrBuffer;

    cvSaveImage(pFileName.c_str(),fIplImageHeader);

    if (fIplImageHeader)
    {
        cvReleaseImageHeader(&fIplImageHeader);
    }
    delete [] bgrBuffer;
    return true;
}
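This example is a clean illustration that the header and the pixel storage have independent lifetimes: the header is wrapped around the temporary bgrBuffer just long enough for cvSaveImage, then the header is released with cvReleaseImageHeader and the buffer is freed separately with delete[].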
Example 14: gst_motiondetect_set_caps
static gboolean
gst_motiondetect_set_caps (GstBaseTransform *trans, GstCaps *incaps,
    GstCaps *outcaps)
{
  gint width, height, depth, ipldepth, channels;
  GError *err = NULL;
  GstStructure *structure = gst_caps_get_structure (incaps, 0);
  StbtMotionDetect *filter = GST_MOTIONDETECT (trans);

  if (!filter) {
    return FALSE;
  }

  if (!gst_structure_get_int (structure, "width", &width) ||
      !gst_structure_get_int (structure, "height", &height) ||
      !gst_structure_get_int (structure, "depth", &depth)) {
    g_set_error (&err, GST_CORE_ERROR, GST_CORE_ERROR_NEGOTIATION,
        "No width/height/depth in caps");
    return FALSE;
  }

  if (gst_structure_has_name (structure, "video/x-raw-rgb")) {
    channels = 3;
  } else if (gst_structure_has_name (structure, "video/x-raw-gray")) {
    channels = 1;
  } else {
    g_set_error (&err, GST_CORE_ERROR, GST_CORE_ERROR_NEGOTIATION,
        "Unsupported caps %s", gst_structure_get_name (structure));
    return FALSE;
  }

  if (depth / channels == 8) {
    ipldepth = IPL_DEPTH_8U;
  } else {
    g_set_error (&err, GST_CORE_ERROR, GST_CORE_ERROR_NEGOTIATION,
        "Unsupported depth/channels %d/%d", depth, channels);
    return FALSE;
  }

  if (filter->cvCurrentImage) {
    cvReleaseImageHeader (&filter->cvCurrentImage);
    filter->cvCurrentImage = NULL;
  }
  if (filter->cvReferenceImageGray) {
    cvReleaseImage (&filter->cvReferenceImageGray);
    filter->cvReferenceImageGray = NULL;
  }
  if (filter->cvCurrentImageGray) {
    cvReleaseImage (&filter->cvCurrentImageGray);
    filter->cvCurrentImageGray = NULL;
  }

  filter->cvCurrentImage =
      cvCreateImageHeader (cvSize (width, height), ipldepth, channels);
  filter->cvReferenceImageGray = cvCreateImage (
      cvSize (width, height), IPL_DEPTH_8U, 1);
  filter->cvCurrentImageGray = cvCreateImage (
      cvSize (width, height), IPL_DEPTH_8U, 1);
  filter->state = MOTION_DETECT_STATE_ACQUIRING_REFERENCE_IMAGE;

  return gst_motiondetect_check_mask_compability (filter);
}
Example 15: memcpy
// --------------------------------------------------------------------------
//! @brief Get an image from the AR.Drone's camera.
//! @return An OpenCV image (IplImage or cv::Mat)
//! @retval NULL Failure
// --------------------------------------------------------------------------
ARDRONE_IMAGE ARDrone::getImage(void)
{
    // There is no image
    if (!img) return ARDRONE_IMAGE(NULL);

    // Enable mutex lock
    if (mutexVideo) pthread_mutex_lock(mutexVideo);

    // AR.Drone 2.0
    if (version.major == ARDRONE_VERSION_2) {
        // Copy current frame to an IplImage
        memcpy(img->imageData, pFrameBGR->data[0], pCodecCtx->width * ((pCodecCtx->height == 368) ? 360 : pCodecCtx->height) * sizeof(uint8_t) * 3);
    }
    // AR.Drone 1.0
    else {
        // If the sizes of the buffer and the IplImage are different
        if (pCodecCtx->width != img->width || pCodecCtx->height != img->height) {
            // Resize the image to 320x240
            IplImage *small_img = cvCreateImageHeader(cvSize(pCodecCtx->width, pCodecCtx->height), IPL_DEPTH_8U, 3);
            small_img->imageData = (char*)bufferBGR;
            cvResize(small_img, img, CV_INTER_CUBIC);
            cvReleaseImageHeader(&small_img);
        }
        // For 320x240 image, just copy it
        else memcpy(img->imageData, bufferBGR, pCodecCtx->width * pCodecCtx->height * sizeof(uint8_t) * 3);
    }

    // The latest image has been read, so change newImage accordingly
    newImage = false;

    // Disable mutex lock
    if (mutexVideo) pthread_mutex_unlock(mutexVideo);

    return ARDRONE_IMAGE(img);
}
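Here the header exists only inside the resize branch: it is created, pointed at bufferBGR, used for a single cvResize call, and released immediately, which is a common pattern when a temporary view with different dimensions is needed for just one function call.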