本文整理汇总了C++中cv::Mat::create方法的典型用法代码示例。如果您正苦于以下问题:C++ Mat::create方法的具体用法?C++ Mat::create怎么用?C++ Mat::create使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类cv::Mat
的用法示例。
在下文中一共展示了Mat::create方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: load
// Deserializes a cv::Mat previously written by the matching save():
// a header (rows, cols, type, continuity flag) followed by raw pixel bytes.
// When the source mat was continuous the whole buffer is read in one shot;
// otherwise it was written row by row (without padding) and is read the same way.
void load(Archive &ar, cv::Mat &mat)
{
    int rows, cols, type;
    bool continuous;
    ar &rows &cols &type &continuous;
    // Both branches need the same allocation; a freshly created Mat is continuous.
    mat.create(rows, cols, type);
    if (continuous)
    {
        // size_t avoids int overflow: rows * cols * elemSize() can exceed INT_MAX
        // for large matrices. cereal::binary_data takes a std::size_t byte count.
        const size_t data_size = static_cast<size_t>(rows) * cols * mat.elemSize();
        auto mat_data = cereal::binary_data(mat.ptr(), data_size);
        ar &mat_data;
    }
    else
    {
        // Row-wise read: each serialized row holds exactly cols * elemSize() bytes.
        const size_t row_size = static_cast<size_t>(cols) * mat.elemSize();
        for (int i = 0; i < rows; i++)
        {
            auto row_data = cereal::binary_data(mat.ptr(i), row_size);
            ar &row_data;
        }
    }
}
示例2: fillSamples
// Reads an image list file (one path per line), loads each image, converts it
// to an equalized gray patch of myFaceSize, and fills:
//   samples    - CV_32FC1, one normalized (1/255) flattened image per row;
//   categories - CV_8UC1, one category character per row.
// Lines whose category is unknown/undetected, and unreadable files, are
// skipped with a warning. Returns false only if the list file cannot be opened.
bool TrainProcessor::fillSamples(const std::string &inputName, cv::Mat &samples, cv::Mat &categories) {
    FILE* f = fopen(inputName.c_str(), "rt");
    if (f == 0) {
        return false;
    }
    cv::Mat image, gray, resized, resizedFloat;
    std::vector<cv::Mat> imagesVector;
    std::vector<char> categoriesVector;
    char buf[1000];
    while (fgets(buf, 1000, f)) {
        // Strip trailing whitespace (including the newline fgets keeps).
        int len = (int) strlen(buf);
        // Cast to unsigned char: passing a negative char value to isspace() is UB.
        while (len > 0 && isspace((unsigned char)buf[len-1])) {
            len--;
        }
        buf[len] = '\0';
        std::string imageName(buf);
        char cat = category(imageName);
        std::cout << "file " << imageName.c_str() << std::endl;
        if (cat == '\0') {
            std::cout << "WARNING: no category detected" << std::endl;
            std::cout << std::endl;
            continue;
        } else if (myClassesList.find(cat) == std::string::npos) {
            std::cout << "WARNING: unknown category detected" << std::endl;
            std::cout << std::endl;
            continue;
        }
        image = cv::imread(imageName, 1);
        if (!image.empty()) {
            // Gray -> fixed size -> histogram equalization -> float in [0,1].
            cv::cvtColor(image, gray, CV_BGR2GRAY);
            cv::resize(gray, resized, myFaceSize, 0, 0, cv::INTER_LINEAR);
            cv::equalizeHist(resized, resized);
            resized.convertTo(resizedFloat, CV_32FC1, 1.0 / 255);
            // Store the image flattened into a single row.
            imagesVector.push_back(cv::Mat());
            cv::Mat &vec = imagesVector.back();
            resizedFloat.reshape(0, 1).copyTo(vec);
            categoriesVector.push_back(cat);
        } else {
            std::cout << "WARNING: unable to read file" << std::endl;
            std::cout << std::endl;
        }
    }
    fclose(f);
    // Explicit casts: cv::Mat::create takes int dimensions.
    samples.create((int)imagesVector.size(), myFaceSize.width * myFaceSize.height, CV_32FC1);
    categories.create((int)imagesVector.size(), 1, CV_8UC1);
    for (size_t i = 0; i < imagesVector.size(); ++i) {
        cv::Mat rowi = samples.row(i);
        imagesVector[i].copyTo(rowi);
        categories.at<unsigned char>(i, 0) = categoriesVector.at(i);
    }
    return true;
}
示例3: pixel
// Copies the retina filter's planar valarray output into an OpenCV matrix.
// grayMatrixToConvert : planar buffer from the retina filter; in color mode it
//                       holds three consecutive planes of getOutputNBpixels()
//                       values each.
// nbRows/nbColumns    : output image dimensions.
// colorMode           : false -> CV_8U gray output, true -> CV_8UC3 output.
// outBuffer           : destination matrix, (re)allocated here via create().
void Retina::_convertValarrayGrayBuffer2cvMat(const std::valarray<double> &grayMatrixToConvert, const unsigned int nbRows, const unsigned int nbColumns, const bool colorMode, cv::Mat &outBuffer)
{
// fill output buffer with the valarray buffer
const double *valarrayPTR=get_data(grayMatrixToConvert);
if (!colorMode)
{
// Gray mode: one value per pixel, the pointer walks the buffer linearly
// in row-major order; doubles are truncated to uchar.
outBuffer.create(cv::Size(nbColumns, nbRows), CV_8U);
for (unsigned int i=0;i<nbRows;++i)
{
for (unsigned int j=0;j<nbColumns;++j)
{
cv::Point2d pixel(j,i);
outBuffer.at<unsigned char>(pixel)=(unsigned char)*(valarrayPTR++);
}
}
}else
{
// Color mode: valarrayPTR walks the first plane; the second and third planes
// are read at fixed offsets of NBpixels and 2*NBpixels from it.
const unsigned int doubleNBpixels=_retinaFilter->getOutputNBpixels()*2;
outBuffer.create(cv::Size(nbColumns, nbRows), CV_8UC3);
for (unsigned int i=0;i<nbRows;++i)
{
for (unsigned int j=0;j<nbColumns;++j,++valarrayPTR)
{
cv::Point2d pixel(j,i);
cv::Vec3b pixelValues;
// Channel 2 <- current plane, channel 1 <- plane at +NBpixels,
// channel 0 <- plane at +2*NBpixels (reversed order matches OpenCV's
// BGR channel layout; planes are presumably R, G, B — verify upstream).
pixelValues[2]=(unsigned char)*(valarrayPTR);
pixelValues[1]=(unsigned char)*(valarrayPTR+_retinaFilter->getOutputNBpixels());
pixelValues[0]=(unsigned char)*(valarrayPTR+doubleNBpixels);
outBuffer.at<cv::Vec3b>(pixel)=pixelValues;
}
}
}
}
示例4:
// Computes a 16-bit LBSP (Local Binary Similarity Pattern) descriptor for each
// keypoint of oInputImg (CV_8UC1 or CV_8UC3). The reference/center value comes
// from oRefImg, or from oInputImg itself when oRefImg is empty; nThreshold is
// the absolute similarity threshold. Output layout depends on bSingleColumnDesc:
//   true  -> oDesc is a (nKeyPoints x 1) CV_16UC1/CV_16UC3 column matrix;
//   false -> oDesc is allocated at full image size.
static inline void lbsp_computeImpl(const cv::Mat& oInputImg, const cv::Mat& oRefImg, const std::vector<cv::KeyPoint>& voKeyPoints,
cv::Mat& oDesc, bool bSingleColumnDesc, size_t nThreshold) {
static_assert(LBSP::DESC_SIZE==2,"bad assumptions in impl below");
CV_DbgAssert(oRefImg.empty() || (oRefImg.size==oInputImg.size && oRefImg.type()==oInputImg.type()));
CV_DbgAssert(oInputImg.type()==CV_8UC1 || oInputImg.type()==CV_8UC3);
const size_t nChannels = (size_t)oInputImg.channels();
// Fall back to self-reference when no separate reference image is provided.
const cv::Mat& oRefMat = oRefImg.empty()?oInputImg:oRefImg;
CV_DbgAssert(oInputImg.isContinuous() && oRefMat.isContinuous());
const size_t nKeyPoints = voKeyPoints.size();
if(nChannels==1) {
if(bSingleColumnDesc)
oDesc.create((int)nKeyPoints,1,CV_16UC1);
else
oDesc.create(oInputImg.size(),CV_16UC1);
for(size_t k=0; k<nKeyPoints; ++k) {
const int x = (int)voKeyPoints[k].pt.x;
const int y = (int)voKeyPoints[k].pt.y;
// NOTE(review): the write below addresses oDesc by the linear keypoint index k
// even when oDesc was allocated at image size — verify callers expect this
// layout rather than a write at (y,x).
LBSP::computeDescriptor<1>(oInputImg,oRefMat.at<uchar>(y,x),x,y,0,nThreshold,oDesc.at<ushort>((int)k));
}
return;
}
else { //nChannels==3
if(bSingleColumnDesc)
oDesc.create((int)nKeyPoints,1,CV_16UC3);
else
oDesc.create(oInputImg.size(),CV_16UC3);
// Same threshold applied independently to each of the three channels.
const std::array<size_t,3> anThreshold = {nThreshold,nThreshold,nThreshold};
for(size_t k=0; k<nKeyPoints; ++k) {
const int x = (int)voKeyPoints[k].pt.x;
const int y = (int)voKeyPoints[k].pt.y;
// Raw pointer to the 3-channel reference pixel at (x,y).
const uchar* acRef = oRefMat.data+oInputImg.step.p[0]*y+oInputImg.step.p[1]*x;
LBSP::computeDescriptor(oInputImg,acRef,x,y,anThreshold,((ushort*)(oDesc.data+oDesc.step.p[0]*k)));
}
}
}
示例5: convertPclMessageToMat
// Converts an organized XYZRGB point cloud message into two images of the same
// width/height: depth_image (CV_32FC3, per-pixel x/y/z with NaN z replaced by
// 0) and color_image (CV_8UC3, per-pixel r/g/b in that byte order).
// Returns ipa_Utils::RET_OK.
unsigned long HeadDetectorNode::convertPclMessageToMat(const sensor_msgs::PointCloud2::ConstPtr& pointcloud, cv::Mat& depth_image, cv::Mat& color_image)
{
	pcl::PointCloud<pcl::PointXYZRGB> cloud;
	pcl::fromROSMsg(*pointcloud, cloud);
	depth_image.create(cloud.height, cloud.width, CV_32FC3);
	color_image.create(cloud.height, cloud.width, CV_8UC3);
	for (int row = 0; row < (int)cloud.height; ++row)
	{
		// Row base pointers; step already accounts for any row padding.
		float* depth_row = (float*)(depth_image.data + depth_image.step * row);
		uchar* color_row = color_image.data + color_image.step * row;
		for (int col = 0; col < (int)cloud.width; ++col)
		{
			const pcl::PointXYZRGB& pt = cloud(col, row);
			float* d = depth_row + 3 * col;
			d[0] = pt.x;
			d[1] = pt.y;
			// Missing depth readings arrive as NaN; store 0 instead.
			d[2] = (isnan(pt.z)) ? 0.f : pt.z;
			uchar* c = color_row + 3 * col;
			c[0] = pt.r;
			c[1] = pt.g;
			c[2] = pt.b;
		}
	}
	return ipa_Utils::RET_OK;
}
示例6: CreateMap
// Builds the x/y coordinate maps consumed by cv::remap(): for each pixel of
// TriangleMap (CV_32SC1, 1-based triangle indices, 0 = no triangle) the inverse
// homography of its triangle is applied to compute the source coordinate.
// Pixels outside every triangle map to themselves (identity warp).
void CreateMap(const cv::Mat& TriangleMap, const std::vector<cv::Mat>& HomMatrices, cv::Mat& map_x, cv::Mat& map_y)
{
    assert(TriangleMap.type() == CV_32SC1);
    // Allocate cv::Mat for the map
    map_x.create(TriangleMap.size(), CV_32FC1);
    map_y.create(TriangleMap.size(), CV_32FC1);
    // Precompute the inverse of every homography once up front.
    // size_t loop index avoids the signed/unsigned comparison warning.
    std::vector<cv::Mat> invHomMatrices(HomMatrices.size());
    for (size_t i = 0; i < HomMatrices.size(); i++) {
        invHomMatrices[i] = HomMatrices[i].inv();
    }
    for (int y = 0; y < TriangleMap.rows; y++) {
        for (int x = 0; x < TriangleMap.cols; x++) {
            // Convert the 1-based triangle index to a 0-based vector index.
            const int idx = TriangleMap.at<int>(y, x) - 1;
            if (idx >= 0) {
                // Reuse idx instead of re-reading TriangleMap a second time.
                const cv::Mat& H = invHomMatrices[idx];
                float z = H.at<float>(2,0) * x + H.at<float>(2,1) * y + H.at<float>(2,2);
                if (z == 0)
                    z = 0.00001f; // guard against division by zero at degenerate points
                map_x.at<float>(y,x) = (H.at<float>(0,0) * x + H.at<float>(0,1) * y + H.at<float>(0,2)) / z;
                map_y.at<float>(y,x) = (H.at<float>(1,0) * x + H.at<float>(1,1) * y + H.at<float>(1,2)) / z;
            }
            else {
                // No triangle covers this pixel: identity mapping.
                map_x.at<float>(y,x) = x;
                map_y.at<float>(y,x) = y;
            }
        }
    }
}
示例7: prepareCacheForYUV
//Attention: this method should be called inside pthread_mutex_lock(m_nextFrameMutex) only
// Re-allocates the two cached frame buffers (current + next) when the capture
// size changes. Each buffer is a CV_8UC1 Mat of height*3/2 rows — the standard
// YUV420 footprint (full-resolution Y plane followed by subsampled chroma).
void CvCapture_Android::prepareCacheForYUV(int width, int height)
{
if (width != m_width || height != m_height)
{
LOGD("CvCapture_Android::prepareCacheForYUV: Changing size of buffers: from width=%d height=%d to width=%d height=%d", m_width, m_height, width, height);
m_width = width;
m_height = height;
// The manual new/delete buffer management below was superseded by the
// cv::Mat-based allocation that follows; kept for reference.
/*
unsigned char *tmp = m_frameYUV420next;
m_frameYUV420next = new unsigned char [width * height * 3 / 2];
if (tmp != NULL)
{
delete[] tmp;
}
tmp = m_frameYUV420;
m_frameYUV420 = new unsigned char [width * height * 3 / 2];
if (tmp != NULL)
{
delete[] tmp;
}*/
// cv::Mat::create only reallocates when size/type actually differ.
m_frameYUV420.create(height * 3 / 2, width, CV_8UC1);
m_frameYUV420next.create(height * 3 / 2, width, CV_8UC1);
}
}
示例8: gen_dgauss
static void gen_dgauss(double sigma, cv::Mat& GX, cv::Mat& GY)
{
int f_wid = 4 * cvCeil(sigma) + 1;
cv::Mat kernel_separate = cv::getGaussianKernel(f_wid, sigma, CV_64F);
cv::Mat kernel = kernel_separate * kernel_separate.t();
GX.create(kernel.size(), kernel.type());
GY.create(kernel.size(), kernel.type());
for (int r = 0; r < kernel.rows; ++r) {
for (int c = 0; c < kernel.cols; ++c) {
if (c == 0) {
GX.at<double>(r, c) = kernel.at<double>(r, c + 1) - kernel.at<double>(r, c);
}
else if (c == kernel.cols - 1) {
GX.at<double>(r, c) = kernel.at<double>(r, c) - kernel.at<double>(r, c - 1);
}
else {
GX.at<double>(r, c) = (kernel.at<double>(r, c + 1) -
kernel.at<double>(r, c - 1)) / 2;
}
if (r == 0) {
GY.at<double>(r, c) = kernel.at<double>(r + 1, c) - kernel.at<double>(r, c);
}
else if (r == kernel.rows - 1) {
GY.at<double>(r, c) = kernel.at<double>(r, c) - kernel.at<double>(r - 1, c);
}
else {
GY.at<double>(r, c) = (kernel.at<double>(r + 1, c) -
kernel.at<double>(r - 1, c)) / 2;
}
}
}
GX = GX * 2 / cv::sum(cv::abs(GX))[0];
GY = GY * 2 / cv::sum(cv::abs(GY))[0];
}
示例9: minv
void Cost::minv(float* _data, cv::Mat& _minIndex, cv::Mat& _minValue){
assert(_minIndex.type() == CV_32SC1);
int r = rows;
int c = cols;
int l = layers;
_minIndex.create(rows,cols,CV_32SC1);
_minValue.create(rows,cols,CV_32FC1);
float* data = (float*)( _data);
int* minIndex = (int*)(_minIndex.data);
float* minValue = (float*)(_minValue.data);
for(int i = 0, id = 0; i < r*c; i++){//i is offset in 2d, id is offset in 3d
//first element is min so far
int mi = 0;
float mv = data[id];
id++;
for (int il = 1; il < l; il++, id++){ //il is layer index
float v = data[id];
if(mv > v){
mi = il;
mv = v;
}
}
minIndex[i] = mi;
minValue[i] = mv;
}
}
示例10: globalMattingHelper
// Core of the global matting algorithm: for every unknown (value 128) trimap
// pixel, a foreground/background sample pair is chosen via a patch-match style
// search over boundary samples, producing per-pixel alpha, foreground color and
// confidence. Known pixels (0 / 255) get trivial values with full confidence.
static void globalMattingHelper(cv::Mat _image, cv::Mat _trimap, cv::Mat &_foreground, cv::Mat &_alpha, cv::Mat &_conf)
{
const cv::Mat_<cv::Vec3b> &image = (const cv::Mat_<cv::Vec3b>&)_image;
const cv::Mat_<uchar> &trimap = (const cv::Mat_<uchar>&)_trimap;
// Initial sample candidates: known pixels adjacent to the unknown region.
std::vector<cv::Point> foregroundBoundary = findBoundaryPixels(trimap, 255, 128);
std::vector<cv::Point> backgroundBoundary = findBoundaryPixels(trimap, 0, 128);
// Augment both sample sets with random known pixels from anywhere in the
// trimap (uses rand(); seeding is the caller's responsibility).
int n = (int)(foregroundBoundary.size() + backgroundBoundary.size());
for (int i = 0; i < n; ++i)
{
int x = rand() % trimap.cols;
int y = rand() % trimap.rows;
if (trimap(y, x) == 0)
backgroundBoundary.push_back(cv::Point(x, y));
else if (trimap(y, x) == 255)
foregroundBoundary.push_back(cv::Point(x, y));
}
// Sort samples by intensity so the sample search can exploit the ordering.
std::sort(foregroundBoundary.begin(), foregroundBoundary.end(), IntensityComp(image));
std::sort(backgroundBoundary.begin(), backgroundBoundary.end(), IntensityComp(image));
// samples[y][x] receives the best sample pair found for each pixel.
std::vector<std::vector<Sample> > samples;
calculateAlphaPatchMatch(image, trimap, foregroundBoundary, backgroundBoundary, samples);
_foreground.create(image.size(), CV_8UC3);
_alpha.create(image.size(), CV_8UC1);
_conf.create(image.size(), CV_8UC1);
cv::Mat_<cv::Vec3b> &foreground = (cv::Mat_<cv::Vec3b>&)_foreground;
cv::Mat_<uchar> &alpha = (cv::Mat_<uchar>&)_alpha;
cv::Mat_<uchar> &conf = (cv::Mat_<uchar>&)_conf;
for (int y = 0; y < alpha.rows; ++y)
for (int x = 0; x < alpha.cols; ++x)
{
switch (trimap(y, x))
{
case 0:
// Known background: fully transparent, fully confident.
alpha(y, x) = 0;
conf(y, x) = 255;
foreground(y, x) = 0;
break;
case 128:
{
// Unknown: use the matched sample; confidence decays exponentially
// with the match cost.
alpha(y, x) = 255 * samples[y][x].alpha;
conf(y, x) = 255 * exp(-samples[y][x].cost / 6);
cv::Point p = foregroundBoundary[samples[y][x].fi];
foreground(y, x) = image(p.y, p.x);
break;
}
case 255:
// Known foreground: fully opaque, fully confident.
alpha(y, x) = 255;
conf(y, x) = 255;
foreground(y, x) = image(y, x);
break;
}
}
}
示例11: reshapeDesc
// Scatters a single-column descriptor matrix back onto an image-sized matrix:
// oOutput is allocated at oSize with the descriptor type (CV_16UC1/CV_16UC3),
// zero-filled, and the k-th descriptor is written at the k-th keypoint's pixel
// location. voKeypoints and oDescriptors must be index-aligned.
void LBSP::reshapeDesc(cv::Size oSize, const std::vector<cv::KeyPoint>& voKeypoints, const cv::Mat& oDescriptors, cv::Mat& oOutput) {
static_assert(LBSP::DESC_SIZE==2,"bad assumptions in impl below");
CV_DbgAssert(!voKeypoints.empty());
CV_DbgAssert(!oDescriptors.empty() && oDescriptors.cols==1);
CV_DbgAssert(oSize.width>0 && oSize.height>0);
CV_DbgAssert(oDescriptors.type()==CV_16UC1 || oDescriptors.type()==CV_16UC3);
const size_t nChannels = (size_t)oDescriptors.channels();
const size_t nKeyPoints = voKeypoints.size();
if(nChannels==1) {
oOutput.create(oSize,CV_16UC1);
// Zero-fill so pixels without a keypoint stay at 0.
oOutput = cv::Scalar_<ushort>(0);
for(size_t k=0; k<nKeyPoints; ++k)
oOutput.at<ushort>(voKeypoints[k].pt) = oDescriptors.at<ushort>((int)k);
}
else { //nChannels==3
oOutput.create(oSize,CV_16UC3);
// Zero-fill so pixels without a keypoint stay at 0.
oOutput = cv::Scalar_<ushort>(0,0,0);
for(size_t k=0; k<nKeyPoints; ++k) {
// Manual 3-channel copy via raw pointers: row base + 3*x element offset.
ushort* output_ptr = (ushort*)(oOutput.data + oOutput.step.p[0]*(int)voKeypoints[k].pt.y);
const ushort* const desc_ptr = (ushort*)(oDescriptors.data + oDescriptors.step.p[0]*k);
const size_t idx = 3*(int)voKeypoints[k].pt.x;
for(size_t n=0; n<3; ++n)
output_ptr[idx+n] = desc_ptr[n];
}
}
}
示例12:
// Computes one 16-bit LBSP descriptor per keypoint into a single-column matrix
// (oDesc: nKeyPoints x 1, CV_16UC1 for gray input or CV_16UC3 for color).
// oRefImg provides the center/reference values (the input image itself is used
// when oRefImg is empty); _t is the absolute similarity threshold.
// The per-pixel bit pattern is generated by the included .i snippets, which
// read the local variables declared in the loop bodies — do not rename them.
static inline void lbsp_computeImpl( const cv::Mat& oInputImg,
const cv::Mat& oRefImg,
const std::vector<cv::KeyPoint>& voKeyPoints,
cv::Mat& oDesc,
size_t _t) {
CV_DbgAssert(oRefImg.empty() || (oRefImg.size==oInputImg.size && oRefImg.type()==oInputImg.type()));
CV_DbgAssert(oInputImg.type()==CV_8UC1 || oInputImg.type()==CV_8UC3);
CV_DbgAssert(LBSP::DESC_SIZE==2); // @@@ also relies on a constant desc size
const size_t nChannels = (size_t)oInputImg.channels();
const size_t _step_row = oInputImg.step.p[0];
const uchar* _data = oInputImg.data;
// Fall back to self-reference when no separate reference image is given.
const uchar* _refdata = oRefImg.empty()?oInputImg.data:oRefImg.data;
const size_t nKeyPoints = voKeyPoints.size();
if(nChannels==1) {
oDesc.create((int)nKeyPoints,1,CV_16UC1);
for(size_t k=0; k<nKeyPoints; ++k) {
const int _x = (int)voKeyPoints[k].pt.x;
const int _y = (int)voKeyPoints[k].pt.y;
const uchar _ref = _refdata[_step_row*(_y)+_x];
ushort& _res = oDesc.at<ushort>((int)k);
// The snippet computes the descriptor from _x, _y, _ref, _res, _data,
// _t and _step_row declared above.
#include "LBSP_16bits_dbcross_1ch.i"
}
}
else { //nChannels==3
oDesc.create((int)nKeyPoints,1,CV_16UC3);
for(size_t k=0; k<nKeyPoints; ++k) {
const int _x = (int)voKeyPoints[k].pt.x;
const int _y = (int)voKeyPoints[k].pt.y;
// _ref/_res point at 3 interleaved channel values for this keypoint.
const uchar* _ref = _refdata+_step_row*(_y)+3*(_x);
ushort* _res = ((ushort*)(oDesc.data + oDesc.step.p[0]*k));
// 3-channel variant of the snippet; same locals as above.
#include "LBSP_16bits_dbcross_3ch1t.i"
}
}
}
示例13: test2main
/** @function main */
// Demo entry point: loads a hard-coded test image, computes its histogram,
// extracts the hue channel from an HSV conversion and wires up a trackbar-driven
// Canny threshold UI. Operates on file-scope globals (src, dst, hsv, hue,
// lowThreshold, max_lowThreshold, window_name, ...).
int test2main()
{
/// Load the image
src = imread("/Users/sky/Downloads/daily_delete/aaa/Movie12347.MOV_MYIMG_ORI0.JPG");
// NOTE(review): myhist() runs before the empty-image check below — confirm it
// tolerates an empty Mat when the file is missing.
myhist(&src);
// myback();
/// Wait for user input
// waitKey(0);
// return 0;
if( !src.data )
{ return -1; }
cv::Mat threshold_output;
/// Create a matrix (dst) with the same type and size as src
dst.create( src.size(), src.type() );
cvtColor( src, hsv, CV_BGR2HSV );
/// Extract the Hue channel
hue.create( hsv.size(), hsv.depth() );
// Channel mapping {0,0}: copy source channel 0 (hue) to destination channel 0.
int ch[] = { 0, 0 };
mixChannels( &hsv, 1, &hue, 1, ch, 1 );
/// Convert the original image to grayscale
// cvtColor( hsv, src_gray, CV _BGR2GRAY );
// for (int i = 0; i < 255; ++i) {
// threshold(hue, threshold_output, i, 255, THRESH_BINARY );
// int aa = i;
// stringstream ss;
// ss<<aa;
// string s1 = ss.str();
// cout<<s1<<endl; // 30
//
// string s2 = "/Users/sky/Downloads/daily_delete/Gray_Image" + s1 + ".jpg";
// ss>>s2;
// cout<<s2<<endl; // 30
// imwrite("/Users/sky/Downloads/daily_delete/Gray_Image" + s1 + ".jpg", threshold_output);
// }
// threshold(hue, threshold_output, 0, 255, THRESH_OTSU );
// imshow("calcHist Demo", threshold_output );
/// Create the display window
namedWindow( window_name, CV_WINDOW_AUTOSIZE );
// imshow("abc", threshold_output);
/// Create the trackbar
createTrackbar( "Min Threshold:", window_name, &lowThreshold, max_lowThreshold, CannyThreshold );
/// Show the image
CannyThreshold(0, 0);
/// Wait for user input
waitKey(0);
return 0;
}
示例14:
// Default configuration: three detection scales plus 1x1 CV_64FC1 matrices
// holding the sigma values used by the recognizer.
opt_person_recognition()
{
	Scale[0] = 0.5;
	Scale[1] = 0.75;
	Scale[2] = 1;
	Sigma.create(1, 1, CV_64FC1);
	Sigma.at<double>(0, 0) = 0.6;
	Sigma_edge.create(1, 1, CV_64FC1);
	Sigma_edge.at<double>(0, 0) = 1;
}
示例15: fromMsg
// Collects all feature descriptors contained in `sensor_data` into a single
// cv::Mat `features`, one descriptor per row. Binary descriptor types
// (BRIEF/ORB/BRISK/FREAK) produce a CV_8U matrix, floating-point types
// (SURF/SIFT) a CV_32F matrix. Assumes every feature message shares one
// descriptor type and size; `features` is left untouched when no features are
// present, and unknown types only log an error.
void Conversions::fromMsg(const std::vector<graph_slam_msgs::SensorData> &sensor_data, cv::Mat &features)
{
    // Count feature data points. Assumption: all have the same feature type.
    int feature_type = 0;
    int feature_count = 0;
    int descriptor_size = 0;
    // const ref: copying each SensorData message per iteration is wasteful.
    for (const auto &data : sensor_data) {
        if (data.sensor_type == graph_slam_msgs::SensorData::SENSOR_TYPE_FEATURE) {
            feature_type = data.features.descriptor_type;
            if (data.features.features.size() > 0) {
                descriptor_size = (int)data.features.features[0].descriptor.size();
                feature_count += data.features.features.size();
            }
        }
    }
    if (feature_count > 0) {
        int k = 0; // next output row
        switch (feature_type) {
        case graph_slam_msgs::Features::BRIEF:
        case graph_slam_msgs::Features::ORB:
        case graph_slam_msgs::Features::BRISK:
        case graph_slam_msgs::Features::FREAK:
            // Binary descriptors: one byte per element.
            features.create(feature_count, descriptor_size, CV_8U);
            for (const auto &data : sensor_data) {
                if (data.sensor_type == graph_slam_msgs::SensorData::SENSOR_TYPE_FEATURE) {
                    for (unsigned int i = 0; i < data.features.features.size(); i++) {
                        for (int j = 0; j < descriptor_size; j++) {
                            // Messages transport descriptors as floats; narrow back to bytes.
                            float val = data.features.features[i].descriptor[j];
                            features.at<unsigned char>(k,j) = (unsigned char)val;
                        }
                        k++;
                    }
                }
            }
            break;
        case graph_slam_msgs::Features::SURF:
        case graph_slam_msgs::Features::SIFT:
            // Floating-point descriptors.
            features.create(feature_count, descriptor_size, CV_32F);
            for (const auto &data : sensor_data) {
                if (data.sensor_type == graph_slam_msgs::SensorData::SENSOR_TYPE_FEATURE) {
                    for (unsigned int i = 0; i < data.features.features.size(); i++) {
                        for (int j = 0; j < descriptor_size; j++) {
                            features.at<float>(k,j) = data.features.features[i].descriptor[j];
                        }
                        k++;
                    }
                }
            }
            break;
        default:
            ROS_ERROR_NAMED("feature_link_estimation", "unknown feature type: %d (LE)", feature_type);
            break;
        }
    }
}