本文整理汇总了C++中cv::Mat::elemSize1方法的典型用法代码示例。如果您正苦于以下问题:C++ Mat::elemSize1方法的具体用法?C++ Mat::elemSize1怎么用?C++ Mat::elemSize1使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类cv::Mat
的用法示例。
在下文中一共展示了Mat::elemSize1方法的11个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: get_array_data_arrangement
// Fills `result` with the NumPy-style layout (ndim, per-axis sizes and
// byte strides) of a cv::Mat. Returns false for an empty matrix.
bool get_array_data_arrangement(cv::Mat const &inst, sdcpp::array_data_arrangement &result)
{
    if(inst.empty()) return false;

    // Per-channel item size and total buffer size, both in bytes.
    result.item_size = inst.elemSize1();
    result.total_size = inst.rows*inst.step;

    // Multi-channel matrices expose the channels as a third, innermost axis.
    const bool has_channel_axis = inst.channels() > 1;
    const int ndim = has_channel_axis ? 3 : 2;
    result.ndim = ndim;
    result.size.resize(ndim);
    result.stride.resize(ndim);

    // Axis 0 = rows, axis 1 = columns, optional axis 2 = channels.
    result.size[0] = inst.rows;
    result.stride[0] = inst.step;
    result.size[1] = inst.cols;
    result.stride[1] = inst.elemSize();
    if(has_channel_axis)
    {
        result.size[2] = inst.channels();
        result.stride[2] = inst.elemSize1();
    }
    return true;
}
示例2: type
// Copies the whole device-side matrix (including any area outside the
// current ROI) into the host Mat `m`, then adjusts `m`'s ROI so it
// matches this oclMat's ROI.
void cv::ocl::oclMat::download(cv::Mat &m) const
{
CV_DbgAssert(!this->empty());
// Allocate the full (whole-size) host matrix with this matrix's type.
m.create(wholerows, wholecols, type());
if(m.channels() == 3)
{
// 3-channel data is kept 4-channel on the device (see convert_C4C3
// below), so it is first repacked into a tightly-pitched 3-channel
// temporary device buffer before the 2D copy to host memory.
int pitch = wholecols * 3 * m.elemSize1();
// Buffer size is rounded up to a multiple of elemSize1()*3072 bytes
// of tail padding -- presumably an alignment requirement of
// convert_C4C3; TODO confirm against that kernel.
int tail_padding = m.elemSize1() * 3072;
int err;
cl_mem temp = clCreateBuffer(*(cl_context*)clCxt->getOpenCLContextPtr(), CL_MEM_READ_WRITE,
(pitch * wholerows + tail_padding - 1) / tail_padding * tail_padding, 0, &err);
openCLVerifyCall(err);
// Repack the 4-channel device data into the 3-channel temp buffer.
convert_C4C3(*this, temp);
openCLMemcpy2D(clCxt, m.data, m.step, temp, pitch, wholecols * m.elemSize(), wholerows, clMemcpyDeviceToHost, 3);
openCLSafeCall(clReleaseMemObject(temp));
}
else
{
// All other channel counts copy straight from the device buffer.
openCLMemcpy2D(clCxt, m.data, m.step, data, step, wholecols * elemSize(), wholerows, clMemcpyDeviceToHost);
}
// Shrink m's ROI back to this oclMat's ROI (offset + size relative to
// the whole matrix).
Size wholesize;
Point ofs;
locateROI(wholesize, ofs);
m.adjustROI(-ofs.y, ofs.y + rows - wholerows, -ofs.x, ofs.x + cols - wholecols);
}
示例3: type
// Copies the whole device-side matrix (including any area outside the
// current ROI) into the host Mat `m`, then adjusts `m`'s ROI so it
// matches this oclMat's ROI. Variant using Context::oclContext().
void cv::ocl::oclMat::download(cv::Mat &m) const
{
CV_DbgAssert(!this->empty());
// int t = type();
// if(download_channels == 3)
//{
// t = CV_MAKETYPE(depth(), 3);
//}
// Allocate the full (whole-size) host matrix with this matrix's type.
m.create(wholerows, wholecols, type());
if(m.channels() == 3)
{
// 3-channel data is kept 4-channel on the device (see convert_C4C3
// below), so it is first repacked into a tightly-pitched 3-channel
// temporary device buffer before the 2D copy to host memory.
int pitch = wholecols * 3 * m.elemSize1();
// Buffer size is rounded up to a multiple of elemSize1()*3072 bytes
// of tail padding -- presumably an alignment requirement of
// convert_C4C3; TODO confirm against that kernel.
int tail_padding = m.elemSize1() * 3072;
int err;
cl_mem temp = clCreateBuffer((cl_context)clCxt->oclContext(), CL_MEM_READ_WRITE,
(pitch * wholerows + tail_padding - 1) / tail_padding * tail_padding, 0, &err);
openCLVerifyCall(err);
// Repack the 4-channel device data into the 3-channel temp buffer.
convert_C4C3(*this, temp);
openCLMemcpy2D(clCxt, m.data, m.step, temp, pitch, wholecols * m.elemSize(), wholerows, clMemcpyDeviceToHost, 3);
// Leftover debug code: reads both buffers back and compares the
// repacked C3 data against the original C4 data element by element.
//int* cputemp=new int[wholecols*wholerows * 3];
//int* cpudata=new int[this->step*this->wholerows/sizeof(int)];
//openCLSafeCall(clEnqueueReadBuffer(clCxt->impl->clCmdQueue, temp, CL_TRUE,
// 0, wholecols*wholerows * 3* sizeof(int), cputemp, 0, NULL, NULL));
//openCLSafeCall(clEnqueueReadBuffer(clCxt->impl->clCmdQueue, (cl_mem)data, CL_TRUE,
// 0, this->step*this->wholerows, cpudata, 0, NULL, NULL));
//for(int i=0;i<wholerows;i++)
//{
// int *a = cputemp+i*wholecols * 3,*b = cpudata + i*this->step/sizeof(int);
// for(int j=0;j<wholecols;j++)
// {
// if((a[3*j] != b[4*j])||(a[3*j+1] != b[4*j+1])||(a[3*j+2] != b[4*j+2]))
// printf("rows=%d,cols=%d,cputtemp=%d,%d,%d;cpudata=%d,%d,%d\n",
// i,j,a[3*j],a[3*j+1],a[3*j+2],b[4*j],b[4*j+1],b[4*j+2]);
// }
//}
//delete []cputemp;
//delete []cpudata;
openCLSafeCall(clReleaseMemObject(temp));
}
else
{
// All other channel counts copy straight from the device buffer.
openCLMemcpy2D(clCxt, m.data, m.step, data, step, wholecols * elemSize(), wholerows, clMemcpyDeviceToHost);
}
// Shrink m's ROI back to this oclMat's ROI (offset + size relative to
// the whole matrix).
Size wholesize;
Point ofs;
locateROI(wholesize, ofs);
m.adjustROI(-ofs.y, ofs.y + rows - wholerows, -ofs.x, ofs.x + cols - wholecols);
}
示例4: showPropertiesOfMat
//
// showPropertiesOfMat
//
// ...displays all properties of specified Mat.
//
void showPropertiesOfMat (const cv::Mat &src_mat)
{
// Number of rows
std::cout << "rows:" << src_mat.rows <<std::endl;
// Number of columns
std::cout << "cols:" << src_mat.cols << std::endl;
// Number of dimensions
std::cout << "dims:" << src_mat.dims << std::endl;
// Size (for the 2-dimensional case)
std::cout << "size[]:" << src_mat.size().width << "," << src_mat.size().height << "[byte]" << std::endl;
// Bit-depth ID (printed alongside CV_64F's ID for comparison)
std::cout << "depth (ID):" << src_mat.depth() << "(=" << CV_64F << ")" << std::endl;
// Number of channels
std::cout << "channels:" << src_mat.channels() << std::endl;
// Size of one channel of one element, in bytes
std::cout << "elemSize1 (elemSize/channels):" << src_mat.elemSize1() << "[byte]" << std::endl;
// Total number of elements
std::cout << "total:" << src_mat.total() << std::endl;
// Row stride, in bytes
std::cout << "step:" << src_mat.step << "[byte]" << std::endl;
// Total number of channels per step (row stride in elements)
std::cout << "step1 (step/elemSize1):" << src_mat.step1() << std::endl;
// Is the data stored contiguously?
std::cout << "isContinuous:" << (src_mat.isContinuous()?"true":"false") << std::endl;
// Is this a submatrix (ROI) of another Mat?
std::cout << "isSubmatrix:" << (src_mat.isSubmatrix()?"true":"false") << std::endl;
// Is the data empty?
std::cout << "empty:" << (src_mat.empty()?"true":"false") << std::endl;
}
示例5: describe
// Builds a one-line human-readable summary of a Mat's shape, channel
// count, depth and element sizes.
std::string describe(cv::Mat const &mat)
{
    std::ostringstream summary;
    summary << mat.rows << "x" << mat.cols
            << ", " << mat.channels() << " channels"
            << ", type " << CV_MAT_DEPTH(mat.type())
            << ", " << mat.elemSize1() << " byte elemsize1, "
            << mat.elemSize() << " byte elemsize";
    return summary.str();
}
示例6: Mat_cheaker
void Mat_cheaker(cv::Mat src_im){
std::cout << "rows: " << src_im.rows << std::endl;
std::cout << "cols: " << src_im.cols << std::endl;
std::cout << "chanel: " << src_im.channels() << std::endl;
std::cout << "step: " << src_im.step1() << std::endl;
std::cout << "dims: " << src_im.dims << std::endl;
std::cout << "elemesize: " << src_im.elemSize1() << std::endl;
}
示例7: if
// Copies `in` into `out` as a CV_32F matrix of the same size and
// channel count, zeroing every pixel within zero_len_ pixels of an
// image border. Handles 1-byte (uchar) and 4-byte (float) channel
// values; any other elemSize1() leaves `out` allocated but unwritten.
void ZeroBorderImageTransformer<Dtype>::Transform(const cv::Mat& in, cv::Mat& out) {
const int in_channels = in.channels();
const int in_height = in.rows;
const int in_width = in.cols;
// out is same dims as in, but must be float
// (0x18 masks bits of in.type() so the output keeps in's channel
// count with float depth -- NOTE(review): confirm this mask covers
// every channel count this transformer is used with.)
out.create(in.size(), CV_32F | (0x18 & in.type()));
for (int h = 0; h < in_height; ++h) {
// channel values are 1 byte wide (uchar)
if (in.elemSize1() == 1) {
const uchar* in_ptr = in.ptr<uchar>(h);
float* out_ptr = out.ptr<float>(h);
int index = 0;
for (int w = 0; w < in_width; ++w) {
for (int c = 0; c < in_channels; ++c) {
// NOTE(review): the right/bottom tests use `<` on (size - coord),
// which zeros only zero_len_-1 trailing columns/rows while the
// left/top tests zero zero_len_ leading ones -- looks like an
// off-by-one (`<=` expected); confirm intent before changing.
if (w < zero_len_ || (in_width - w) < zero_len_ || h < zero_len_ || (in_height - h) < zero_len_) {
out_ptr[index] = 0;
} else {
out_ptr[index] = in_ptr[index];
}
//DLOG(INFO) << "c: " << c << " h: " << h << " w: " << w << " index: " << index << " in_val: " << ((float)in_ptr[index]) << " + " << rand_data[index] << " = " << out_ptr[index];
index++;
}
}
// channel values are 4 bytes wide (float)
} else if (in.elemSize1() == 4) {
const float* in_ptr = in.ptr<float>(h);
float* out_ptr = out.ptr<float>(h);
int index = 0;
for (int w = 0; w < in_width; ++w) {
for (int c = 0; c < in_channels; ++c) {
// Same border test as the uchar branch (same off-by-one caveat).
if (w < zero_len_ || (in_width - w) < zero_len_ || h < zero_len_ || (in_height - h) < zero_len_) {
out_ptr[index] = 0;
} else {
out_ptr[index] = in_ptr[index];
}
//DLOG(INFO) << "c: " << c << " h: " << h << " w: " << w << " index: " << index << " in_val: " << ((float)in_ptr[index]) << " + " << rand_data[index] << " = " << out_ptr[index];
index++;
}
}
}
}
}
示例8: getChannel
// Returns a zero-copy single-channel view of one channel of a 2-D,
// multi-channel Mat. The view shares storage with `mat`: its data
// pointer is offset to the requested channel while the parent's row
// and element strides are kept, so consecutive elements skip over the
// other interleaved channels.
cv::Mat getChannel(cv::Mat &mat, int channel)
{
    assert(channel >= 0 && channel < mat.channels());
    assert(mat.dims == 2);

    int const dimensions[2] = { mat.rows, mat.cols };
    int const single_channel_type = CV_MAKETYPE(CV_MAT_DEPTH(mat.type()), 1);
    // Offset into the first element by `channel` sub-elements.
    void *channel_data = ((unsigned char*)mat.data) + channel * mat.elemSize1();
    size_t strides[2] = { mat.step[0], mat.step[1] };
    cv::Mat view(2, dimensions, single_channel_type, channel_data, strides);
    return view;
}
示例9: CalibPars
// Extracts stereo calibration parameters -- focal length f, principal
// point (c_x, c_y) and baseline b -- from a 4x4 disparity-to-depth
// matrix Q (as produced by cv::stereoRectify). Only double (CV_64F)
// matrices are accepted; for any other element type a diagnostic is
// printed and all parameters are zeroed instead of being left
// uninitialized.
CalibPars(const cv::Mat& Q)
{
// elemSize1() is the per-channel size in bytes: 8 bytes <=> double.
size_t bytes_per_channel = Q.elemSize1();
if(bytes_per_channel == 8)
{
f = Q.at<double>(2,3);
c_x = (-1.0)*Q.at<double>(0,3);
c_y = (-1.0)*Q.at<double>(1,3);
b = (-1.0)/Q.at<double>(3,2);
}
else
{
// Zero the members so callers never read indeterminate values.
f = 0.0;
c_x = 0.0;
c_y = 0.0;
b = 0.0;
cout<<"Only double type matrix is allowed!"<<endl;
}
};
示例10:
// Wraps an existing cv::Mat's memory in a Torch THByteTensor without
// copying: the tensor storage points directly at mat.data, and the Mat
// is addref'd so OpenCV does not free the buffer while the tensor is
// alive. Multi-channel Mats gain an extra trailing tensor dimension of
// size channels() with stride 1.
TensorWrapper::TensorWrapper(cv::Mat & mat) {
if (mat.empty()) {
this->tensorPtr = nullptr;
return;
}
// Record the OpenCV depth so consumers know how to reinterpret the
// raw byte storage.
this->typeCode = static_cast<char>(mat.depth());
THByteTensor *outputPtr = new THByteTensor;
// Build new storage on top of the Mat
outputPtr->storage = THByteStorage_newWithData(
mat.data,
mat.step[0] * mat.rows
);
// Mat strides (step[]) are in bytes; Torch strides are in elements,
// so each step is divided by the per-element byte size below.
int sizeMultiplier;
if (mat.channels() == 1) {
outputPtr->nDimension = mat.dims;
sizeMultiplier = cv::getElemSize(mat.depth());
} else {
outputPtr->nDimension = mat.dims + 1;
sizeMultiplier = mat.elemSize1();
}
outputPtr->size = static_cast<long *>(THAlloc(sizeof(long) * outputPtr->nDimension));
outputPtr->stride = static_cast<long *>(THAlloc(sizeof(long) * outputPtr->nDimension));
if (mat.channels() > 1) {
// Trailing dimension indexes the interleaved channels (stride 1
// in elements).
outputPtr->size[outputPtr->nDimension - 1] = mat.channels();
outputPtr->stride[outputPtr->nDimension - 1] = 1; //cv::getElemSize(returnValue.typeCode);
}
for (int i = 0; i < mat.dims; ++i) {
outputPtr->size[i] = mat.size[i];
outputPtr->stride[i] = mat.step[i] / sizeMultiplier;
}
// Prevent OpenCV from deallocating Mat data
mat.addref();
outputPtr->refcount = 0;
this->tensorPtr = outputPtr;
}
示例11: DrawTransPinP
/**-----------------------------------------------------------*
 * @fn DrawTransPinP
 * @brief Overlays a transparent foreground image onto a background image.
 * @param[out] img_dst  composited output image
 * @param[in ] transImg foreground image; must carry an alpha channel (CV_8UC4)
 * @param[in ] baseImg  background image; no alpha channel needed (CV_8UC3)
 * @param[in ] tgtPt    four destination corner points; when fewer than
 *                      four are given, the foreground is centered on
 *                      the background instead.
 *------------------------------------------------------------*/
void DrawTransPinP(cv::Mat &img_dst, const cv::Mat transImg, const cv::Mat baseImg, vector<cv::Point2f> tgtPt)
{
cv::Mat img_rgb, img_aaa, img_1ma;
vector<cv::Mat>planes_rgba, planes_rgb, planes_aaa, planes_1ma;
// Maximum channel value for the background's bit depth (255 for 8-bit).
int maxVal = pow(2, 8*baseImg.elemSize1())-1;
// Only RGBA foreground over RGB background is accepted, and both must
// share the same bit depth; otherwise return a solid placeholder.
if(transImg.data==NULL || baseImg.data==NULL || transImg.channels()<4 ||baseImg.channels()<3 || (transImg.elemSize1()!=baseImg.elemSize1()) )
{
img_dst = cv::Mat(100,100, CV_8UC3);
img_dst = cv::Scalar::all(maxVal);
return;
}
// When no destination quad was supplied, center the foreground on the
// background image.
if(tgtPt.size()<4)
{
// Destination corners (centered on the background image).
int ltx = (baseImg.cols - transImg.cols)/2;
int lty = (baseImg.rows - transImg.rows)/2;
int ww = transImg.cols;
int hh = transImg.rows;
tgtPt.push_back(cv::Point2f(ltx , lty));
tgtPt.push_back(cv::Point2f(ltx+ww, lty));
tgtPt.push_back(cv::Point2f(ltx+ww, lty+hh));
tgtPt.push_back(cv::Point2f(ltx , lty+hh));
}
// Build the perspective transform mapping the foreground's corners to
// the destination quad.
vector<cv::Point2f>srcPt;
srcPt.push_back( cv::Point2f(0, 0) );
srcPt.push_back( cv::Point2f(transImg.cols-1, 0) );
srcPt.push_back( cv::Point2f(transImg.cols-1, transImg.rows-1) );
srcPt.push_back( cv::Point2f(0, transImg.rows-1) );
cv::Mat mat = cv::getPerspectiveTransform(srcPt, tgtPt);
// Create an alpha-capable image with the same width/height as the
// output, then warp the foreground into place (untouched pixels stay
// fully transparent thanks to BORDER_TRANSPARENT over the zero fill).
cv::Mat alpha0(baseImg.rows, baseImg.cols, transImg.type() );
alpha0 = cv::Scalar::all(0);
cv::warpPerspective(transImg, alpha0, mat,alpha0.size(), cv::INTER_CUBIC, cv::BORDER_TRANSPARENT);
// Split into individual channels.
cv::split(alpha0, planes_rgba);
// Rebuild an RGB image from the RGBA planes.
planes_rgb.push_back(planes_rgba[0]);
planes_rgb.push_back(planes_rgba[1]);
planes_rgb.push_back(planes_rgba[2]);
merge(planes_rgb, img_rgb);
// Extract the alpha channel, replicated across three planes.
planes_aaa.push_back(planes_rgba[3]);
planes_aaa.push_back(planes_rgba[3]);
planes_aaa.push_back(planes_rgba[3]);
merge(planes_aaa, img_aaa);
// Inverse alpha (maxVal - alpha) for weighting the background.
planes_1ma.push_back(maxVal-planes_rgba[3]);
planes_1ma.push_back(maxVal-planes_rgba[3]);
planes_1ma.push_back(maxVal-planes_rgba[3]);
merge(planes_1ma, img_1ma);
// Alpha blend: dst = fg*alpha/maxVal + bg*(maxVal-alpha)/maxVal.
img_dst = img_rgb.mul(img_aaa, 1.0/(double)maxVal) + baseImg.mul(img_1ma, 1.0/(double)maxVal);
}