This article collects typical usage examples of the C++ method Mat_::create. If you are wondering what Mat_::create does, how to call it, or what real-world usage looks like, the selected code samples below may help. You can also look further into usage examples of the containing class, Mat_.
The following 15 code examples all demonstrate Mat_::create and are ordered roughly by popularity.
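Before the project examples, here is a minimal, self-contained sketch of what Mat_::create does (added for orientation; the variable names are purely illustrative). create(rows, cols) (re)allocates the matrix to the requested size with the element type of the Mat_ instantiation, and is essentially a no-op when the matrix already has exactly that size.

#include <opencv2/core.hpp>
using namespace cv;

int main()
{
    // Allocate a 4x8 single-channel float matrix, then zero it.
    Mat_<float> response;
    response.create(4, 8);          // rows, cols; reuses the buffer if the size already matches
    response.setTo(Scalar::all(0));

    // The Size overload takes (width, height), i.e. 480 rows x 640 cols here.
    Mat_<Vec3b> image;
    image.create(Size(640, 480));
    image.setTo(Scalar(255, 255, 255));
    return 0;
}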
Example 1: sqrt
template <typename _Tp>
void GaborFilter<_Tp>::generateFilter(Mat_<complex<_Tp> >& result,
    int scale, int orientation, int filterSizeX, int filterSizeY,
    _Tp kMax, _Tp sigma)
{
    _Tp offsetX = filterSizeX / 2.0;
    _Tp offsetY = filterSizeY / 2.0;
    _Tp psi = (orientation * M_PI) / 8;
    _Tp f = sqrt(2);
    _Tp fV = pow(f, scale);
    _Tp kReal = (kMax / fV) * cos(psi);
    _Tp kImag = (kMax / fV) * sin(psi);
    _Tp kSquare = pow(sqrt(pow(kReal, 2) + pow(kImag, 2)), 2);
    _Tp sSquare = pow(sigma, 2);
    _Tp sSquareHalf = -0.5 * sSquare;
    _Tp kS = kSquare / sSquare;
    _Tp kSHalf = -0.5f * kSquare / sSquare;

    result.create(filterSizeX, filterSizeY);
    int numberColumns = filterSizeY * filterSizeX;
    complex<_Tp>* data = ((Mat)result).ptr<complex<_Tp> >(0);

    GaborFilterBody<_Tp> gaborFilterBody(mFilterSizeX, mFilterSizeY,
        offsetX, offsetY, kS, kSHalf, kReal, kImag, sSquare, data);
    parallel_for(BlockedRange(0, numberColumns), gaborFilterBody);
}
Example 2: Response
//===========================================================================
void Multi_SVR_patch_expert::Response(const Mat_<float> &area_of_interest, Mat_<double> &response)
{
    int response_height = area_of_interest.rows - height + 1;
    int response_width = area_of_interest.cols - width + 1;

    if(response.rows != response_height || response.cols != response_width)
    {
        response.create(response_height, response_width);
    }

    // For the purposes of the experiment only use the response of normal intensity, for fair comparison
    if(svr_patch_experts.size() == 1)
    {
        svr_patch_experts[0].Response(area_of_interest, response);
    }
    else
    {
        // Responses from multiple patch experts; these can be gradients, LBPs, etc.
        response.setTo(1.0);

        Mat_<double> modality_resp(response_height, response_width);

        for(size_t i = 0; i < svr_patch_experts.size(); i++)
        {
            svr_patch_experts[i].Response(area_of_interest, modality_resp);
            response = response.mul(modality_resp);
        }
    }
}
Example 3: initialize
void BackgroundSubtractorGMGImpl::initialize(Size frameSize, double minVal, double maxVal)
{
    CV_Assert(minVal < maxVal);
    CV_Assert(maxFeatures > 0);
    CV_Assert(learningRate >= 0.0 && learningRate <= 1.0);
    CV_Assert(numInitializationFrames >= 1);
    CV_Assert(quantizationLevels >= 1 && quantizationLevels <= 255);
    CV_Assert(backgroundPrior >= 0.0 && backgroundPrior <= 1.0);

    minVal_ = minVal;
    maxVal_ = maxVal;

    frameSize_ = frameSize;
    frameNum_ = 0;

    nfeatures_.create(frameSize_);
    colors_.create(frameSize_.area(), maxFeatures);
    weights_.create(frameSize_.area(), maxFeatures);

    nfeatures_.setTo(Scalar::all(0));
}
Example 4: ResponseDepth
void Multi_SVR_patch_expert::ResponseDepth(const Mat_<float>& area_of_interest, Mat_<double>& response)
{
    int response_height = area_of_interest.rows - height + 1;
    int response_width = area_of_interest.cols - width + 1;

    if(response.rows != response_height || response.cols != response_width)
    {
        response.create(response_height, response_width);
    }

    // With depth patch experts only do raw data modality
    svr_patch_experts[0].ResponseDepth(area_of_interest, response);
}
Example 5: main
int main()
{
    Mat_<Vec3b> srcImage;
    srcImage.create(512, 512);
    // Fill the image with white; the element type of Mat_<Vec3b> is Vec3b,
    // so use Vec3b rather than Vec3f here.
    for (int i = 0; i < srcImage.rows; i++)
        for (int j = 0; j < srcImage.cols; j++)
            srcImage(i, j) = Vec3b(255, 255, 255);
    imshow("srcImage", srcImage);
    waitKey();
    return 0;
}
Example 6: minimize
double ConjGradSolverImpl::minimize(InputOutputArray x){
    CV_Assert(_Function.empty()==false);
    dprintf(("termcrit:\n\ttype: %d\n\tmaxCount: %d\n\tEPS: %g\n",_termcrit.type,_termcrit.maxCount,_termcrit.epsilon));

    Mat x_mat=x.getMat();
    CV_Assert(MIN(x_mat.rows,x_mat.cols)==1);
    int ndim=MAX(x_mat.rows,x_mat.cols);
    CV_Assert(x_mat.type()==CV_64FC1);

    if(d.cols!=ndim){
        d.create(1,ndim);
        r.create(1,ndim);
        r_old.create(1,ndim);
        minimizeOnTheLine_buf1.create(1,ndim);
        minimizeOnTheLine_buf2.create(1,ndim);
    }

    Mat_<double> proxy_x;
    if(x_mat.rows>1){
        buf_x.create(1,ndim);
        Mat_<double> proxy(ndim,1,buf_x.ptr<double>());
        x_mat.copyTo(proxy);
        proxy_x=buf_x;
    }else{
        proxy_x=x_mat;
    }
    _Function->getGradient(proxy_x.ptr<double>(),d.ptr<double>());
    d*=-1.0;
    d.copyTo(r);

    // Here everything goes; check that everything is set properly.
    dprintf(("proxy_x\n"));print_matrix(proxy_x);
    dprintf(("d first time\n"));print_matrix(d);
    dprintf(("r\n"));print_matrix(r);

    for(int count=0;count<_termcrit.maxCount;count++){
        minimizeOnTheLine(_Function,proxy_x,d,minimizeOnTheLine_buf1,minimizeOnTheLine_buf2);
        r.copyTo(r_old);
        _Function->getGradient(proxy_x.ptr<double>(),r.ptr<double>());
        r*=-1.0;
        double r_norm_sq=norm(r);
        if(_termcrit.type==(TermCriteria::MAX_ITER+TermCriteria::EPS) && r_norm_sq<_termcrit.epsilon){
            break;
        }
        r_norm_sq=r_norm_sq*r_norm_sq;
        double beta=MAX(0.0,(r_norm_sq-r.dot(r_old))/r_norm_sq);
        d=r+beta*d;
    }

    if(x_mat.rows>1){
        Mat(ndim, 1, CV_64F, proxy_x.ptr<double>()).copyTo(x);
    }
    return _Function->calc(proxy_x.ptr<double>());
}
Example 7: myaccumarray
template <class T>
void myaccumarray(Mat & subs, Mat & val, Mat_<T> & accumM, cv::Size size){
    accumM.create(size);
    accumM.setTo(Scalar::all(0));
    cout<<"channels: "<<val.channels()<<endl;
    for (int i=0;i<subs.rows;i++)
    {
        for (int c=0;c<val.channels();c++)
        {
            //cout<<(subs.at<int>(i,0))<<","<<(subs.at<int>(i,1))<<" "<<endl;
            //cout<<val.at<T>(i,0)[c]<<endl;
            accumM.at<T>((subs.at<int>(i,0)),(subs.at<int>(i,1)))[c] += val.at<T>(i,0)[c];
            //cout<<(subs.at<int>(i,0))<<","<<(subs.at<int>(i,1))<<" "<<accumM.at<T>((subs.at<int>(i,0)),(subs.at<int>(i,1)))[c]<<endl;
        }
    }
}
Example 8: convertQImageToMat
/**
 * @brief Util::convertQImageToMat
 * @brief Convert a QImage to a cv::Mat
 * @param img_qt the source QImage
 * @param img_cv the destination cv::Mat
 * @return none
 */
void Util::convertQImageToMat( QImage &img_qt, Mat_<Vec3b>& img_cv){
    img_cv.create(img_qt.height(), img_qt.width());
    // convertToFormat() returns a new image rather than converting in place,
    // so the result must be assigned back before reading the pixel buffer.
    img_qt = img_qt.convertToFormat(QImage::Format_RGB32);

    int lineNum = 0;
    int height = img_qt.height();
    int width = img_qt.width();
    uchar *imgBits = img_qt.bits();
    for(int i=0; i<height; i++){
        lineNum = i * width * 4;
        for(int j=0; j<width; j++){
            img_cv(i, j)[2] = imgBits[lineNum + j*4 + 2];
            img_cv(i, j)[1] = imgBits[lineNum + j*4 + 1];
            img_cv(i, j)[0] = imgBits[lineNum + j*4 + 0];
        }
    }
}
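A call site for the helper above might look like the following. This is an illustrative sketch, not part of the original project: the header name util.h, the input file name, and the assumption that convertQImageToMat is callable as a static member are all mine; only the signature is taken from the example.

#include <QImage>
#include <opencv2/core.hpp>
#include "util.h"   // hypothetical header declaring Util::convertQImageToMat

void loadAndConvert()
{
    QImage qtImage("input.png");        // hypothetical input file
    cv::Mat_<cv::Vec3b> cvImage;
    Util::convertQImageToMat(qtImage, cvImage);
    // cvImage now has the same dimensions as qtImage, with the channel order
    // produced by the copy loop above.
}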
Example 9: linearizeHomographyAt
static inline void linearizeHomographyAt( const Mat_<double>& H, const Point2f& pt, Mat_<double>& A )
{
    A.create(2,2);
    double p1 = H(0,0)*pt.x + H(0,1)*pt.y + H(0,2),
           p2 = H(1,0)*pt.x + H(1,1)*pt.y + H(1,2),
           p3 = H(2,0)*pt.x + H(2,1)*pt.y + H(2,2),
           p3_2 = p3*p3;
    if( p3 )
    {
        A(0,0) = H(0,0)/p3 - p1*H(2,0)/p3_2; // fxdx
        A(0,1) = H(0,1)/p3 - p1*H(2,1)/p3_2; // fxdy
        A(1,0) = H(1,0)/p3 - p2*H(2,0)/p3_2; // fydx
        A(1,1) = H(1,1)/p3 - p2*H(2,1)/p3_2; // fydy
    }
    else
        A.setTo(Scalar::all(numeric_limits<double>::max()));
}
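A brief note on where these expressions come from (my own annotation, not part of the original snippet): with p1 = H(0,0)x + H(0,1)y + H(0,2), p2 = H(1,0)x + H(1,1)y + H(1,2) and p3 = H(2,0)x + H(2,1)y + H(2,2), the homography maps (x, y) to (p1/p3, p2/p3), and the quotient rule gives

\[ \frac{\partial}{\partial x}\left(\frac{p_1}{p_3}\right) = \frac{H_{00}}{p_3} - \frac{p_1\,H_{20}}{p_3^{2}} \]

which is exactly A(0,0); the other three entries follow the same pattern for the remaining numerator/variable combinations.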
Example 10: viewShapeModelUpdate
void ShapeModel::viewShapeModelUpdate(ModelViewInfo *pInfo)
{
    Mat_< double > paramV;
    paramV.create(this->nShapeParams, 1);
    for (int i=0;i<nShapeParams;i++){
        paramV(i, 0) = (pInfo->vList[i]/30.0 - 0.5)*6*
            sqrt(pcaShape->eigenvalues.at<double>(i, 0));
    }

    Mat_<cv::Vec3b> img;
    ModelImage s;
    s.setShapeInfo(&shapeInfo);
    s.loadTrainImage(Mat_<unsigned char>::ones(190*2, 160*2)*255);
    projectParamToShape(paramV, s.shapeVec);
    SimilarityTrans st = s.shapeVec.getShapeTransformFitingSize(
        Size(320, 380));
    s.buildFromShapeVec(st, 1000);
    img = s.show(0, -1, false);
    imshow("Viewing Shape Model", img);
}
Example 11: computeDistance
void EnhancedStereo::computeDistance(Mat_<float> & distance)
{
    distance.create(smallHeight(), smallWidth());
    for (int v = 0; v < smallHeight(); v++)
    {
        for (int u = 0; u < smallWidth(); u++)
        {
            if (smallDisparity(v, u) == 0)
            {
                distance(v, u) = 100;
                continue;
            }
            int idx = getLinearIdx(vBig(v), uBig(u));
            Point pinf = pinfPxVec[idx];
            CurveRasterizer<Polynomial2> raster(pinf.x, pinf.y, epipolePx.x, epipolePx.y, epipolarVec[idx]);
            raster.step(smallDisparity(v, u));
            Vector3d X = triangulate(uBig(u), vBig(v), raster.x, raster.y);
            distance(v, u) = X.norm();
        }
    }
}
Example 12: generatePlane
void EnhancedStereo::generatePlane(Transformation<double> TcameraPlane,
    Mat_<float> & distance, const vector<Vector3d> & polygonVec)
{
    distance.create(smallHeight(), smallWidth());
    Vector3d t = TcameraPlane.trans();
    Vector3d z = TcameraPlane.rotMat().col(2);
    vector<Vector3d> polygonCamVec;
    TcameraPlane.transform(polygonVec, polygonCamVec);
    for (int v = 0; v < smallHeight(); v++)
    {
        for (int u = 0; u < smallWidth(); u++)
        {
            distance(v, u) = 0;
            Vector3d vec; // the direction vector
            if (not cam1.reconstructPoint(Vector2d(uBig(u), vBig(v)), vec)) continue;
            double zvec = z.dot(vec);
            if (zvec < 1e-3)
            {
                continue;
            }
            bool inside = true;
            for (int i = 0; i < polygonCamVec.size(); i++)
            {
                int j = (i + 1) % polygonCamVec.size();
                Vector3d normal = polygonCamVec[i].cross(polygonCamVec[j]);
                if (vec.dot(normal) < 0)
                {
                    inside = false;
                    break;
                }
            }
            if (not inside) continue;
            double tz = t.dot(z);
            double alpha = tz / zvec;
            vec *= alpha;
            distance(v, u) = vec.norm();
        }
    }
}
Example 13: segment
void SegmenterHumanSimple::segment(const cv::Mat& img, Mat_<uchar>& mask)
{
    Mat imgBGR;
    Mat imgLAB;
    Mat imgBGRo;
    float rate = 500.0f/img.cols;
    GaussianBlur(img,imgBGRo,Size(),0.8,0.8);

    vector<Rect> faces;
    resize(imgBGRo,imgBGRo,Size(),rate,rate);
    cv::CascadeClassifier faceModel(this->_m_filenameFaceModel);
    faceModel.detectMultiScale(imgBGRo,faces);

    imgBGRo.convertTo( imgBGR, CV_32F, 1.0/255. );
    cvtColor( imgBGR, imgLAB, CV_BGR2Lab );

    Superpixel sp(1000,1,5);
    Mat_<int> segmentation = sp.segment(imgLAB);
    vector<SuperpixelStatistic> stat = sp.stat(imgLAB,imgBGR,segmentation);

    Mat_<float> prob;
    this->getPixelProbability(imgBGRo,prob,faces);
    Mat_<float> sprob;
    UtilsSuperpixel::Stat(segmentation,prob,stat,sprob);

    Mat_<int> initial(int(stat.size()),1);
    initial.setTo(1,sprob>0.5);
    initial.setTo(0,sprob<=0.5);
    Mat_<float> probaColor;
    int myx = cv::countNonZero(initial);
    this->_getColorProba(stat,initial,probaColor);

    Mat_<float> fgdInit,bgdInit,fgdColor,bgdColor;
    this->_prob2energy(sprob,fgdInit,bgdInit);
    this->_prob2energy(probaColor,fgdColor,bgdColor);
    Mat_<float> fgdEnergy, bgdEnergy;
    fgdEnergy = fgdInit + fgdColor;
    bgdEnergy = bgdInit + bgdColor;

    Mat_<int> label;
    mask.create(imgBGRo.rows,imgBGRo.cols);
    UtilsSegmentation::MaxFlowSuperpixel(stat,fgdEnergy,bgdEnergy,50.0,label);

    for( int i=0;i<mask.rows;i++)
    {
        for(int j=0;j<mask.cols;j++)
        {
            if ( label(segmentation(i,j)) > 0.5)
            {
                mask(i,j) = 255;
            }
            else
            {
                mask(i,j) = 0;
            }
        }
    }
    cv::resize(mask,mask,Size(img.cols,img.rows));
    mask.setTo(255,mask>128);
    mask.setTo(0,mask<=128);
}
Example 14: MaxFlowSuperpixel
void UtilsSegmentation::MaxFlowSuperpixel(std::vector<SuperpixelStatistic>& spstat, const Mat_<float>& fgdEnergy,
    const Mat_<float>& bgdEnergy, float gamma, Mat_<int>& label)
{
    //::Graph<float,float,float> graph(nNode,nEdge,errfunc);
    //graph
    int nEdge = UtilsSuperpixel::CountEdge(spstat);
    Mat_<int> edgeVertex(nEdge,2);
    Mat_<float> edgeWeight(nEdge,1);
    Mat_<float> edgeLen(nEdge,1);
    int idx = 0;
    for(int i=0;i<spstat.size();i++)
    {
        SuperpixelStatistic& sp = spstat[i];
        for( set<int>::iterator j=sp.conn.begin();
             j!= sp.conn.end();
             j++)
        {
            int d = (*j);
            SuperpixelStatistic& dsp = spstat[d];
            if ( i != d)
            {
                edgeVertex(idx,0) = min(i,d);
                edgeVertex(idx,1) = max(i,d);
                float diff = (float) norm(sp.mean_color_ - dsp.mean_color_);
                edgeWeight(idx) = diff*diff;
                edgeLen(idx) = (float) cv::norm(sp.mean_position_-dsp.mean_position_);
                idx++;
            }
        }
    }
    float beta = (float) cv::mean(edgeWeight)[0];
    Graph<float,float,float> graph((int)spstat.size(), nEdge, errfunc);
    graph.add_node((int)spstat.size());

    for(int i=0;i<fgdEnergy.total();i++)
    {
        graph.add_tweights(i,bgdEnergy(i),fgdEnergy(i));
    }
    edgeWeight = - edgeWeight / beta;
    cv::exp(edgeWeight,edgeWeight);
    edgeWeight *= gamma;
    cv::divide(edgeWeight, edgeLen,edgeWeight);

    for(int i=0;i<nEdge;i++)
    {
        float w = edgeWeight(i);
        graph.add_edge(edgeVertex(i,0),edgeVertex(i,1),w,w);
    }
    graph.maxflow();
    label.create((int)spstat.size(),1);

    for(int i=0;i<spstat.size();i++)
    {
        if ( graph.what_segment(i) == Graph<float,float,float>::SOURCE)
        {
            label(i) = 1;
        }
        else
        {
            label(i) = 0;
        }
    }
}
Example 15: init_label_super
void spm_bp::init_label_super(Mat_<Vec2f>& label_super_k, Mat_<float>& dCost_super_k) //, vector<vector<Vec2f> > &label_saved, vector<vector<Mat_<float> > > &dcost_saved)
{
    printf("==================================================\n");
    printf("Initiating particles...Done!\n");
    vector<Vec2f> label_vec;
    Mat_<float> localDataCost;
    for (int sp = 0; sp < numOfSP1; ++sp) {
        int id = repPixels1[sp];
        int y = subRange1[id][0];
        int x = subRange1[id][1];
        int h = subRange1[id][3] - subRange1[id][1] + 1;
        int w = subRange1[id][2] - subRange1[id][0] + 1;

        label_vec.clear();
        int k = 0;
        while (k < NUM_TOP_K) {
            float du = (float(rand()) / RAND_MAX - 0.5) * 2 * (float)disp_range_u;
            float dv = (float(rand()) / RAND_MAX - 0.5) * 2 * (float)disp_range_v;
            du = floor(du * upScale + 0.5) / upScale;
            dv = floor(dv * upScale + 0.5) / upScale;
            if (du >= -disp_range_u && du <= disp_range_u && dv >= -disp_range_v && dv <= disp_range_v) {
                int index_tp = 1;
                for (int k1 = 0; k1 < k; ++k1) {
                    if (checkEqual_PMF_PMBP(label_super_k[repPixels1[sp]][k1], Vec2f(du, dv)))
                        index_tp = 0;
                }
                if (index_tp == 1) {
                    for (int ii = 0; ii < superpixelsList1[sp].size(); ++ii)
                        label_super_k[superpixelsList1[sp][ii]][k] = Vec2f(du, dv);
                    label_vec.push_back(Vec2f(du, dv));
                    ++k;
                }
            }
        }

#if USE_CLMF0_TO_AGGREGATE_COST
        cv::Mat_<cv::Vec4b> leftCombinedCrossMap;
        leftCombinedCrossMap.create(h, w);
        subCrossMap1[sp].copyTo(leftCombinedCrossMap);
        CFFilter cff;
#endif

        int vec_size = label_vec.size();
        localDataCost.create(h, w * vec_size);
        localDataCost.setTo(0);

#pragma omp parallel for num_threads(NTHREADS)
        for (int i = 0; i < vec_size; ++i) {
            int kx = i * w;
            Mat_<float> rawCost;
            getLocalDataCostPerlabel(sp, label_vec[i], rawCost);
#if USE_CLMF0_TO_AGGREGATE_COST
            cff.FastCLMF0FloatFilterPointer(leftCombinedCrossMap, rawCost, rawCost);
#endif
            rawCost.copyTo(localDataCost(cv::Rect(kx, 0, w, h)));
        }
        //getLocalDataCost( sp, label_vec, localDataCost);

        int pt, px, py, kx;
        for (int ii = 0; ii < superpixelsList1[sp].size(); ++ii) {
            //cout<<ii<<endl;
            pt = superpixelsList1[sp][ii];
            px = pt / width1;
            py = pt % width1;
            for (int kk = 0; kk < NUM_TOP_K; kk++) {
                kx = kk * w;
                const Mat_<float>& local = localDataCost(cv::Rect(kx, 0, w, h));
                dCost_super_k[pt][kk] = local[px - x][py - y];
            }
        }
    }
    printf("==================================================\n");
}