本文整理汇总了C++中InputArray::copyTo方法的典型用法代码示例。如果您正苦于以下问题:C++ InputArray::copyTo方法的具体用法?C++ InputArray::copyTo怎么用?C++ InputArray::copyTo使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类InputArray
的用法示例。
在下文中一共展示了InputArray::copyTo方法的8个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: setLayerSizes
// Re-initializes the network topology from the given layer-size vector.
// Clears all previous state, validates every layer's neuron count, and
// pre-allocates the weight matrices; two extra entries are created beyond
// the layer count (rows of 2*ninputs / 2*noutputs doubles — presumably
// per-neuron scaling parameters; confirm against the training code).
void setLayerSizes( InputArray _layer_sizes )
{
    clear();
    _layer_sizes.copyTo(layer_sizes);

    const int num_layers = layer_count();
    weights.resize(num_layers + 2);
    max_lsize = 0;

    if( num_layers <= 0 )
        return;

    for( int idx = 0; idx < num_layers; idx++ )
    {
        const int sz = layer_sizes[idx];
        // hidden layers (0 < idx < num_layers-1) must have at least 2 neurons,
        // while the input and output layers need at least 1
        if( sz < 1 + (0 < idx && idx < num_layers-1))
            CV_Error( CV_StsOutOfRange,
                      "there should be at least one input and one output "
                      "and every hidden layer must have more than 1 neuron" );
        max_lsize = std::max( max_lsize, sz );
        // weights[idx] connects layer idx-1 to layer idx; +1 row for the bias term
        if( idx > 0 )
            weights[idx].create(layer_sizes[idx-1]+1, sz, CV_64F);
    }

    const int in_count = layer_sizes.front();
    const int out_count = layer_sizes.back();
    weights[0].create(1, in_count*2, CV_64F);
    weights[num_layers].create(1, out_count*2, CV_64F);
    weights[num_layers+1].create(1, out_count*2, CV_64F);
}
示例2: drawKeypoints
// Renders every keypoint into outImage. Unless DRAW_OVER_OUTIMG is set,
// outImage is first produced from the source image: 3/4-channel 8-bit
// images are copied as-is, single-channel images are expanded to BGR,
// anything else raises StsBadArg. A color of Scalar::all(-1) selects a
// fresh random color for each keypoint.
void drawKeypoints( InputArray image, const std::vector<KeyPoint>& keypoints, InputOutputArray outImage,
                    const Scalar& _color, DrawMatchesFlags flags )
{
    CV_INSTRUMENT_REGION();

    if( !(flags & DrawMatchesFlags::DRAW_OVER_OUTIMG) )
    {
        const int srcType = image.type();
        if (srcType == CV_8UC3 || srcType == CV_8UC4)
            image.copyTo(outImage);
        else if( srcType == CV_8UC1 )
            cvtColor( image, outImage, COLOR_GRAY2BGR );
        else
            CV_Error( Error::StsBadArg, "Incorrect type of input image: " + typeToString(image.type()) );
    }

    CV_Assert( !outImage.empty() );

    const bool useRandomColor = _color == Scalar::all(-1);
    RNG& rng = theRNG();

    for( const KeyPoint& kp : keypoints )
    {
        const Scalar kpColor = useRandomColor ? Scalar( rng(256), rng(256), rng(256), 255 ) : _color;
        _drawKeypoint( outImage, kp, kpColor, flags );
    }
}
示例3: void
// Copies src into dst across array kinds (Mat, OpenGL buffer, GPU mat, ...).
// UMat endpoints are handled directly by copyTo; every other combination is
// dispatched through a table of specialized conversion routines indexed by
// the _InputArray kind of each argument.
void cv::superres::arrCopy(InputArray src, OutputArray dst)
{
    // fast path: copyTo already knows how to move data to/from UMat
    if (dst.isUMat() || src.isUMat())
    {
        src.copyTo(dst);
        return;
    }

    typedef void (*func_t)(InputArray src, OutputArray dst);
    // rows = source kind, columns = destination kind; 0 marks an unsupported pair
    static const func_t funcs[10][10] =
    {
        { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
        { 0, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, arr2buf, 0, mat2gpu },
        { 0, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, arr2buf, 0, mat2gpu },
        { 0, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, arr2buf, 0, mat2gpu },
        { 0, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, arr2buf, 0, mat2gpu },
        { 0, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, arr2buf, 0, mat2gpu },
        { 0, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, arr2buf, 0, mat2gpu },
        { 0, buf2arr, buf2arr, buf2arr, buf2arr, buf2arr, buf2arr, buf2arr, 0, buf2arr },
        { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
        { 0, gpu2mat, gpu2mat, gpu2mat, gpu2mat, gpu2mat, gpu2mat, arr2buf, 0 , gpu2gpu },
    };

    const int srcKind = src.kind() >> _InputArray::KIND_SHIFT;
    const int dstKind = dst.kind() >> _InputArray::KIND_SHIFT;

    CV_Assert( srcKind >= 0 && srcKind < 10 );
    CV_Assert( dstKind >= 0 && dstKind < 10 );

    const func_t copyFunc = funcs[srcKind][dstKind];
    CV_Assert( copyFunc != 0 );

    copyFunc(src, dst);
}
示例4: homographyFromSquarePoints
void IPPE::PoseSolver::solveSquare(float squareLength, InputArray _imagePoints, InputArray _cameraMatrix, InputArray _distCoeffs,
OutputArray _rvec1, OutputArray _tvec1, float& err1, OutputArray _rvec2, OutputArray _tvec2, float& err2)
{
//allocate outputs:
_rvec1.create(3, 1, CV_64FC1);
_tvec1.create(3, 1, CV_64FC1);
_rvec2.create(3, 1, CV_64FC1);
_tvec2.create(3, 1, CV_64FC1);
cv::Mat normalizedInputPoints; //undistored version of imagePoints
cv::Mat objectPoints2D;
//generate the object points:
generateSquareObjectCorners2D(squareLength, objectPoints2D);
cv::Mat H; //homography from canonical object points to normalized pixels
if (_cameraMatrix.empty()) {
//this means imagePoints are defined in normalized pixel coordinates, so just copy it:
_imagePoints.copyTo(normalizedInputPoints);
}
else {
//undistort the image points (i.e. put them in normalized pixel coordinates).
cv::undistortPoints(_imagePoints, normalizedInputPoints, _cameraMatrix, _distCoeffs);
}
//compute H
homographyFromSquarePoints(normalizedInputPoints, squareLength / 2.0f, H);
//now solve
cv::Mat Ma, Mb;
solveCanonicalForm(objectPoints2D, normalizedInputPoints, H, Ma, Mb);
//sort poses according to reprojection error:
cv::Mat M1, M2;
cv::Mat objectPoints3D;
generateSquareObjectCorners3D(squareLength, objectPoints3D);
sortPosesByReprojError(objectPoints3D, _imagePoints, _cameraMatrix, _distCoeffs, Ma, Mb, M1, M2, err1, err2);
//fill outputs
rot2vec(M1.colRange(0, 3).rowRange(0, 3), _rvec1);
rot2vec(M2.colRange(0, 3).rowRange(0, 3), _rvec2);
M1.colRange(3, 4).rowRange(0, 3).copyTo(_tvec1);
M2.colRange(3, 4).rowRange(0, 3).copyTo(_tvec2);
}
示例5: _prepareImage
// Copies/converts src into the preallocated dst so that dst's channel count
// is respected: equal channel counts are a plain copy, grayscale sources are
// expanded, and BGR<->BGRA conversions add or drop the alpha channel.
static void _prepareImage(InputArray src, const Mat& dst)
{
    CV_CheckType(src.type(), src.type() == CV_8UC1 || src.type() == CV_8UC3 || src.type() == CV_8UC4, "Unsupported source image");
    CV_CheckType(dst.type(), dst.type() == CV_8UC3 || dst.type() == CV_8UC4, "Unsupported destination image");

    const int srcChannels = src.channels();
    const int dstChannels = dst.channels();

    if (srcChannels == dstChannels)
    {
        src.copyTo(dst);
        return;
    }

    if (srcChannels == 1)
        cvtColor(src, dst, dstChannels == 3 ? COLOR_GRAY2BGR : COLOR_GRAY2BGRA);
    else if (srcChannels == 3 && dstChannels == 4)
        cvtColor(src, dst, COLOR_BGR2BGRA);
    else if (srcChannels == 4 && dstChannels == 3)
        cvtColor(src, dst, COLOR_BGRA2BGR);
    else
        CV_Error(Error::StsInternal, "");
}
示例6: ocl_flip
// OpenCL implementation of cv::flip. Returns true when the flip was carried
// out on the device; false tells the caller to fall back to the CPU path
// (unsupported channel count or kernel build failure).
// flipCode: 0 = flip rows, >0 = flip columns, <0 = flip both.
static bool ocl_flip(InputArray _src, OutputArray _dst, int flipCode )
{
    int type = _src.type(), cn = CV_MAT_CN(type);

    // the kernel only supports 1-, 2- and 4-channel data
    if (cn > 4 || cn == 3)
        return false;

    const char * kernelName;
    int flipType;

    if (flipCode == 0)
        kernelName = "arithm_flip_rows", flipType = FLIP_ROWS;
    else if (flipCode > 0)
        kernelName = "arithm_flip_cols", flipType = FLIP_COLS;
    else
        kernelName = "arithm_flip_rows_cols", flipType = FLIP_BOTH;

    Size size = _src.size();
    int cols = size.width, rows = size.height;

    // degenerate sizes: flipping a single row/column is a plain copy
    if ((cols == 1 && flipType == FLIP_COLS) ||
        (rows == 1 && flipType == FLIP_ROWS) ||
        (rows == 1 && cols == 1 && flipType == FLIP_BOTH))
    {
        _src.copyTo(_dst);
        return true;
    }

    ocl::Kernel k(kernelName, ocl::core::flip_oclsrc,
                  format( "-D type=%s", ocl::memopTypeToStr(type)));
    if (k.empty())
        return false;

    _dst.create(size, type);
    UMat src = _src.getUMat(), dst = _dst.getUMat();

    // only half of the mirrored dimension needs to be enumerated
    cols = flipType == FLIP_COLS ? ((cols+1)/2) : cols;
    rows = flipType & FLIP_ROWS ? ((rows+1)/2) : rows;

    // BUGFIX: explicit casts — int -> size_t narrowing inside braced
    // initialization is ill-formed since C++11 and rejected by modern
    // compilers; upstream OpenCV carries the same casts here.
    size_t globalsize[2] = { static_cast<size_t>(cols), static_cast<size_t>(rows) };
    return k.args(ocl::KernelArg::ReadOnlyNoSize(src), ocl::KernelArg::WriteOnly(dst), rows, cols).run(2, globalsize, NULL, false);
}
示例7: k
// Constructs a Textons instance with the requested dictionary size and
// stores a private copy of the supplied image in test_image.
Textons::Textons(int DictionarySize, InputArray input_image)
    : k(DictionarySize)
{
    input_image.copyTo(test_image);
}
示例8: GenerateSuperpixels
//.........这里部分代码省略.........
for(int pixel_y = y_lower_limit; pixel_y < y_upper_limit; pixel_y++) {
lab_image_row = lab_image.ptr<Vec3b>(pixel_y);
distance_matrix_row = distance_matrix_.ptr<double>(pixel_y);
visual_word_histogram_row = visual_word_histogram_matrix_.ptr<Vec50d>(pixel_y);
superpixel_label_matrix_row = image_oversegmentation_->pixel_labels_.ptr<int>(pixel_y);
int temp_x = x_lower_limit;
for(int pixel_x = x_lower_limit; pixel_x < x_upper_limit; ++pixel_x) {
if (mask.at<uchar>(pixel_y, pixel_x) != 1)
continue;
//Compute the pixel's distance to centroid[i]
ClusterPoint pixel(Point2f(pixel_x,pixel_y), lab_image_row[pixel_x], visual_word_histogram_row[pixel_x]);
if (visual_word_map.empty()) {
dist = cluster_centroids_[i].distance_to(pixel, m_, S_, 0);
} else {
dist = cluster_centroids_[i].distance_to(pixel, m_, S_, kHistogramDistanceWeight);
}
/*---Update the superpixel[pixel] and distance[pixel] if required---*/
if(dist < distance_matrix_row[pixel_x]) {
distance_matrix_row[pixel_x] = dist;
superpixel_label_matrix_row[pixel_x] = i;
}
}
}
/*---Third loop ends---*/
}/*---Second loop ends---*/
image_oversegmentation_->ComputeSegmentAreas();
//Create vector of flags to indicate discardedsuperpixel_labels
vector<bool> discard_list(num_superpixels_,false);
/*---Fourth loop: iterate through each centroid(superpixel) and count number of pixels within.
If count is too small, mark superpixel for discarding---*/
for(int i = 0; i < num_superpixels_; ++i) {
if (discard_list[i] != 1) {
discard_list[i] = image_oversegmentation_->SegmentArea(i) < kMinSuperpixelAreaThreshold;
}
}
int num_discarded = 0;
for(int i = 0; i < discard_list.size(); ++i)
if(discard_list[i])
++num_discarded;
image_oversegmentation_->DeleteSegments(discard_list);
num_superpixels_ = image_oversegmentation_->NumberOfSegments();
vector<Point> old_centroids = image_oversegmentation_->GetCentroids();
UpdateClusterCentroids(lab_image);
vector<Point> new_centroids = image_oversegmentation_->GetCentroids();
/*---Check for convergence - if converged, then break from loop---*/
int max_centroid_displacement = -1;
for(int i = 0; i < num_superpixels_ ; ++i) {
int x_difference = abs(old_centroids[i].x-new_centroids[i].x);
int y_difference = abs(old_centroids[i].y-new_centroids[i].y);
max_centroid_displacement = std::max(max_centroid_displacement,x_difference);
max_centroid_displacement = std::max(max_centroid_displacement,y_difference);
}
cout << "max distance: " << max_centroid_displacement << "\n";
if (max_centroid_displacement <= kCentroidErrorThreshold) {
RenumberEachConnectedComponent();
RelabelSmallSegmentsToNearestNeighbor(kMinSuperpixelAreaThreshold);
cout << "Number of segments now: " << image_oversegmentation_->NumberOfSegments() << "\n";
break;
}
/*---First loop ends---*/
}
image_oversegmentation_->pixel_labels_.copyTo(_superpixels);
vector<Point> centroids = image_oversegmentation_->GetCentroids();
_superpixel_centroids.create(centroids.size(), 2, CV_32S);
Mat superpixel_centroids = _superpixel_centroids.getMat();
for (int i = 0; i < centroids.size(); ++i) {
superpixel_centroids.at<int>(i,0) = centroids[i].x;
superpixel_centroids.at<int>(i,1) = centroids[i].y;
}
_input_image.copyTo(image_oversegmentation_->_original_image);
visual_word_map.copyTo(image_oversegmentation_->Texton_image);
_edges.copyTo(image_oversegmentation_->_edges);
image_oversegmentation_->ListPixelsForEachSegment();
// Mat src = _input_image.getMat();
//cout << "where2" << endl;
// image_oversegmentation_->ComputeSegmentFeatures(src, visual_word_map, _edges);
image_oversegmentation_->ShowClassifiedLabelImage(mask);
cout << "where3" << endl;
// cout << "total num superpixels: " << centroids.size() << endl;
// _number_of_superpixels_total = centroids.size();
return;
/*---Clean up image_oversegmentation_->pixel_labels_---*/
}