This article collects typical usage examples of the roi function in C++. If you are wondering what the C++ roi function does, how to use it, or where to find examples of it, the hand-picked code samples below may help.
The following 15 roi code examples are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
Example 1: vl_lbp_new
vector<vector<float> > Encoder::extractMultiLBP(Mat img, Mat landmarks, int level){
    //Mat img = imread(img_path, CV_LOAD_IMAGE_COLOR);
    vector<vector<float> > ret;
    VlLbp * lbp = vl_lbp_new(VlLbpUniform, VL_TRUE);
    if (lbp == NULL) {
        // Bail out before querying the detector's dimension.
        cout << "fail to init LBP detector" << endl;
        return ret;
    }
    int dimensionx = patchSize / cellSize;
    int dimensiony = patchSize / cellSize;
    int dimensionc = vl_lbp_get_dimension(lbp);
    float* code = new float[dimensionx * dimensiony * dimensionc];
    //cout<<"dim: "<<dimensionx<<" "<<dimensiony<<" "<<dimensionc<<endl;
    for (int l = 0; l < level; l++){
        // Each level shrinks the cell (and therefore the patch) size by one pixel.
        int tmpcellSize = cellSize - l;
        int tmppatchSize = tmpcellSize * dimensionx;
        for (int i = 0; i < landmarks.cols; i++){
            if (landmarks.at<float>(0, i) > patchSize/2 && landmarks.at<float>(1, i) > patchSize/2 && landmarks.at<float>(0, i) + patchSize/2 < img.cols && landmarks.at<float>(1, i) + patchSize/2 < img.rows){
                // Patch of tmppatchSize x tmppatchSize pixels centred on the landmark.
                Mat roi(img, Rect(landmarks.at<float>(0, i) - tmppatchSize/2, landmarks.at<float>(1, i) - tmppatchSize/2, tmppatchSize, tmppatchSize));
                // Copy the patch into a float buffer normalised to [0, 1].
                vector<float> data;
                for (int j = 0; j < roi.cols; j++){
                    for (int k = 0; k < roi.rows; k++){
                        data.push_back((float)roi.at<unsigned char>(k, j) / 255);
                    }
                }
                //float* features = new float[dimensionx * dimensiony * dimensionc];
                //cout<<"code size: x: "<<dimensionx<<" y: "<<dimensiony<<" c: "<<dimensionc<<endl;
                for (int j = 0; j < dimensionx * dimensiony * dimensionc; j++){
                    code[j] = 0;
                }
                vl_lbp_process(lbp, code, &data[0], tmppatchSize, tmppatchSize, tmpcellSize);
                vector<float> lbpCode;
                for (int j = 0; j < dimensionx * dimensiony * dimensionc; j++){
                    //cout<<code[j]<<" ";
                    lbpCode.push_back(code[j]);
                }
                ret.push_back(lbpCode);
                //cout<<"feature "<<i/2<<" size: "<<ret.size()<<endl;
            }
            else{
                cout << "Patch out of bound: " << landmarks.at<float>(0, i) << " " << landmarks.at<float>(1, i) << endl;
                exit(1);
            }
        }
    }
    delete[] code;
    vl_lbp_delete(lbp);
    return ret;
}
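The VLFeat calls above (vl_lbp_new / vl_lbp_process / vl_lbp_delete) are the heart of this example. Below is a minimal standalone sketch of the same API on a single grayscale patch; the helper name lbpForPatch and its cellSize parameter are illustrative assumptions, not part of the original Encoder class, and the buffer is filled in the same order the example uses.

#include <vector>
#include <opencv2/opencv.hpp>
extern "C" {
#include <vl/lbp.h>
}

// Hypothetical helper: encode one square CV_8UC1 patch with VLFeat's uniform LBP.
std::vector<float> lbpForPatch(const cv::Mat &patch, int cellSize) {
    std::vector<float> out;
    VlLbp *lbp = vl_lbp_new(VlLbpUniform, VL_TRUE);
    if (lbp == NULL) return out;
    // Fill a float buffer the same way the example above does, normalised to [0, 1].
    std::vector<float> data;
    for (int j = 0; j < patch.cols; j++)
        for (int k = 0; k < patch.rows; k++)
            data.push_back(patch.at<unsigned char>(k, j) / 255.0f);
    int cellsPerSide = patch.cols / cellSize;
    out.assign(cellsPerSide * cellsPerSide * vl_lbp_get_dimension(lbp), 0.0f);
    vl_lbp_process(lbp, &out[0], &data[0], patch.cols, patch.rows, cellSize);
    vl_lbp_delete(lbp);
    return out;  // concatenated per-cell LBP histograms
}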
Example 2: calcError
template<typename Iter>
double calcError(const cv::Mat& samples, const cv::Mat& model, Iter begin, Iter end){
    double error = 0;
    size_t count = 0;
    cv::Rect roi(0, 0, samples.cols, 1);
    for(Iter it = begin; it != end; it++){
        size_t idx = *it;
        roi.y = idx;
        error += cv::norm(model, cv::Mat(samples, roi));
        count++;
    }
    return error / count;
}
Example 3: ocl_repeat
static bool ocl_repeat(InputArray _src, int ny, int nx, OutputArray _dst)
{
    UMat src = _src.getUMat(), dst = _dst.getUMat();
    for (int y = 0; y < ny; ++y)
        for (int x = 0; x < nx; ++x)
        {
            Rect roi(x * src.cols, y * src.rows, src.cols, src.rows);
            UMat hdr(dst, roi);
            src.copyTo(hdr);
        }
    return true;
}
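This appears to be the internal OpenCL path behind cv::repeat, so from user code the same tiling is a single call. A quick sketch with made-up matrix contents:

#include <opencv2/opencv.hpp>

int main() {
    // 2x3 source tile, repeated 2 times vertically and 3 times horizontally.
    cv::Mat src = (cv::Mat_<float>(2, 3) << 1, 2, 3, 4, 5, 6);
    cv::Mat tiled;
    cv::repeat(src, 2, 3, tiled);   // 4x9 result, same layout the ROI loop above builds
    return 0;
}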
Example 4: fitModel
template<typename Iter>
cv::Mat fitModel(const cv::Mat& samples, Iter begin, Iter end){
    cv::Mat model(cv::Size(samples.cols, 1), samples.type(), cv::Scalar(0));
    size_t count = 0;
    cv::Rect roi(0, 0, samples.cols, 1);
    for(Iter it = begin; it != end; it++){
        size_t idx = *it;
        roi.y = idx;
        model += cv::Mat(samples, roi);
        count++;
    }
    return model / count;
}
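fitModel and calcError (Example 2) are naturally used together: fit the mean row over one set of sample indices, then measure how well it matches another set. A small sketch of that pairing, assuming both templates are in scope; the sample matrix and the index sets are invented for illustration.

#include <iostream>
#include <vector>
#include <opencv2/opencv.hpp>

int main() {
    // Five samples, one per row, three features each (invented values).
    cv::Mat samples = (cv::Mat_<float>(5, 3) <<
        1, 2, 3,
        1, 2, 4,
        1, 2, 2,
        9, 9, 9,
        1, 2, 3);
    std::vector<size_t> trainIdx;   // rows used to fit the model
    trainIdx.push_back(0);
    trainIdx.push_back(1);
    trainIdx.push_back(2);
    std::vector<size_t> testIdx;    // rows used to evaluate it
    testIdx.push_back(3);
    testIdx.push_back(4);
    cv::Mat model = fitModel(samples, trainIdx.begin(), trainIdx.end());
    double err = calcError(samples, model, testIdx.begin(), testIdx.end());
    std::cout << "model: " << model << "  error: " << err << std::endl;
    return 0;
}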
Example 5: HalfToneInit
void SAHer::ComputeSAH(const cv::Mat &sal) {
    HalfToneInit();
    //float e_old = Objective();
    bool use_sal = (sal.cols && sal.rows);
    int block_size = 2;
    float temperature = .2f;
    float AnnealFactor = .8f;
    do {
        for (int block_i = 0; block_i < h_; block_i += block_size)
            for (int block_j = 0; block_j < w_; block_j += block_size) {
                std::vector<std::pair<int,int> > b_indices, w_indices;
                for (int ii = 0; ii < block_size && block_i + ii < h_; ii++) {
                    for (int jj = 0; jj < block_size && block_j + jj < w_; jj++) {
                        int i = block_i + ii, j = block_j + jj;
                        if (halftone_image_.at<float>(i, j) > 0) w_indices.push_back(std::pair<int,int>(i, j));
                        else b_indices.push_back(std::pair<int,int>(i, j));
                    }
                }
                if (b_indices.empty() || w_indices.empty()) continue;
                // Otherwise attempt up to block_size x block_size swaps inside this block.
                cv::Rect roi(block_j, block_i, std::min(block_size, w_ - block_j), std::min(block_size, h_ - block_i));
                float e_old = Objective(roi);
                int exchange_times = use_sal ? round(block_size * block_size * cv::mean(sal(roi))[0]) : block_size * block_size;
                for (int k = 0; k < exchange_times; k++){
                    int rand1 = rand() % b_indices.size(), rand2 = rand() % w_indices.size();
                    std::pair<int,int> idx1 = b_indices[rand1], idx2 = w_indices[rand2];
                    halftone_image_.at<float>(idx1.first, idx1.second) = 1;
                    halftone_image_.at<float>(idx2.first, idx2.second) = 0;
                    float e_new = Objective(roi);
                    float delta_e = e_new - e_old;
                    if (delta_e < 0.f || rand_float() < exp(-delta_e * w_ * h_ / temperature)) {
                        // accept
                        e_old = e_new;
                        b_indices[rand1] = idx2;
                        w_indices[rand2] = idx1;
                    } else {
                        // reject and undo the swap
                        halftone_image_.at<float>(idx1.first, idx1.second) = 0;
                        halftone_image_.at<float>(idx2.first, idx2.second) = 1;
                    }
                }
            }
        temperature *= AnnealFactor;
    } while (temperature > 0.15f);
    return;
}
Example 6: testCovolutionFullPixelOneBand
//TO-DO test for all types once instantiated
void testCovolutionFullPixelOneBand() {
    std::cout << std::endl << "GPU CONV VERIFICATION TEST" << std::endl;
    ssize_t filterRadius = 1;
    cv::Size2i roi(5, 5);
    cv::Size2i dSize(roi.width + filterRadius * 2, roi.height + filterRadius * 2);
    vector<short> data;
    data.resize(dSize.area());
    for(int i = 0; i < dSize.area(); ++i) {
        data[i] = i;
    }
    cvt::cvTile<short> inTile(data.data(), dSize, 1);
    cvt::cvTile<short>* outTile;
    cv::Mat weightsMat = cv::Mat::zeros(3, 3, CV_16UC1);
    for(int i = 0; i < 3; ++i) {
        for(int j = 0; j < 3; ++j) {
            weightsMat.at<short>(i, j) = 2;
        }
    }
    inTile.setROI(cv::Rect(filterRadius, filterRadius, roi.width, roi.height));
    cvt::gpu::GpuConvolution<short, 1, short, 1, short> conv(0, roi.width, roi.height,
                                                             filterRadius, weightsMat);
    TS_ASSERT_EQUALS(cvt::Ok, conv.initializeDevice(cvt::gpu::SQUARE));
    conv(inTile, (const cvt::cvTile<short>**)&outTile);
    TS_ASSERT_EQUALS(0, (outTile == NULL));
    //TODO Can we remove this?
    //cv::Mat& a = inTile[0];
    cv::Mat& b = (*outTile)[0];
    short expected[] = { 32,  54,  66,  78,  90,
                        102,  72,  90, 144, 162,
                        180, 198, 216, 150, 174,
                        270, 288, 306, 324, 342,
                        234, 258, 396, 414, 432};
    int k = 0;
    for(int i = 0; i < roi.width; ++i) {
        for(int j = 0; j < roi.height; ++j) {
            //std::cout << "b[" << i << "," << j << "] = " << b.at<short>(i,j) << std::endl;
            TS_ASSERT_EQUALS(b.at<short>(i, j), expected[k]);
            k++;
        }
    }
}
Example 7: ofEnableSmoothing
//--------------------------------------------------------------
void testApp::setup(){
    ofEnableSmoothing();
    ofBackground(50);

    ofRectangle roi(0, 512, 2048, 1024);
    ofSetWindowShape(roi.width, max(roi.height, 256.0f));

    grabber.open();
    grabber.setROI(roi);
    grabber.setExposure(1291);
    grabber.startCapture(TriggerMode::Trigger_GPIO1, TriggerSignalType::TriggerSignal_RisingEdge);
    recorder.setGrabber(grabber);

    this->toggleRecord = false;
    this->bangClear = false;
    this->bangClearBefore = false;
    this->bangClearAfter = false;
    this->bangSavePipets = false;
    this->toggleSave = false;
    this->toggleProgress = false;

    gui.setHeight(400);
    gui.addLabel("ofxMachineVision", OFX_UI_FONT_LARGE);
    gui.addLabel("Camcorder example", OFX_UI_FONT_MEDIUM);
    gui.addSpacer();
    gui.addLabel("Device");
    //gui.addButton("Open camera", &this->bangOpen);
    this->guiDeviceStateLabel = gui.addLabel("Device state", "...", OFX_UI_FONT_SMALL);
    gui.addSpacer();
    gui.addToggle("Record", &this->toggleRecord);
    this->guiRecordStateLabel = gui.addLabel("Recorder state", "...", OFX_UI_FONT_SMALL);
    this->guiRecordCountLabel = gui.addLabel("Frame count", "Empty", OFX_UI_FONT_SMALL);
    gui.addButton("Clear frames", &this->bangClear);
    gui.addButton("Clear before", &this->bangClearBefore);
    gui.addButton("Clear after", &this->bangClearAfter);
    gui.addButton("Save pipets", &this->bangSavePipets);
    gui.addToggle("Progress", &this->toggleProgress);
    gui.addToggle("Save", &this->toggleSave);
    gui.addSpacer();
    gui.addLabel("Frame details");
    this->guiFrameTimestamp = gui.addLabel("Timestamp", "", OFX_UI_FONT_SMALL);
    this->guiFrameDuration = gui.addLabel("Duration", "", OFX_UI_FONT_SMALL);
}
Example 8: findMinMax
void findMinMax(const cv::Mat &ir, const std::vector<cv::Point2f> &pointsIr)
{
    minIr = 0xFFFF;
    maxIr = 0;
    for(size_t i = 0; i < pointsIr.size(); ++i)
    {
        const cv::Point2f &p = pointsIr[i];
        cv::Rect roi(std::max(0, (int)p.x - 2), std::max(0, (int)p.y - 2), 9, 9);
        roi.width = std::min(roi.width, ir.cols - roi.x);
        roi.height = std::min(roi.height, ir.rows - roi.y);
        findMinMax(ir(roi));
    }
}
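This two-argument overload relies on the member variables minIr/maxIr and on a one-argument findMinMax(const cv::Mat &) that is not part of this listing. A plausible sketch of that missing overload, written with cv::minMaxLoc, is shown below; treat it as an assumption about the original, not as its actual code.

// Assumed members of the surrounding class: int minIr, maxIr; the IR image is CV_16U.
void findMinMax(const cv::Mat &ir)
{
    double minVal = 0.0, maxVal = 0.0;
    cv::minMaxLoc(ir, &minVal, &maxVal);
    if ((int)minVal < minIr) minIr = (int)minVal;
    if ((int)maxVal > maxIr) maxIr = (int)maxVal;
}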
Example 9: computeProbImage
std::vector<cv::MatND> computeProbImage(cv::Mat image, std::vector<cv::Rect> rectRoi, std::vector<cv::Mat> &hist, std::vector<bool> &detected)
{
    int smin = 30;
    int vmin = 10;
    int vmax = 256;
    cv::Mat mask;
    cv::Mat hsv;
    cv::Mat hue;
    std::vector<cv::MatND> backProj;
    int channels[] = {0, 0};
    int hbins = 30;                     // Quantize the hue to 30 levels
    //int sbins = 32;                   // and the saturation to 32 levels
    int histSize = MAX(hbins, 2);
    //int histSizes[] = {hbins, sbins};
    float hue_range[] = { 0, 180 };     // hue varies from 0 to 179, see cvtColor
    //float sat_range[] = { 0, 256 };   // saturation varies from 0 (black-gray-white) to 255 (pure spectrum color)
    const float* range = { hue_range };
    //const float* ranges = { hue_range, sat_range };
    //double maxVal = 0;
    backProj.resize(rectRoi.size());
    hist.resize(rectRoi.size());
    cv::cvtColor(image, hsv, CV_BGR2HSV);
    hue.create(hsv.size(), hsv.depth());
    cv::mixChannels(&hsv, 1, &hue, 1, channels, 1);
    cv::inRange(hsv, cv::Scalar(0, smin, MIN(vmin, vmax)), cv::Scalar(180, 256, MAX(vmin, vmax)), mask);
    for(size_t i = 0; i < rectRoi.size(); i++)
    {
        if(!detected[i])
        {
            cv::Mat roi(hue, rectRoi[i]);
            cv::Mat maskroi(mask, rectRoi[i]);
            cv::calcHist(&roi, 1, 0, maskroi, hist[i], 1, &histSize, &range, true, false);
            cv::normalize(hist[i], hist[i], 0, 255, cv::NORM_MINMAX);
            detected[i] = true;
            roi.release();
            maskroi.release();
        }
        cv::calcBackProject(&hue, 1, 0, hist[i], backProj[i], &range);
        backProj[i] &= mask;
    }
    return backProj;
}
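The back-projections returned here follow OpenCV's camshiftdemo pattern, so a typical caller hands each one to cv::CamShift to update its rectangle. A rough usage sketch; the wrapper name trackOneFrame and the termination criteria are assumptions.

#include <vector>
#include <opencv2/opencv.hpp>

void trackOneFrame(const cv::Mat &frame, std::vector<cv::Rect> &rois,
                   std::vector<cv::Mat> &hist, std::vector<bool> &detected)
{
    std::vector<cv::MatND> backProj = computeProbImage(frame, rois, hist, detected);
    for (size_t i = 0; i < rois.size(); ++i)
    {
        cv::TermCriteria crit(cv::TermCriteria::EPS | cv::TermCriteria::COUNT, 10, 1);
        // CamShift updates rois[i] in place and returns the fitted rotated box.
        cv::RotatedRect box = cv::CamShift(backProj[i], rois[i], crit);
        // box could now be drawn on a copy of the frame with cv::ellipse, etc.
        (void)box;
    }
}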
Example 10: imread
bool ImageSegmenter::Process()
{
    // Check if we are working on full image or ROI
    if (m_processing_roi.length() > 0) { // ROI
        Mat inputImage = imread(m_document_image_path.c_str(), CV_LOAD_IMAGE_GRAYSCALE); // converts to grayscale if required
        stringstream ss(m_processing_roi);
        ss >> m_roi_tlx >> m_roi_tly >> m_roi_brx >> m_roi_bry;
        Rect roi(m_roi_tlx, m_roi_tly, m_roi_brx - m_roi_tlx + 1, m_roi_bry - m_roi_tly + 1);
        m_docImage = inputImage(roi);
        //imshow("roi", m_docImage);
        //waitKey();
    }
Example 11: makeCanvas
/**
 * @brief makeCanvas Makes a composite image from the given images
 * @param vecMat Vector of images.
 * @param windowHeight The height of the new composite image to be formed.
 * @param nRows Number of rows of images (the number of columns is computed
 *              from the total number of images).
 * @return new composite image.
 */
cv::Mat makeCanvas(std::vector<cv::Mat>& vecMat, int windowHeight, int nRows) {
    int N = vecMat.size();
    nRows = nRows > N ? N : nRows;
    int edgeThickness = 10;
    int imagesPerRow = ceil(double(N) / nRows);
    int resizeHeight = floor(2.0 * ((floor(double(windowHeight - edgeThickness) / nRows)) / 2.0)) - edgeThickness;
    int maxRowLength = 0;
    std::vector<int> resizeWidth;
    for (int i = 0; i < N;) {
        int thisRowLen = 0;
        for (int k = 0; k < imagesPerRow; k++) {
            double aspectRatio = double(vecMat[i].cols) / vecMat[i].rows;
            int temp = int(ceil(resizeHeight * aspectRatio));
            resizeWidth.push_back(temp);
            thisRowLen += temp;
            if (++i == N) break;
        }
        if ((thisRowLen + edgeThickness * (imagesPerRow + 1)) > maxRowLength) {
            maxRowLength = thisRowLen + edgeThickness * (imagesPerRow + 1);
        }
    }
    int windowWidth = maxRowLength;
    cv::Mat canvasImage(windowHeight, windowWidth, CV_8UC3, cv::Scalar(0, 0, 0));
    for (int k = 0, i = 0; i < nRows; i++) {
        int y = i * resizeHeight + (i + 1) * edgeThickness;
        int x_end = edgeThickness;
        for (int j = 0; j < imagesPerRow && k < N; k++, j++) {
            int x = x_end;
            cv::Rect roi(x, y, resizeWidth[k], resizeHeight);
            cv::Size s = canvasImage(roi).size();
            // change the number of channels to three
            cv::Mat target_ROI(s, CV_8UC3);
            if (vecMat[k].channels() != canvasImage.channels()) {
                if (vecMat[k].channels() == 1) {
                    cv::cvtColor(vecMat[k], target_ROI, CV_GRAY2BGR);
                }
            } else {
                // already 3-channel: copy directly so target_ROI is not left uninitialized
                vecMat[k].copyTo(target_ROI);
            }
            cv::resize(target_ROI, target_ROI, s);
            if (target_ROI.type() != canvasImage.type()) {
                target_ROI.convertTo(target_ROI, canvasImage.type());
            }
            target_ROI.copyTo(canvasImage(roi));
            x_end += resizeWidth[k] + edgeThickness;
        }
    }
    return canvasImage;
}
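A short usage sketch for makeCanvas: load a few images, tile them into a canvas roughly 600 px tall with 2 rows, and display it. The file names are placeholders.

#include <vector>
#include <opencv2/opencv.hpp>

int main() {
    std::vector<cv::Mat> images;
    images.push_back(cv::imread("a.jpg"));                          // placeholder file names
    images.push_back(cv::imread("b.jpg"));
    images.push_back(cv::imread("c.png", cv::IMREAD_GRAYSCALE));    // 1-channel input is handled too
    cv::Mat canvas = makeCanvas(images, 600, 2);                    // 600 px tall, 2 rows
    cv::imshow("canvas", canvas);
    cv::waitKey(0);
    return 0;
}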
Example 12: pointsToMat
cv::Mat pointsToMat(std::vector<cv::Point2d>& pts)
{
    // Each column of hm is a homogeneous coordinate of a point.
    int c = pts.size();   // number of points
    cv::Mat hm(3, c, CV_64FC1, cv::Scalar(1.0));
    cv::Mat m(pts);       // c x 1, 2 channels
    m = m.reshape(1, c);  // c x 2, 1 channel
    m = m.t();            // 2 x c
    cv::Mat roi(hm, cv::Rect(0, 0, c, 2));
    m.copyTo(roi);
    return hm;
}
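A quick check of pointsToMat with two made-up points; the result is a 3x2 CV_64F matrix whose columns are the homogeneous coordinates (x, y, 1).

#include <iostream>
#include <vector>
#include <opencv2/opencv.hpp>

int main() {
    std::vector<cv::Point2d> pts;
    pts.push_back(cv::Point2d(10.0, 20.0));
    pts.push_back(cv::Point2d(-3.5, 7.25));
    cv::Mat hm = pointsToMat(pts);
    // hm is 3x2, CV_64F: each column is (x, y, 1)^T
    std::cout << hm << std::endl;
    return 0;
}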
Example 13: edge_map
void ColorEdge::detectColorEdge(const cv::Mat_<cv::Vec3b> &image, cv::Mat_<uchar> &edge)
{
    cv::Mat_<double> edge_map(image.size());
    const int filter_half = static_cast<int>(filter_size_ / 2);
    for(int y = filter_half; y < (edge_map.rows - filter_half); ++y)
    {
        for(int x = filter_half; x < (edge_map.cols - filter_half); ++x)
        {
            cv::Mat_<cv::Vec3b> roi(image, cv::Rect(x - filter_half, y - filter_half, filter_size_, filter_size_));
            edge_map(y, x) = calculateMVD(roi);
        }
    }
    edge_map.convertTo(edge, edge.type());
}
Example 14: wiener_deconvolve
//deconvolve works with images, read in grayscale mode,
//converted to CV_32F and divided by 255
cv::Mat wiener_deconvolve(cv::Mat img, bool defocus, int d, int ang, int noise, int sz){
    double snr = pow(10, -0.1 * noise);
    blur_edge(img, img, 31);
    cv::Mat IMG;
    cv::dft(img, IMG, cv::DFT_COMPLEX_OUTPUT);
    cv::Mat psf;
    if (defocus)
        psf = defocus_kernel(d, sz);
    else
        psf = motion_kernel(ang, d, sz);
    cv::namedWindow("psf", cv::WINDOW_NORMAL);
    cv::imshow("psf", psf);
    cv::divide(psf, cv::sum(psf)[0], psf);
    cv::Mat psf_pad = cv::Mat::zeros(img.rows, img.cols, CV_32FC1);
    int kh = psf.rows;
    int kw = psf.cols;
    cv::Mat roi(psf_pad(cv::Rect(0, 0, kw, kh)));
    psf.copyTo(roi);
    cv::Mat PSF;
    cv::dft(psf_pad, PSF, cv::DFT_COMPLEX_OUTPUT, kh);
    cv::Mat PSF2 = cv::Mat::zeros(PSF.rows, PSF.cols, PSF.type()); // formula solution
    cv::mulSpectrums(PSF, PSF, PSF2, 0, true);
    cv::Mat mat_arr[2];
    cv::split(PSF2, mat_arr);
    mat_arr[0] += snr;
    mat_arr[1] = mat_arr[0];
    cv::merge(mat_arr, 2, PSF2);
    cv::divide(IMG, PSF2, IMG);
    cv::mulSpectrums(IMG, PSF, IMG, 0, true);
    cv::Mat result(img.rows, img.cols, CV_32FC1);
    cv::idft(IMG, result, cv::DFT_REAL_OUTPUT + cv::DFT_SCALE);
    roll_mat(result, kh, kw);
    IMG.release();
    psf.release();
    psf_pad.release();
    PSF.release();
    PSF2.release();
    roi.release();
    mat_arr[0].release();
    mat_arr[1].release();
    return result;
}
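The comment above the function states the expected input format (grayscale, CV_32F, divided by 255). A sketch of a matching call site, assuming wiener_deconvolve and its helpers (blur_edge, motion_kernel, defocus_kernel, roll_mat) are available; the file name and the kernel/noise parameters are made-up values.

#include <opencv2/opencv.hpp>

int main() {
    // Read in grayscale, convert to CV_32F and scale by 1/255, as the comment requires.
    cv::Mat img = cv::imread("blurred.png", cv::IMREAD_GRAYSCALE);
    img.convertTo(img, CV_32F, 1.0 / 255.0);
    // Assumed parameters: motion blur of length d=22 at angle 135 deg, 25 dB SNR, kernel size 65.
    cv::Mat restored = wiener_deconvolve(img, /*defocus=*/false, /*d=*/22, /*ang=*/135,
                                         /*noise=*/25, /*sz=*/65);
    cv::imshow("input", img);
    cv::imshow("restored", restored);
    cv::waitKey(0);
    return 0;
}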
Example 15: convolutionOperator1D
Mat convolutionOperator1D(Mat &signalVector, Mat &kernel, BorderTypes border) {
    Mat filtered;
    bool was1col = false;
    if (!signalVector.empty() && !kernel.empty()) {
        // If we receive a signal vector with one column, transpose it
        if (signalVector.cols == 1) {
            signalVector = signalVector.t();
            was1col = true;
        }
        int extraBorder = kernel.cols / 2;
        vector<Mat> signalVectorByChannels(signalVector.channels());
        split(signalVector, signalVectorByChannels);
        for (vector<Mat>::const_iterator it = signalVectorByChannels.begin(); it != signalVectorByChannels.end(); ++it) {
            Mat m = *(it);
            // Create a new Mat with the extra borders needed
            Mat signalWithBorder;
            // Add extra borders to the vector to solve the boundary issue
            copyMakeBorder(m, signalWithBorder, 0, 0, extraBorder, extraBorder, border, Scalar(0));
            // Vector to store the convolution result
            filtered = m.clone();
            // Create a ROI to slide along the vector and compute the convolution with the kernel
            Mat roi(signalWithBorder, Rect(0, 0, kernel.cols, 1));
            for (int i = 0; i < m.cols; i++) {
                // Multiply the focused section by the kernel
                Mat r = roi.mul(kernel);
                // Store the sum of the above operation in the pixel at i
                filtered.at<double>(i) = (double) *(sum(r).val);
                // Move the ROI one position to the right
                roi = roi.adjustROI(0, 0, -1, 1);
            }
            filtered.copyTo(m);
        }
        // Merge the vectors into a multichannel Mat
        merge(signalVectorByChannels, filtered);
    }
    filtered = was1col ? filtered.t() : filtered;
    return filtered;
}
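A small sketch of calling convolutionOperator1D with a 3-tap averaging kernel. The function indexes with at<double>, so both the signal and the kernel are built as CV_64F single-row matrices; the values are illustrative.

#include <iostream>
#include <opencv2/opencv.hpp>
using namespace cv;

int main() {
    Mat signal = (Mat_<double>(1, 5) << 1, 2, 3, 4, 5);
    Mat kernel = (Mat_<double>(1, 3) << 1.0/3, 1.0/3, 1.0/3);
    Mat smoothed = convolutionOperator1D(signal, kernel, BORDER_REPLICATE);
    std::cout << smoothed << std::endl;   // roughly [1.33, 2, 3, 4, 4.67]
    return 0;
}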