本文整理汇总了C++中cv::Mat::release方法的典型用法代码示例。如果您正苦于以下问题:C++ Mat::release方法的具体用法?C++ Mat::release怎么用?C++ Mat::release使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类cv::Mat的用法示例。
在下文中一共展示了Mat::release方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1:
/*
 * Class: com_nekomeshi312_stabcam_StabCameraViewFragment
 * Method: stopStabilize
 * Signature: ()V
 */
// JNI entry point invoked from Java when video stabilization stops.
// Releases the file-scope cv::Mat working buffers (previous/current gray
// frames, current RGBA frame, accumulated affine transform) so their memory
// is reclaimed. cv::Mat::release() decrements the refcount and frees the data
// only when it reaches zero; calling it on an already-empty Mat is a no-op,
// so this function is safe to call more than once.
JNIEXPORT void JNICALL Java_com_nekomeshi312_stabcam_StabCameraViewFragment_stopStabilize
(JNIEnv *jenv, jobject jobj)
{
LOGI("stopStabilize");
gGrayPrev.release();
gGrayCur.release();
gRGBACur.release();
gAffine.release();
#ifdef USE_POC
// The Hann window buffer only exists when phase-only correlation is enabled.
gHann.release();
#endif
}
示例2: load_calibration_data
void load_calibration_data(const std::string &file_name, cv::Mat &intrinsics_common, cv::Mat &distortion_vector)
{
if (file_name.empty()) return;
intrinsics_common.release();
distortion_vector.release();
cv::FileStorage fs;
if(fs.open(file_name, cv::FileStorage::READ | cv::FileStorage::FORMAT_XML, "utf8")) {
fs["camera_distortion"] >> distortion_vector;
fs["camera_intrinsics"] >> intrinsics_common;
fs.release();
} else {
示例3: GetPredictionLabelandConfidence
void CMySharkML::GetPredictionLabelandConfidence(std::vector<int> &predictLable, cv::Mat &predictConf, Data<RealVector> &predictions)
{
predictLable.clear();
predictConf.release();
std::size_t numElements = predictions.numberOfElements();
predictLable.assign(numElements, 0);
predictConf = cv::Mat(numElements, m_numLabels, CV_32F);
memset(predictConf.data, 0, sizeof(float)*numElements*m_numLabels);
for(int k = 0; k < numElements; ++k) {
predictions.element(k);
RealVector::iterator iter_p = predictions.element(k).begin(); //Predict.begin();
float maxP = *iter_p;
int L = 0;
predictConf.at<float>(k, 0) = *iter_p;
std::size_t numInputs = predictions.element(k).size(); //Predict.size();
for(int i=1; i<numInputs; ++i){
iter_p++;
predictConf.at<float>(k, i) = *iter_p;
if(*iter_p > maxP) {
maxP = *iter_p;
L = i;
}
}
predictLable[k] = L;
}
}
示例4: download
// Fetches the most recent image blob stored under key_ from MongoDB and
// decodes it into `image`. Returns false only when there is no connection;
// an absent or empty document still returns true with `image` left empty.
// Side effect: updates timestamp_ from the fetched document.
bool CvMongoMat::download(cv::Mat &image)
{
image.release();
if (conn_ == NULL) return false;
// std::auto_ptr is dictated by the legacy mongo C++ driver's query() API.
std::auto_ptr<mongo::DBClientCursor> cursor;
// Query for documents where the field key_ exists at all.
cursor = conn_->query(collection_, QUERY(key_ << BSON("$exists" << true)));
if (cursor->more() == false) {
// nothing to do...
return true;
}
mongo::BSONObj obj = cursor->next();
timestamp_ = obj["timestamp"].Long();
int len;
// binData() returns a pointer into the BSON object and writes the byte
// length into len; copy the bytes out before the cursor/obj go away.
uchar *data = (uchar*)obj[key_].binData(len);
if (len == 0) return true;
std::vector<uchar> v(data, data+len);
// -1 = cv::IMREAD_UNCHANGED: keep the stored channel layout as-is.
image = cv::imdecode(cv::Mat(v), -1);
return true;
}
示例5:
// Converts a vector of binary ORB descriptors into a single CV_32F matrix in
// which every descriptor bit becomes one float (0.0 or 1.0), most-significant
// bit of each byte first. One output row per descriptor, FORB::L*8 columns.
// An empty input produces an empty matrix.
void FORB::toMat32F(const std::vector<TDescriptor> &descriptors,
  cv::Mat &mat)
{
  if(descriptors.empty())
  {
    mat.release();
    return;
  }

  const size_t count = descriptors.size();
  mat.create(count, FORB::L*8, CV_32F);

  float *out = mat.ptr<float>();
  for(size_t row = 0; row < count; ++row)
  {
    const int nBytes = descriptors[row].cols;
    const unsigned char *bytes = descriptors[row].ptr<unsigned char>();
    for(int j = 0; j < nBytes; ++j)
    {
      // Expand byte j into eight floats, MSB first (bit 7 down to bit 0).
      for(int b = 7; b >= 0; --b)
      {
        *out++ = static_cast<float>((bytes[j] >> b) & 1);
      }
    }
  }
}
示例6: calculateFeaturesFromInput
static void calculateFeaturesFromInput( cv::Mat imageData, std::vector< float >& featureVector, cv::HOGDescriptor& hog )
{
std::vector< cv::Point > locations;
hog.compute( imageData, featureVector, winStride, trainingPadding, locations );
imageData.release();
}
示例7: loadMatrix
// Loads a rows x cols CV_32F matrix from a whitespace-separated text file.
// Returns 1 on success, -1 on failure (file missing, short, or malformed).
// Note: values are stored via Utilities::matSet2D(matrix, j, i, val) — the
// helper presumably takes (x, y) order; kept exactly as before.
int VirtualCamera::loadMatrix(cv::Mat &matrix,int rows,int cols ,std::string file)
{
	std::ifstream in1;
	in1.open(file.c_str());
	if (in1.fail())
	{
		std::cout<<"Error loading file "<<file.c_str()<<"\n";
		return -1;
	}

	if(!matrix.empty())
		matrix.release();
	matrix=cv::Mat(rows, cols, CV_32F);

	for(int i=0; i<rows; i++)
	{
		for(int j=0; j<cols; j++)
		{
			float val;
			// The original never checked the extraction; a truncated or
			// non-numeric file silently filled the matrix with garbage.
			if(!(in1>>val))
			{
				std::cout<<"Error parsing file "<<file.c_str()<<"\n";
				return -1;
			}
			Utilities::matSet2D(matrix,j,i,val);
		}
	}
	return 1;
}
示例8: retrieve
// Retrieves a frame from the first attached camera interface.
// With no interface attached, clears `image` and reports failure so the
// caller never sees stale frame data.
bool VideoCapture::retrieve(cv::Mat& image, int channel){
    if(!cam_interface.empty())
        return cam_interface[0]->retrieve(image, channel);
    image.release();
    return false;
}
示例9: PTDEBUG
// Converts a raw pixel buffer in one of the supported formats
// (BGR888 / RGBA8888 / ARGB8888 / BGRA8888) into a 3-channel BGR cv::Mat.
//
// Returns PT_RET_OK on success, PT_RET_INVALIDPARAM on bad arguments or an
// unsupported format, PT_RET_NOMEM if the output buffer cannot be allocated.
// NOTE: in the BGR888 case bgrMat aliases pPixels (no copy) — the caller must
// keep pPixels alive for as long as bgrMat is in use.
PTS32 _cvtColor2BGR(PTU8* pPixels, PTS32 nWidth, PTS32 nHeight, PTImageFormatEnum eFormat, cv::Mat& bgrMat)
{
	if(pPixels == NULL || nWidth < 0 || nHeight < 0) {
		PTDEBUG("%s: Invalid parameters, pPixels[%p], nWidth[%d], nHeight[%d]\n", __FUNCTION__, pPixels, nWidth, nHeight);
		return PT_RET_INVALIDPARAM;
	}
	if(eFormat != PT_IMG_BGR888 && eFormat != PT_IMG_RGBA8888 && eFormat != PT_IMG_ARGB8888 && eFormat != PT_IMG_BGRA8888) {
		PTDEBUG("%s: picture format[%d], ie[%s] not supported!\n",__FUNCTION__, (int)eFormat, strImageFormat[eFormat]);
		return PT_RET_INVALIDPARAM;
	}

	bgrMat.release();

	switch(eFormat) {
	case PT_IMG_BGR888 : {
		// Already BGR: wrap the caller's buffer without copying.
		bgrMat = cv::Mat(nHeight, nWidth, CV_8UC3, pPixels);
	}
	break;
	case PT_IMG_RGBA8888 : {
		cvtColor(cv::Mat(nHeight, nWidth, CV_8UC4, pPixels), bgrMat, CV_RGBA2BGR);
	}
	break;
	case PT_IMG_ARGB8888 : {
		// OpenCV has no ARGB conversion code, so swizzle by hand. The
		// previous version malloc()ed a BGR buffer and wrapped it in a Mat
		// that does not own it, so the buffer leaked on every call — let the
		// Mat allocate and own its storage instead.
		try {
			bgrMat.create(nHeight, nWidth, CV_8UC3);
		} catch(const cv::Exception&) {
			PTDEBUG("%s: no enough memory to support format[%s]\n", __FUNCTION__, strImageFormat[eFormat]);
			return PT_RET_NOMEM;
		}
		const int nSrcChannel = 4;
		const int nDstChannel = 3;
		const int nPixels = nHeight*nWidth;
		const PTU8* pSrcBuffer = pPixels;
		PTU8* pDstBuffer = bgrMat.data; // a freshly created Mat is continuous
		for(int i = 0; i < nPixels; i++) {
			pDstBuffer[2] = pSrcBuffer[1];//R
			pDstBuffer[1] = pSrcBuffer[2];//G
			pDstBuffer[0] = pSrcBuffer[3];//B
			pSrcBuffer += nSrcChannel;
			pDstBuffer += nDstChannel;
		}
	}
	break;
	case PT_IMG_BGRA8888 : {
		cvtColor(cv::Mat(nHeight, nWidth, CV_8UC4, pPixels), bgrMat, CV_BGRA2BGR);
	}
	break;
	default : {
		// Unreachable given the format check above; kept for defensiveness.
		PTDEBUG("%s: picture format[%d], ie[%s] not supported!\n", __FUNCTION__, (int)eFormat, strImageFormat[eFormat]);
		return PT_RET_INVALIDPARAM;
	}
	break;
	}
	return PT_RET_OK;
}
示例10: CallBackFunc
// Mouse callback that lets the user drag-select a tracking target.
// Operates entirely on file-scope globals:
//  - LBUTTONDOWN: begin a new selection at point1, discard the old target
//    image and its preview windows.
//  - MOUSEMOVE : clamp the cursor to the image bounds and, while the button
//    is held, grow the selection rectangle; redraw the live image.
//  - LBUTTONUP : finalize the selection. A zero-area click falls back to a
//    10x10 box centered on the click point.
void CallBackFunc(int evnt, int x, int y, int flags, void* userdata) {
	if (evnt == cv::EVENT_LBUTTONDOWN) {
		mouseButtonDown = true;
		targetSelected = false;
		boundingRect = cv::Rect(0,0,0,0);
		point1 = cv::Point(x,y);
		cv::destroyWindow(targetName);
		cv::destroyWindow(ColorTracker.getColorSquareWindowName());
		targetImage.release();
	}
	if (evnt == cv::EVENT_MOUSEMOVE) {
		// Clamp so point2 always lies inside the image.
		if (x < 0) x = 0;
		else if (x > image.cols) x = image.cols;
		if (y < 0) y = 0;
		else if (y > image.rows) y = image.rows;
		point2 = cv::Point(x,y);
		if (mouseButtonDown) {
			boundingRect = cv::Rect(point1,point2);
		}
		cv::imshow(imageName,image);
	}
	if (evnt == cv::EVENT_LBUTTONUP) {
		mouseButtonDown = false;
		// A plain click (zero-area drag) selects a small box around the
		// click point; both paths then share the same crop-and-show code,
		// which the original duplicated in each branch.
		if (boundingRect.area() == 0) {
			boundingRect = cv::Rect(point1-cv::Point(5,5),point1+cv::Point(5,5));
		}
		targetImage = image(calibratedRect(boundingRect));
		cv::imshow(targetName, targetImage);
		targetSelected = true;
	}
}
示例11: loadDescriptorsFromZippedBin
// Loads a descriptor matrix from a gzip-compressed binary file whose layout
// is gzip([rows:int][cols:int][type:int][data bytes]) followed by a trailing
// plain int holding the uncompressed size (written by the matching save
// routine — TODO confirm against the writer).
// Throws std::runtime_error on I/O failure or malformed content.
void FileUtils::loadDescriptorsFromZippedBin(const std::string& filename,
		cv::Mat& descriptors) {

	std::ifstream zippedFile;
	boost::iostreams::filtering_istream is;

	// Open file
	zippedFile.open(filename.c_str(), std::fstream::in | std::fstream::binary);
	// Check file
	if (zippedFile.good() == false) {
		throw std::runtime_error(
				"Unable to open file [" + filename + "] for reading");
	}

	// Obtain uncompressed file size from the footer. The original wrote
	// seekg(-sizeof(int), ...): sizeof() is unsigned, so the negation wrapped
	// around and only worked through the implementation-defined conversion to
	// the signed offset type — cast to a signed offset first.
	zippedFile.seekg(-static_cast<std::streamoff>(sizeof(int)), zippedFile.end);
	int fileSize = -1;
	zippedFile.read((char*) &fileSize, sizeof(int));
	zippedFile.seekg(0, zippedFile.beg);
	(void) fileSize; // retained for diagnostics; payload size now comes from the header

	try {
		is.push(boost::iostreams::gzip_decompressor());
		is.push(zippedFile);
		// Read rows byte
		int rows = -1;
		is.read((char*) &rows, sizeof(int));
		// Read columns byte
		int cols = -1;
		is.read((char*) &cols, sizeof(int));
		// Read type byte
		int type = -1;
		is.read((char*) &type, sizeof(int));
		// Validate the header before allocating anything.
		if (!is || rows < 0 || cols < 0) {
			throw std::runtime_error("Invalid descriptors header");
		}
		if (type != CV_32F && type != CV_8U) {
			throw std::runtime_error("Invalid descriptors type");
		}
		descriptors.release();
		descriptors.create(rows, cols, type);
		// Derive the payload size from the header instead of trusting the
		// footer: the original read (fileSize - 3*sizeof(int)) bytes straight
		// into descriptors.data, overflowing the matrix if the footer lied.
		const std::streamsize dataStreamSize = static_cast<std::streamsize>(
				descriptors.total() * descriptors.elemSize());
		// Read data bytes
		is.read((char*) descriptors.data, dataStreamSize);
		if (is.gcount() != dataStreamSize) {
			throw std::runtime_error("Truncated descriptors data");
		}
	} catch (const boost::iostreams::gzip_error& e) {
		throw std::runtime_error(
				"Got error while reading file [" + std::string(e.what()) + "]");
	}

	// Close file
	zippedFile.close();
}
示例12: sprintf
// Grabs one depth frame and/or one color frame from the OpenNI device into
// the caller's Mats. A stream the device does not provide leaves the
// corresponding Mat released (empty) and logs a message.
// Returns true when the device offers at least one of the two streams.
bool kfusion::OpenNISource::grab(cv::Mat& depth, cv::Mat& color)
{
Status rc = STATUS_OK;
if (impl_->has_depth)
{
rc = impl_->depthStream.readFrame(&impl_->depthFrame);
if (rc != openni::STATUS_OK)
{
// NOTE(review): execution continues past REPORT_ERROR with a possibly
// invalid frame — confirm REPORT_ERROR aborts/throws rather than just logs.
sprintf (impl_->strError, "Frame grab failed: %s\n", openni::OpenNI::getExtendedError() );
REPORT_ERROR (impl_->strError);
}
// Copy the driver-owned frame buffer into the caller's Mat (16-bit depth).
const void* pDepth = impl_->depthFrame.getData();
int x = impl_->depthFrame.getWidth();
int y = impl_->depthFrame.getHeight();
cv::Mat(y, x, CV_16U, (void*)pDepth).copyTo(depth);
}
else
{
depth.release();
printf ("no depth\n");
}

if (impl_->has_image)
{
rc = impl_->colorStream.readFrame(&impl_->colorFrame);
if (rc != openni::STATUS_OK)
{
sprintf (impl_->strError, "Frame grab failed: %s\n", openni::OpenNI::getExtendedError() );
REPORT_ERROR (impl_->strError);
}
// Copy the driver-owned color buffer (3-channel, 8-bit) into the caller's Mat.
const void* pColor = impl_->colorFrame.getData();
int x = impl_->colorFrame.getWidth();
int y = impl_->colorFrame.getHeight();
cv::Mat(y, x, CV_8UC3, (void*)pColor).copyTo(color);
}
else
{
color.release();
printf ("no color\n");
}
return impl_->has_image || impl_->has_depth;
}
示例13: sigint_handler
// Create a function which manages a clean "CTRL+C" command -> sigint command
// Releases the detector's global resources (windows, frame Mats, the legacy
// C-API Haar cascade and its memory storage) and exits the process.
// NOTE(review): cv::destroyWindow / ROS logging / exit() are not
// async-signal-safe; a stricter design would only set a flag here and clean
// up in the main loop — kept as-is since it runs once at shutdown.
void sigint_handler(int dummy)
{
ROS_INFO("- detect-human-face is shutting down...\n");
// Free the allocated memory (comment translated from French)
cv::destroyWindow("Initial");
cv::destroyWindow("final");
cv::destroyWindow("diffImg");
prevImg.release();
currImg.release();
// Legacy OpenCV C API: the cascade and storage were created with the C
// allocator and must be released with their matching C release calls.
cvReleaseHaarClassifierCascade(&cascade);
cvReleaseMemStorage(&storage);
ROS_INFO("\n\n... Bye bye !\n -Manu\n");
exit(EXIT_SUCCESS); // Shut down the program
}
示例14: read
// Grabs the next frame and decodes it into videoFrame.
// On grab failure the output is cleared so the caller never sees a stale
// frame, and false is returned.
bool DeckLinkCapture::read(cv::Mat& videoFrame)
{
    if (!grab()) {
        videoFrame.release();
        return false;
    }
    return retrieve(videoFrame);
}
示例15: getResult
// Reconstructs the spatial-domain image from the filtered spectrum and writes
// it to `img`. For CV_8UC3 inputs in color mode, each channel is inverse-
// transformed separately and merged; otherwise only channel 0 is used.
// `spectral`, `channel`, `originImg`, `colorMode` and `GHPF` are members set
// elsewhere in the class. No-op when no source image was loaded.
void spectralFiltering::getResult(cv::Mat &img)
{
if(originImg.empty())
return;
std::vector<cv::Mat> imageR(channel), imageI(channel);
// Clone per-channel real/imaginary planes so the stored spectrum is not
// modified, then undo the center shift before the inverse transform.
// Channels are independent, so the loop parallelizes safely.
#pragma omp parallel for
for(int k = 0;k < channel; k++)
{
imageR[k] = spectral[k].real.clone();
imageI[k] = spectral[k].imag.clone();
moveSpectral2Center(imageR[k]);
moveSpectral2Center(imageI[k]);
}
if(originImg.type()==CV_8UC3 && colorMode)
{
// Color path: inverse-transform each channel, then merge into img.
std::vector<cv::Mat> temp(channel);
#pragma omp parallel for
for(int k = 0;k < channel; k++)
{
// GHPF selects the homomorphic variant of the inverse FFT.
if(GHPF)
iFFT2DHomo(imageR[k], imageI[k], temp[k], originImg.size().width, originImg.size().height);
else
iFFT2D(imageR[k], imageI[k], temp[k], originImg.size().width, originImg.size().height);
}
img.release();
cv::merge(temp, img);
}
else
{
// Grayscale (or color mode disabled): only channel 0 carries data.
if(GHPF)
iFFT2DHomo(imageR[0], imageI[0], img, originImg.size().width, originImg.size().height);
else
iFFT2D(imageR[0], imageI[0], img, originImg.size().width, originImg.size().height);
}
// Dead code below: an earlier HSV-based reconstruction kept for reference.
/*
if(originImg.type()==CV_8UC3 && colorMode)
{
cv::Mat temp;
iFFT2D(imageR, imageI, temp, originImg.size().width, originImg.size().height);
int j, i;
#pragma omp parallel for private(i)
for(j = 0; j < temp.rows; j++)
{
for(i = 0; i < temp.cols; i++)
{
HSV.at<cv::Vec3f>(j,i)[2] = temp.at<uchar>(j,i);
}
}
myCvtColor(HSV, img, HSV2BGR);
}
else
{
iFFT2D(imageR, imageI, img, originImg.size().width, originImg.size().height);
}*/
}