本文整理汇总了C++中NDArrayConverter类的典型用法代码示例。如果您正苦于以下问题:C++ NDArrayConverter类的具体用法?C++ NDArrayConverter怎么用?C++ NDArrayConverter使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了NDArrayConverter类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: from_sand_wrapper
// Python wrapper: run from_sand() on a numpy image and return the result
// as a new numpy array.
PyObject* from_sand_wrapper(PyObject *im) {
    NDArrayConverter converter;
    cv::Mat input = converter.toMat(im);
    // Output buffer is pre-allocated: single-channel 8-bit, same size as input.
    cv::Mat output(input.size(), CV_8UC1);
    from_sand(input, output);
    return converter.toNDArray(output);
}
示例2: from_grass_b_wrapper
// Python wrapper: run from_grass_b() on a numpy image and return the result
// as a new numpy array. The output Mat is allocated by the callee.
PyObject* from_grass_b_wrapper(PyObject *im) {
    NDArrayConverter converter;
    cv::Mat input = converter.toMat(im);
    cv::Mat output;
    from_grass_b(input, output);
    return converter.toNDArray(output);
}
示例3: get_bboxes
// Python wrapper around get_bboxes_: converts the numpy image and
// segmentation to cv::Mat, forwards the rectangle coordinates, and converts
// the resulting bounding boxes back to a numpy array.
PyObject * get_bboxes(PyObject * image_, PyObject * seg_, int x1, int y1, int x2, int y2) {
    NDArrayConverter converter;
    cv::Mat img = converter.toMat(image_);
    cv::Mat segmentation = converter.toMat(seg_);
    // Debug trace of the rectangle coordinates (kept to preserve behavior).
    std::cout << x1 << " " << y1 << " " << x2 << " " << y2 << std::endl;
    cv::Mat boxes = get_bboxes_(img, segmentation, x1, y1, x2, y2);
    return converter.toNDArray(boxes);
}
示例4: detect_watanabe_wrapper
// Python wrapper: locate the target via detect_watanabe() and return its
// (x, y) position to Python as a 2x1 float32 numpy array.
PyObject* detect_watanabe_wrapper(PyObject *im) {
    NDArrayConverter converter;
    cv::Mat image = converter.toMat(im);
    cv::Point location;
    detect_watanabe(image, location);
    // Pack the point into a 2x1 column vector: row 0 = x, row 1 = y.
    cv::Mat coords(2, 1, CV_32F);
    coords.at<float>(0, 0) = location.x;
    coords.at<float>(1, 0) = location.y;
    return converter.toNDArray(coords);
}
示例5: CannyNewFuncRGB
// Python wrapper for cppCannyBunk_RGB: edge detection on an RGB image with
// the given low/high thresholds and Sobel kernel size.
PyObject* CannyNewFuncRGB(PyObject *srcImgPy, double threshLow, double threshHigh, int kernelSize)
{
    NDArrayConverter converter;
    cv::Mat source = converter.toMat(srcImgPy);
    cv::Mat edges;
    cppCannyBunk_RGB(source, edges, threshLow, threshHigh, kernelSize);
    return converter.toNDArray(edges);
}
示例6: inputFrame
// cv::Mat segment(cv::Mat inputFrame) {
// Crop the calibrated marker region out of the incoming frame, display both
// the frame and the patch, and return the patch as a numpy array.
// Relies on globals p0 (top-left), p1 (top-right), p3 (bottom-left) —
// presumably set beforehand by calibrate(); verify against caller.
PyObject *segment(PyObject *_inputFrame) {
    NDArrayConverter converter;
    cv::Mat frame = converter.toMat(_inputFrame);
    // Rectangle spans from the top-left marker across to p1 and down to p3.
    cv::Rect roi(p0.x, p0.y, p1.x - p0.x, p3.y - p0.y);
    cv::Mat patch;
    frame(roi).copyTo(patch);
    cv::imshow("Video Captured", frame);
    cv::imshow("Patch", patch);
    return converter.toNDArray(patch);
}
示例7: get_gray_img
// Convert a numpy image to a single-channel grayscale cv::Mat.
// A one-channel input is returned as-is (shares data with the conversion);
// anything else is assumed BGR and converted.
static cv::Mat get_gray_img(PyObject *p_img) {
    NDArrayConverter converter;
    cv::Mat raw = converter.toMat(p_img);
    if (raw.channels() == 1) {
        return raw;
    }
    cv::Mat gray;
    cv::cvtColor(raw, gray, CV_BGR2GRAY);
    return gray;
}
示例8: createNumpyArrayWithMatWrapper
// Allocate a 2-D numpy array with the given dimensions/type and return a
// cv::Mat header for it by value.
cv::Mat createNumpyArrayWithMatWrapper(int cols, int rows, int type)
{
    int dims[] = {cols, rows};
    PyObject *array = createNumArrayFromMatDimensions(2, dims, type);
    NDArrayConverter converter;
    // The toMat(obj, 0) overload can only hand back a Mat allocated on the
    // heap; we want a stack value, so take a cheap refcounted copy of the
    // header and release the heap object.
    cv::Mat *heapMat = converter.toMat(array, 0);
    cv::Mat stackMat(*heapMat);
    delete heapMat;
    return stackMat;
}
示例9: CannyVanilla
// Plain OpenCV Canny wrapper: converts multi-channel input to grayscale,
// then runs cv::Canny with L2 gradient magnitude (last arg true).
PyObject* CannyVanilla(PyObject *srcImgPy, double threshLow, double threshHigh, int kernelSize)
{
    NDArrayConverter converter;
    cv::Mat source = converter.toMat(srcImgPy);
    if (source.channels() > 1)
        cv::cvtColor(source, source, CV_BGR2GRAY);
    cv::Mat edges;
    cv::Canny(source, edges, threshLow, threshHigh, kernelSize, true);
    return converter.toNDArray(edges);
}
示例10: extract_features
// Compute BRISK descriptors for user-supplied keypoints on an image.
// p_descriptor_extractor: PyCObject holding a brisk::BriskDescriptorExtractor*.
// p_img: numpy image (grayscale-converted via get_gray_img).
// p_keypoints: Python list of cv2.KeyPoint objects.
// Returns a new Python list [keypoints, descriptors-ndarray].
//
// Fix: the original Py_DECREF'd the results of PyTuple_GetItem. Those are
// BORROWED references (per the CPython C API), so decrefing them corrupted
// the refcount of the keypoint's pt coordinates. Only the PyObject_GetAttrString
// results (which are NEW references) must be released.
PyObject* extract_features(PyObject* p_descriptor_extractor,
        PyObject *p_img, PyObject *p_keypoints) {
    cv::Mat img = get_gray_img(p_img);
    Py_ssize_t num_keypoints = PyList_Size(p_keypoints);
    std::vector<cv::KeyPoint> keypoints;
    for(Py_ssize_t i = 0; i < num_keypoints; ++i) {
        keypoints.push_back(cv::KeyPoint());
        // PyList_GetItem returns a borrowed reference — no DECREF needed.
        PyObject* cv2_keypoint = PyList_GetItem(p_keypoints, i);
        // PyObject_GetAttrString returns NEW references — released below.
        PyObject* cv2_keypoint_size = PyObject_GetAttrString(cv2_keypoint, "size");
        PyObject* cv2_keypoint_angle = PyObject_GetAttrString(cv2_keypoint, "angle");
        PyObject* cv2_keypoint_response = PyObject_GetAttrString(cv2_keypoint, "response");
        PyObject* cv2_keypoint_pt = PyObject_GetAttrString(cv2_keypoint, "pt");
        // PyTuple_GetItem returns BORROWED references — do NOT DECREF these.
        PyObject* cv2_keypoint_pt_x = PyTuple_GetItem(cv2_keypoint_pt, 0);
        PyObject* cv2_keypoint_pt_y = PyTuple_GetItem(cv2_keypoint_pt, 1);
        // Copy the float attributes into the cv::KeyPoint fields.
        PyArg_Parse(cv2_keypoint_size, "f", &keypoints[i].size);
        PyArg_Parse(cv2_keypoint_angle, "f", &keypoints[i].angle);
        PyArg_Parse(cv2_keypoint_response, "f", &keypoints[i].response);
        PyArg_Parse(cv2_keypoint_pt_x, "f", &keypoints[i].pt.x);
        PyArg_Parse(cv2_keypoint_pt_y, "f", &keypoints[i].pt.y);
        // Release only the owned (new) references.
        Py_DECREF(cv2_keypoint_size);
        Py_DECREF(cv2_keypoint_angle);
        Py_DECREF(cv2_keypoint_response);
        Py_DECREF(cv2_keypoint_pt);
    }
    cv::Mat descriptors;
    brisk::BriskDescriptorExtractor* descriptor_extractor =
        static_cast<brisk::BriskDescriptorExtractor*>(PyCObject_AsVoidPtr(p_descriptor_extractor));
    descriptor_extractor->compute(img, keypoints, descriptors);
    NDArrayConverter cvt;
    PyObject* ret = PyList_New(2);
    PyObject* ret_keypoints = keypoints_ctopy(keypoints);
    // PyList_SetItem steals the references, so no extra DECREFs are needed.
    PyList_SetItem(ret, 0, ret_keypoints);
    PyList_SetItem(ret, 1, cvt.toNDArray(descriptors));
    return ret;
}
示例11: construct
// boost::python rvalue-converter stage 2: construct a T (a Mat-compatible
// type from the enclosing template scope) in-place from a numpy object.
// Called by boost.python after convertible() accepted obj_ptr.
static void construct(PyObject* obj_ptr,
boost::python::converter::rvalue_from_python_stage1_data* data)
{
using namespace boost::python;
// Storage block pre-allocated by boost.python for the resulting T.
typedef converter::rvalue_from_python_storage< T > storage_t;
storage_t* the_storage = reinterpret_cast<storage_t*>( data );
void* memory_chunk = the_storage->storage.bytes;
NDArrayConverter cvt;
// Placement-new: build T directly inside boost.python's storage, moving the
// converted cv::Mat into it. boost.python owns the object from here on.
T* newvec = new (memory_chunk) T(cvt.toMat(obj_ptr));
// Tell boost.python where the constructed object lives.
data->convertible = memory_chunk;
return;
}
示例12: calibrate
// Locate four red corner markers in the frame and store their centers in the
// globals p0 (top-left), p1 (top-right), p2 (bottom-right), p3 (bottom-left),
// assigned by which quadrant of the frame each center falls in.
// Returns true only when exactly four contours were found, the four corner
// points are pairwise distinct, and the user pressed Esc (key & 0xFF == 27).
bool calibrate(PyObject *_inputFrame_BGR, int key) {
NDArrayConverter cvt;
cv::Mat inputFrame_BGR = cvt.toMat(_inputFrame_BGR);
// Convert input image to HSV
cv::Mat inputFrame_HSV;
// cv::GaussianBlur(inputFrame_BGR, inputFrame_BGR, cv::Size(5, 5), 0, 0);
// cv::resize(inputFrame_BGR, inputFrame_BGR, cv::Size(inputFrame_BGR.cols / 2, inputFrame_BGR.rows / 2));
cv::cvtColor(inputFrame_BGR, inputFrame_HSV, CV_BGR2HSV);
// Threshold the HSV image, keep only the red pixels.
// Red hue wraps around 0/180 in OpenCV's HSV, so two ranges are combined.
cv::Mat lowerRedHueImg;
cv::Mat upperRedHueImg;
cv::Mat redHueImg;
cv::inRange(inputFrame_HSV, cv::Scalar(0, 100, 100), cv::Scalar(10, 255, 255), lowerRedHueImg);
cv::inRange(inputFrame_HSV, cv::Scalar(160, 100, 100), cv::Scalar(179, 255, 255), upperRedHueImg);
cv::addWeighted(lowerRedHueImg, 1.0, upperRedHueImg, 1.0, 0.0, redHueImg);
// Find four centers of red pixels in the combined threshold image
std::vector< std::vector<cv::Point> > contours;
cv::findContours(redHueImg, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);
// Proceed only when exactly the four expected markers are visible.
if (contours.size() == 4) {
for (int i = 0; i < contours.size(); i++) {
// Center of the contour = integer mean of all its points.
cv::Point center(0, 0);
for (int j = 0; j < contours.at(i).size(); j++)
center += contours.at(i).at(j);
center = cv::Point(center.x / contours.at(i).size(), center.y / contours.at(i).size());
// Route the center to the corner global matching its frame quadrant.
if (center.x < redHueImg.cols / 2 && center.y < redHueImg.rows / 2)
p0 = center;
else if (center.x > redHueImg.cols / 2 && center.y < redHueImg.rows / 2)
p1 = center;
else if (center.x > redHueImg.cols / 2 && center.y > redHueImg.rows / 2)
p2 = center;
else
p3 = center;
// Visual feedback: outline each marker and mark its center.
cv::drawContours(inputFrame_BGR, contours, i, cv::Scalar(255, 0, 0));
cv::circle(inputFrame_BGR, center, 3, cv::Scalar(255, 0, 0), -1);
}
// Accept calibration only with four distinct corners and user confirmation.
if (p0 != p1 && p0 != p2 && p0 != p3 && p1 != p2 && p1 != p3 && p2 != p3 && (key & 0xFF) == 27) // 'Esc' key
return true;
}
cv::imshow("Video Captured", inputFrame_BGR);
return false;
}
示例13: doBPySpectralResidualSaliency
// Run spectral-residual saliency on a full-sized frame and return the
// detected crops to Python as a list of numpy arrays.
// NOTE(review): the original body did not compile — it called a nonexistent
// std::vector::append, passed no argument to toNDArray(), and returned an
// undefined `shapename`. This version builds and returns the crop list.
bp::object doBPySpectralResidualSaliency(bp::object FullsizedImage)
{
    NDArrayConverter cvt;
    cv::Mat srcImgCPP = cvt.toMat(FullsizedImage.ptr());
    std::vector<cv::Mat> foundCrops;
    std::vector<std::pair<double,double>> cropGeolocations;
    SpectralResidualSaliencyClass saldoer;
    saldoer.ProcessSaliency(&srcImgCPP, &foundCrops, &cropGeolocations, 0);
    consoleOutput.Level1() << "SpectralResidualSaliency found " << to_istring(foundCrops.size()) << " crops" << std::endl;
    bp::list foundCropsPy;
    for(size_t ii = 0; ii < foundCrops.size(); ii++) {
        // toNDArray returns a new PyObject*; bp::handle<> takes ownership so
        // the reference is not leaked.
        foundCropsPy.append(bp::object(bp::handle<>(cvt.toNDArray(foundCrops[ii]))));
    }
    return foundCropsPy;
}
示例14: detect_and_extract
// Detect BRISK keypoints on the (grayscale-converted) image and compute
// their descriptors in one call.
// p_descriptor_extractor: PyCObject holding a brisk::BriskDescriptorExtractor*.
// Returns a new Python list [keypoints, descriptors-ndarray].
PyObject* detect_and_extract(PyObject* p_descriptor_extractor, PyObject *p_img,
        PyObject *p_thresh, PyObject *p_octaves) {
    cv::Mat gray = get_gray_img(p_img);
    std::vector<cv::KeyPoint> kps = detect(gray, p_thresh, p_octaves);
    brisk::BriskDescriptorExtractor* extractor =
        static_cast<brisk::BriskDescriptorExtractor*>(PyCObject_AsVoidPtr(p_descriptor_extractor));
    cv::Mat descs;
    extractor->compute(gray, kps, descs);
    NDArrayConverter converter;
    // PyList_SetItem steals the references, so no extra DECREFs are needed.
    PyObject* result = PyList_New(2);
    PyList_SetItem(result, 0, keypoints_ctopy(kps));
    PyList_SetItem(result, 1, converter.toNDArray(descs));
    return result;
}
示例15: mul
// Multiply two 2-D matrices supplied as numpy arrays and return the product
// as a new numpy array. Raises a Python TypeError (via boost.python) when
// the inner dimensions do not match.
PyObject*
mul(PyObject *left, PyObject *right)
{
    NDArrayConverter converter;
    cv::Mat a = converter.toMat(left);
    cv::Mat b = converter.toMat(right);
    // A valid matrix product requires a.cols == b.rows.
    if (a.cols != b.rows)
    {
        PyErr_SetString(PyExc_TypeError,
                        "Incompatible sizes for matrix multiplication.");
        py::throw_error_already_set();
    }
    cv::Mat product = a * b;
    return converter.toNDArray(product);
}