本文整理汇总了C++中ImageProcessor类的典型用法代码示例。如果您正苦于以下问题:C++ ImageProcessor类的具体用法?C++ ImageProcessor怎么用?C++ ImageProcessor使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了ImageProcessor类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: main
int main(int argc, char *argv[])
{
QCoreApplication a(argc, argv);
ImageProcessor processor;
processor.run();
return a.exec();
}
示例2: main
int main(int argc, char** argv)
{
ros::init(argc, argv, "computer_vision");
// if (ros::console::set_logger_level(ROSCONSOLE_DEFAULT_NAME, ros::console::levels::Debug))
// {
// ros::console::notifyLoggerLevelsChanged();
// }
ImageProcessor ic;
ROS_DEBUG("end of ImageProcessor initialization");
ros::Rate r(12); // 12Hz = average frequency at which we receive images
while ((!ic.pose_publishing || !ic.video_publishing) && ros::ok())
{
ros::spinOnce();
r.sleep();
}
while (ros::ok())
{
ros::spinOnce(); // if we dont want this we have to place callback and services in threads
ic.publishProcessedImg();
r.sleep();
}
return 0;
}
示例3: updateCalibrationCheck
void VisionWindow::updateCalibrationCheck(bool value) {
doingCalibration_ = value;
ImageProcessor *top = core_->vision_->top_processor_;
ImageProcessor *bottom = core_->vision_->bottom_processor_;
top->enableCalibration(value);
bottom->enableCalibration(value);
redrawImages();
}
示例4: data
/**
 * RatingsProcessor::onReplyFinished()
 *
 * Handler for the signal indicating the response for the previous network request.
 *
 * If the result was a success, it will start the thread of constructing the QImage object.
 * On failure it updates m_label / m_loading and notifies listeners.
 */
void RatingsProcessor::onReplyFinished() {
// The signal sender is expected to be the QNetworkReply for our request.
QNetworkReply* reply = qobject_cast<QNetworkReply*>(sender());
QString response;
if (reply) {
if (reply->error() == QNetworkReply::NoError) {
const int available = reply->bytesAvailable();
if (available > 0) {
const QByteArray data(reply->readAll());
// Setup the image processing thread
// NOTE(review): imageProcessor is heap-allocated and never deleteLater()'d
// here -- presumably it disposes of itself when finished; confirm, else leak.
ImageProcessor *imageProcessor = new ImageProcessor(data);
/*
 * Invoke our onImageProcessingFinished slot after the processing has finished.
 * Since imageProcessor and 'this' are located in different threads we use 'QueuedConnection' to
 * allow a cross-thread boundary invocation. In this case the QImage parameter is copied in a thread-safe way
 * from the worker thread to the main thread.
 */
connect(imageProcessor, SIGNAL(finished(QImage)), this,
SLOT(onImageProcessingFinished(QImage)),
Qt::QueuedConnection);
imageProcessor->start();
}
} else {
// NOTE(review): error codes below 100 appear to be treated as
// connection-level failures (QNetworkReply's low-numbered enum values);
// confirm that this cut-off matches the intended Qt error taxonomy.
if (reply->error() < 100) {
m_loading = false;
emit loadingChanged();
showError("Please check your internet connection");
return;
}
// Any other error: surface the error string and HTTP status in the label.
m_label =
tr("Error: %1 status: %2").arg(reply->errorString(),
reply->attribute(
QNetworkRequest::HttpStatusCodeAttribute).toString());
emit labelChanged();
m_loading = false;
emit loadingChanged();
emit ratValueChanged();
}
// Schedule deletion of the reply once control returns to the event loop.
reply->deleteLater();
} else {
// sender() was not a QNetworkReply (or was null): report a generic failure.
m_label = tr("Download failed.");
emit labelChanged();
m_loading = false;
emit loadingChanged();
emit ratValueChanged();
}
}
示例5: test_video
// Grab frames from the default camera, preprocess each one, classify it with
// the trained network, and overlay the predicted vowel on the live view.
void test_video() {
    VideoCapture cap(CV_CAP_ANY);
    ImageProcessor processor;
    ImageLoader loader;
    NeuralNetwork net;
    net.load(NET_FILE_NAME);
    //net.visualize_hidden_units(1, 50);
    if (!cap.isOpened()) {
        cout << "Failed to initialize camera\n";
        return;
    }
    namedWindow("CameraCapture");
    namedWindow("ProcessedCapture");

    // Class index -> vowel drawn on the frame.
    static const char* const kVowels[] = { "A", "E", "I", "O", "U" };

    cv::Mat frame;
    for (;;) {
        cap >> frame;
        cv::Mat processedFrame = processor.process_image(frame);
        if (processedFrame.rows * processedFrame.cols == INPUT_LAYER_SIZE) {
            mat input = loader.to_arma_mat(processedFrame);
            const int label = net.predict(input);
            if (label >= 0 && label <= 4)
                putText(frame, kVowels[label], Point(500, 300),
                        FONT_HERSHEY_SCRIPT_SIMPLEX, 2, Scalar::all(0), 3, 8);
        }
        imshow("CameraCapture", frame);
        imshow("ProcessedCapture", processedFrame);

        const int key = waitKey(5);
        if (key == 13) { // ENTER: snapshot the raw frame to disk
            imwrite("captura.jpg", frame);
        }
        if (key == 27)   // ESC: stop capturing
            break;
    }
    destroyAllWindows();
}
示例6: Q_D
/*
 * Pull the downloaded image path out of the finished processor and, if it
 * changed, store it and notify QML/listeners via imagePathChanged().
 */
void ImageLoader::handleReply(AbstractProcessor *processor)
{
    Q_D(ImageLoader);
    ImageProcessor *imageProcessor = qobject_cast<ImageProcessor *>(processor);
    // qobject_cast returns null when 'processor' is not an ImageProcessor;
    // the previous code dereferenced it unconditionally and would crash.
    if (!imageProcessor) {
        return;
    }
    QString imagePath = imageProcessor->imagePath();
    if (d->imagePath != imagePath) {
        d->imagePath = imagePath;
        emit imagePathChanged();
    }
}
示例7: data
/*
 * Slot invoked when the network download finishes. On success, hands the raw
 * bytes to an ImageProcessor running on a dedicated worker thread; on failure,
 * records an error label and clears the loading flag.
 */
void ImageLoader::onReplyFinished()
{
// The signal sender is expected to be the QNetworkReply for our request.
QNetworkReply* reply = qobject_cast<QNetworkReply*>(sender());
QString response;
if (reply) {
if (reply->error() == QNetworkReply::NoError) {
const int available = reply->bytesAvailable();
if (available > 0) {
const QByteArray data(reply->readAll());
// Setup the image processing thread
// NOTE(review): imageProcessor itself is never deleteLater()'d -- only the
// thread is. Presumably the processor cleans itself up; confirm, else leak.
ImageProcessor *imageProcessor = new ImageProcessor(data);
m_thread = new QThread(this);
// Move the image processor to the worker thread
imageProcessor->moveToThread(m_thread);
// Invoke ImageProcessor's start() slot as soon as the worker thread has started
connect(m_thread, SIGNAL(started()), imageProcessor, SLOT(start()));
// Delete the worker thread automatically after it has finished
connect(m_thread, SIGNAL(finished()), m_thread, SLOT(deleteLater()));
/*
 * Invoke our onProcessingFinished slot after the processing has finished.
 * Since imageProcessor and 'this' are located in different threads we use 'QueuedConnection' to
 * allow a cross-thread boundary invocation. In this case the QImage parameter is copied in a thread-safe way
 * from the worker thread to the main thread.
 */
connect(imageProcessor, SIGNAL(finished(QImage)), this, SLOT(onImageProcessingFinished(QImage)), Qt::QueuedConnection);
// Terminate the thread after the processing has finished
connect(imageProcessor, SIGNAL(finished(QImage)), m_thread, SLOT(quit()));
m_thread->start();
}
} else {
// Network-level failure: surface the error string and HTTP status code.
m_label = tr("Error: %1 status: %2").arg(reply->errorString(), reply->attribute(QNetworkRequest::HttpStatusCodeAttribute).toString());
emit labelChanged();
m_loading = false;
emit loadingChanged();
}
// Schedule deletion of the reply once control returns to the event loop.
reply->deleteLater();
} else {
// sender() was not a QNetworkReply (or was null): report a generic failure.
m_label = tr("Download failed. Check internet connection");
emit labelChanged();
m_loading = false;
emit loadingChanged();
}
}
示例8: getImageProcessor
/*
 * Mouse-click handler for the big camera image.
 *
 * In calibration mode, every click records a calibration sample for the
 * camera currently shown. In classification mode, a left click previews a
 * color-table update (applied, rendered, then reverted), and a right click
 * commits the pending update (saving an undo copy first).
 */
void VisionWindow::updateClicked(int xIdx, int yIdx, int buttonIdx){
if(!initialized_) return;
// Which camera's image is currently displayed in the big view.
int image = currentBigImageCam_;
ImageProcessor* processor = getImageProcessor(image);
unsigned char* colorTable = processor->getColorTable();
const ImageParams& iparams = processor->getImageParams();
if (doingCalibration_) {
// Record the clicked pixel as a calibration sample for this camera.
Sample s; s.x = xIdx; s.y = yIdx;
if(image == IMAGE_TOP)
s.camera = Camera::TOP;
else
s.camera = Camera::BOTTOM;
emit calibrationSampleAdded(s);
redrawImages();
}
if (doingClassification_) {
if (buttonIdx == Qt::LeftButton) {
//for(int i=0; i < LUT_SIZE; i++)
//std::cout << colorTable[i] << "\,";
//std::cout << "DONE\n";
// Preview: snapshot the table, apply the update, reprocess the frame,
// then restore the snapshot so the change is visible but not committed.
memcpy(tempTable,colorTable,LUT_SIZE);
// Convert the clicked (x, y) pixel to its YUV components.
ColorTableMethods::xy2yuv(processor->getImg(), xIdx, yIdx, iparams.width, currentY_, currentU_, currentV_);
updateTable(colorTable, currentY_, currentU_, currentV_);
//for(int i=0; i < LUT_SIZE; i++)
//std::cout << tempTable[i] << "\,";
//sstd::cout << "\n";
colorUpdateAvailable_ = true;
redrawImages();
processor->processFrame();
// Revert the live table to the pre-update snapshot.
memcpy(colorTable,tempTable,LUT_SIZE);
} else if (buttonIdx == Qt::RightButton && colorUpdateAvailable_) {
// Commit: keep an undo copy, then apply the pending update for real.
memcpy(undoTable, colorTable, LUT_SIZE);
undoImage_ = image;
updateTable(colorTable, currentY_, currentU_, currentV_);
colorUpdateAvailable_ = false;
redrawImages();
}
}
}
示例9: main
// Load the image given on the command line, compute the four directional
// perspective transforms, and write each rendered view to images/.
int main(int argc, char** argv) {
    if (argc != 2) {
        printf("usage: testImageProcessor <Image_Path>\n");
        return -1;
    }

    ImageProcessor imgProc;
    imgProc.readImage(argv[1]);

    using namespace std;
    using namespace cv;

    // Build the perspective transforms from the loaded image.
    Mat im = imgProc.getImage();
    imgProc.determinePerspTransforms(im);

    // Render each of the four directions.
    Mat im_trans_left  = imgProc.perspTransIm(LEFT);
    Mat im_trans_right = imgProc.perspTransIm(RIGHT);
    Mat im_trans_up    = imgProc.perspTransIm(UP);
    Mat im_trans_down  = imgProc.perspTransIm(DOWN);

    // Persist the source image and every transformed view.
    imwrite("images/im_default.jpg", im);
    imwrite("images/im_down.jpg", im_trans_down);
    imwrite("images/im_left.jpg", im_trans_left);
    imwrite("images/im_right.jpg", im_trans_right);
    imwrite("images/im_up.jpg", im_trans_up);

    return 0;
}
示例10: processImages
/*
 * Run the processing + detection pipeline over every frame of a camera,
 * showing the intermediate and final results in OpenCV windows.
 *
 * NOTE(review): frames[i].image is cloned immediately after the resize();
 * if frames are not populated elsewhere before this call, the clone is of an
 * empty Mat -- confirm against the caller.
 */
void processImages( Camera& camera, ImageProcessor& processor, Detector& detector )
{
    cout << "Camera "<< camera.id << ": --Processing Images" << endl;

    // Per-camera result window title.
    stringstream result_window;
    result_window << camera.name << ": detected particles";

    processor.addControls();
    detector.addControls();
    cv::waitKey(10);

    const size_t number_of_frames = camera.imagelist.size();
    camera.frames.resize( number_of_frames );
    // size_t index avoids the previous signed/unsigned comparison warning
    // (int i < size_t) and matches the container's size type.
    for (size_t i = 0; i < number_of_frames; ++i) {
        cv::Mat temp_image = camera.frames[i].image.clone();
        processor.processImage( temp_image );
        cv::imshow("processed image", temp_image );
        detector.detectFeatures( temp_image, camera.frames[i].particles);
        detector.drawResult( camera.frames[i] );
        cv::imshow(result_window.str(), camera.frames[i].image);
    }
    cv::destroyWindow( result_window.str() );
}
示例11: main
int main(int argc, char **argv)
{
if (argc != 2) {
exit(1);
}
signal(SIGINT, intHandler);
std::string arg = argv[1];
cv::VideoCapture capture(arg); //try to open string, this will attempt
if (!capture.isOpened()) //if this fails, try to open as a video camera
capture.open(atoi(arg.c_str()));
if (!capture.isOpened()) {
std::cerr << "Failed to open a video device or video file!\n" << std::endl;
return 1;
}
ImageProcessor *ip = new HSV_Region_Processor_Min_Alloc(capture);
BotController *bt = new BotController();
Region *dp;
cv::Mat frame;
ip->initialiseWindow();
std::vector<Region *> *regionList;
while (keepRunning) {
capture >> frame;
ip->cleanRegionList();
regionList = ip->processFrame(frame);
std::sort(regionList->begin(), regionList->end(), compareBySize);
dp = (*regionList)[0];
if (dp != NULL && dp->getSize() > 100) {
double angle = ip->angle(frame, *dp);
double distance = ip->distance(frame, *dp);
std::cout << angle << " - " << distance << std::endl;
bt->move(angle, distance);
ip->drawArrow(frame, angle, distance);
//ip->saveFrame(frame);
} else {
std::cout << "No object found, sitting still" << std::endl;
bt->stop();
}
ip->drawFrame(frame);
cv::waitKey(5);
// ip->processKeys(frame);
}
std::cout << "Shutting down" << std::endl;
bt->stop();
}
示例12: printf
/*
 * One pass of the camera self-test: light the LEDs, capture an image
 * (optionally retaining it to flash), threshold/process it with tunable
 * HSL bounds from Preferences, and print a target report with timings.
 */
void CameraTest::Execute() {
cameraLEDsSubsystem->GreenOn();
cameraLEDsSubsystem->BlueOff();
// Allow time for the LEDs to light up
if(!timer->HasPeriodPassed(0.1)) { return; } // min tested was 80ms
Preferences * prefs = Preferences::GetInstance();
// Capture an image from the camera and save it to flash
timer->Reset();
// NOTE(review): 'image' is never deleted in this function -- if
// CaptureImage() transfers ownership to the caller this is a leak on
// every Execute() pass; confirm against the camera subsystem's contract.
ColorImage * image;
if (prefs->GetInt("image_retain", 1) == 1) {
cameraSubsystem->RetainImage("/cameratest.jpg");
image = cameraSubsystem->CaptureImage();
cameraSubsystem->RetainImage(NULL); // stop retaining
printf("[CAMERA] Captured image and wrote to /cameratest.jpg in %.1f ms\n", timer->Get() * 1000);
} else {
image = cameraSubsystem->CaptureImage();
printf("[CAMERA] Captured image in %.1f ms\n", timer->Get() * 1000);
}
// Load preferences for filtering threshold image
// (hue/saturation/luminance low-high bounds, with defaults tuned for the target)
Threshold threshold = Threshold(prefs->GetInt("hue_low", 100), prefs->GetInt("hue_high", 140),
prefs->GetInt("sat_low", 90), prefs->GetInt("sat_high", 255),
prefs->GetInt("lum_low", 20), prefs->GetInt("lum_high", 255));
// Process the captured image
timer->Reset();
ImageProcessor * ip = new ImageProcessor();
ip->SetThreshold(threshold);
ip->Process(image);
printf("[CAMERA] Image processed in %.1f ms\n", timer->Get() * 1000);
// Write the processed images to flash
if (prefs->GetInt("image_retain", 1) == 1) {
timer->Reset();
ip->WriteImages("/cameratest");
printf("[CAMERA] Processed images written to /cameratest.*.bmp in %.1f ms\n", timer->Get() * 1000);
}
// Generate a target report
timer->Reset();
TargetReport * tr = new TargetReport(ip->GetThresholdImage(), ip->GetFilteredImage());
tr->Generate();
printf("[CAMERA] Target report generated in %.1f ms\n", timer->Get() * 1000);
tr->OutputScores();
// Mark the command finished and release the report/processor.
finished = true;
delete tr;
delete ip;
}
示例13: SeamCarvingTransform
/*
 * Grow or shrink a column count via seam carving on the scratch file.
 * A positive delta inserts seams (widen); a negative delta removes them.
 *
 * NOTE(review): ImageProcessor is constructed with no reference to
 * "temp.png"; presumably the class operates on that file by convention --
 * confirm against ImageProcessor's definition.
 */
static void adjustColumns(int delta) {
    if (delta > 0) {
        ImageProcessor myImageProcessor;
        myImageProcessor.addCols( delta );
    } else if (delta < 0) {
        ImageProcessor myImageProcessor;
        myImageProcessor.removeCols( -delta );
    }
}

/*
 * Seam-carve inputFile by changeX columns and changeY rows (positive =
 * enlarge, negative = shrink) and write the result to outputFile. Works on
 * a "temp.png" scratch copy, rotating it 90 degrees so the same column
 * carving handles the vertical direction, then removes the scratch file.
 */
void SeamCarvingTransform(char * inputFile,
char * outputFile,
int changeX,
int changeY) {
    // Stage the input as the scratch file the processor works on.
    Magick::Image myImageHandler(inputFile);
    myImageHandler.write("temp.png");

    // Horizontal resize: operate directly on columns.
    adjustColumns(changeX);

    // Vertical resize: rotate so rows become columns, carve, rotate back.
    if (changeY != 0) {
        Magick::Image img("temp.png");
        img.rotate(90);
        img.write("temp.png");
    }
    adjustColumns(changeY);
    if (changeY != 0) {
        Magick::Image img("temp.png");
        img.rotate(-90);
        img.write("temp.png");
    }

    // Emit the final image and clean up the scratch file.
    Magick::Image outputImageHandler("temp.png");
    outputImageHandler.write(outputFile);
    remove("temp.png");
}
示例14: rotationCallback
// Trackbar callback: map the slider value onto ImageProcessor::rotation
// and re-run processing so the preview updates immediately.
//
// Rotation varies from -30 to 30; a smaller range ensures 0 can be found again.
// NOTE(review): the original comment said "max slider value is 60", but the
// formula below maps val 0..6000 onto +30..-30 (integer division by -100).
// One of the two is stale -- confirm the actual slider range.
void rotationCallback(int val, void *ipPtr){
// Recover the ImageProcessor passed as the trackbar user-data pointer.
ImageProcessor *ip = static_cast<ImageProcessor *>(ipPtr);
ip->rotation = (val - 3000) / -100;
// Re-process the cached HSV image with the new rotation applied.
ip->processImage(&ip->hsv, true);
}
示例15: testDistance
/*
* tests the distance() function
*/
void testDistance(){
double result = imageProcessor.distance(cv::Point(100, 15), cv::Point(75, 36));
assert(result < 32.64 + DELTA && result > 32.64 - DELTA);
result = imageProcessor.distance(cv::Point(3, 5), cv::Point(9, 7));
assert(result < 6.32 + DELTA && result > 6.32 - DELTA);
result = imageProcessor.distance(cv::Point(572, 641), cv::Point(894, 127));
assert(result < 606.53 + DELTA && result > 606.53 - DELTA);
printf("distance() test passed.\n");
}