本文整理汇总了C++中ImagePtr类的典型用法代码示例。如果您正苦于以下问题:C++ ImagePtr类的具体用法?C++ ImagePtr怎么用?C++ ImagePtr使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了ImagePtr类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: acquireWithWait
// Block until a free image slot is available in the pool, or return an
// empty ImagePtr immediately if the pool is being flushed.
//
// The returned ImagePtr aliases the pooled image and carries an
// ImageRecycler deleter, which presumably returns the slot to the pool
// when the last reference is dropped — confirm in ImageRecycler.
ImagePtr VaapiImagePool::acquireWithWait()
{
ImagePtr image;
AutoLock lock(m_lock);
// Fast-path rejection: do not hand out images while flushing.
if (m_flushing) {
ERROR("flushing, no new image available");
return image;
}
// Wait for a free slot; the condition includes !m_flushing because the
// wait can be woken by a flush as well as by a recycled slot.
while (m_freeIndex.empty() && !m_flushing)
m_cond.wait();
// Re-check after waking: a flush may have started while we waited.
if (m_flushing) {
ERROR("flushing, no new image available");
return image;
}
ASSERT(!m_freeIndex.empty());
int32_t index = m_freeIndex.front();
ASSERT(index >=0 && index < m_poolSize);
m_freeIndex.pop_front();
// Hand out the pooled image; the recycler keeps the pool alive via
// shared_from_this() for as long as the image is in use.
image.reset(m_images[index].get(), ImageRecycler(shared_from_this()));
return image;
}
示例2: detectRegions
void CascadeClassifierNavigationPlayer::detectRegions()
{
if (!m_classifier) {
return;
}
ImagePtr curImage = m_navigator->currentImage();
QSharedPointer<ImageData> imageData = curImage->data().toStrongRef();
if (!imageData) {
return;
}
cv::Mat cvImage = CvUtils::QImageToCvMat(imageData->defaultFrame());
cv::Mat grayImage;
cv::cvtColor(cvImage, grayImage, cv::COLOR_BGR2GRAY);
cv::Mat eqGrayImage;
cv::equalizeHist(grayImage, eqGrayImage);
m_regions.clear();
std::vector<cv::Rect> regions;
m_classifier->detectMultiScale(eqGrayImage, regions, 1.1, 5, 0,
cv::Size(150, 150));
for (const cv::Rect ®ion : regions) {
m_regions << QRect(region.x, region.y, region.width, region.height);
}
}
示例3: calibrator
// Apply dark/flat calibration to an image, windowed to the configured
// rectangle (or the full frame when no rectangle was configured).
//
// The input's pixel depth selects the working type: pixels that fit in a
// float mantissa are calibrated in float, everything else in double.
ImagePtr Calibrator::operator()(const ImagePtr image) const {
    unsigned int floatlimit = std::numeric_limits<float>::digits;
    // find the appropriate frame to use for the correction images
    ImageRectangle frame;
    if (rectangle == ImageRectangle()) {
        // a default-constructed rectangle means "calibrate the full frame"
        frame = ImageRectangle(ImagePoint(), image->size());
    } else {
        // BUG FIX: previously the configured rectangle was never copied
        // into `frame`, so a non-default rectangle left `frame` empty and
        // the correction windows ignored the requested subframe.
        frame = rectangle;
    }
    // use pixel size to decide which type to use for the result image
    if (image->bitsPerPixel() <= floatlimit) {
        // create adapters for darks and flats with float values
        ConstPixelValueAdapter<float> pvdark(dark);
        WindowAdapter<float> wdark(pvdark, frame);
        ConstPixelValueAdapter<float> pvflat(flat);
        WindowAdapter<float> wflat(pvflat, frame);
        TypedCalibrator<float> calibrator(wdark, wflat);
        return calibrator(image);
    }
    // wider pixels: calibrate in double precision
    ConstPixelValueAdapter<double> pvdark(dark);
    WindowAdapter<double> wdark(pvdark, frame);
    ConstPixelValueAdapter<double> pvflat(flat);
    WindowAdapter<double> wflat(pvflat, frame);
    TypedCalibrator<double> calibrator(wdark, wflat);
    return calibrator(image);
}
示例4: createDerivedImage
bool SmoothingImageFilter::postProcess()
{
if (!mRawResult)
return false;
ImagePtr input = this->getCopiedInputImage();
if (!input)
return false;
QString uid = input->getUid() + "_sm%1";
QString name = input->getName()+" sm%1";
ImagePtr output = createDerivedImage(mServices->getPatientService(),
uid, name,
mRawResult, input);
mRawResult = NULL;
if (!output)
return false;
mServices->getPatientService()->insertData(output);
// set output
mOutputTypes.front()->setValue(output->getUid());
return true;
}
示例5: assert
// Render action: for every requested output plane, fetch the corresponding
// source image from input 0 and copy it into the host-supplied plane image,
// converting components/bit depth when they do not match.
//
// NOTE(review): only the FRONT output plane is used as the copy target
// inside the loop even though all planes are iterated — presumably the node
// is expected to produce exactly one plane (see the assert); confirm before
// enabling multi-plane output.
StatusEnum
DiskCacheNode::render(const RenderActionArgs& args)
{
assert(args.outputPlanes.size() == 1);
EffectInstancePtr input = getInput(0);
if (!input) {
return eStatusFailed;
}
const std::pair<ImageComponents, ImagePtr>& output = args.outputPlanes.front();
for (std::list<std::pair<ImageComponents, ImagePtr > >::const_iterator it = args.outputPlanes.begin(); it != args.outputPlanes.end(); ++it) {
RectI roiPixel;
// Fetch the source image for this plane at the original scale.
ImagePtr srcImg = getImage(0, args.time, args.originalScale, args.view, NULL, &it->first, false /*mapToClipPrefs*/, true /*dontUpscale*/, eStorageModeRAM /*useOpenGL*/, 0 /*textureDepth*/, &roiPixel);
if (!srcImg) {
return eStatusFailed;
}
// The host must deliver the image at the mipmap level we render at.
if ( srcImg->getMipMapLevel() != output.second->getMipMapLevel() ) {
throw std::runtime_error("Host gave image with wrong scale");
}
// Convert when components or bit depth differ; otherwise straight copy.
if ( ( srcImg->getComponents() != output.second->getComponents() ) || ( srcImg->getBitDepth() != output.second->getBitDepth() ) ) {
srcImg->convertToFormat( args.roi, getApp()->getDefaultColorSpaceForBitDepth( srcImg->getBitDepth() ),
getApp()->getDefaultColorSpaceForBitDepth( output.second->getBitDepth() ), 3, true, false, output.second.get() );
} else {
output.second->pasteFrom( *srcImg, args.roi, output.second->usesBitMap() && srcImg->usesBitMap() );
}
}
return eStatusOK;
}
示例6:
bool BinaryThinningImageFilter3DFilter::execute()
{
ImagePtr input = this->getCopiedInputImage();
if (!input)
return false;
if (input->getMax() != 1 || input->getMin() != 0)
{
return false;
}
// report(QString("Creating centerline from \"%1\"...").arg(input->getName()));
itkImageType::ConstPointer itkImage = AlgorithmHelper::getITKfromSSCImage(input);
//Centerline extraction
typedef itk::BinaryThinningImageFilter3D<itkImageType, itkImageType> centerlineFilterType;
centerlineFilterType::Pointer centerlineFilter = centerlineFilterType::New();
centerlineFilter->SetInput(itkImage);
centerlineFilter->Update();
itkImage = centerlineFilter->GetOutput();
//Convert ITK to VTK
itkToVtkFilterType::Pointer itkToVtkFilter = itkToVtkFilterType::New();
itkToVtkFilter->SetInput(itkImage);
itkToVtkFilter->Update();
vtkImageDataPtr rawResult = vtkImageDataPtr::New();
rawResult->DeepCopy(itkToVtkFilter->GetOutput());
mRawResult = rawResult;
return true;
}
示例7: TEST
// Exercise unload() through both the base (ImagePtr) and the concrete
// (ImageIPtr) smart-pointer types.
TEST(ModelTest, Virtual )
{
    ImagePtr viaBase = new ImageI();
    ImageIPtr viaConcrete = new ImageI();
    viaBase->unload();
    viaConcrete->unload();
}
示例8: processImage
/// Rotate the incoming frame by the configured rotation (in place, via
/// swap) and forward it to listeners.
void WebcamImageProcessor::processImage(ImagePtr image)
{
    auto rotated = image->transformed(m_rotation);
    image->swap(rotated);
    Q_EMIT imageProcessed(image);
}
示例9: debug
// Integration test for the mock1 driver module: open the module, enumerate
// its devices, and take one exposure on every CCD of a mock camera.
void Mock1Test::testMock1() {
	debug(LOG_DEBUG, DEBUG_LOG, 0, "Mock1Test begin");
	ModulePtr module = repository->getModule("mock1");
	debug(LOG_DEBUG, DEBUG_LOG, 0, "got module");
	module->open();
	debug(LOG_DEBUG, DEBUG_LOG, 0, "module open");
	DeviceLocatorPtr cl = module->getDeviceLocator();
	debug(LOG_DEBUG, DEBUG_LOG, 0, "get DeviceLocator");
	std::vector<std::string> cameras = cl->getDevicelist();
	// BUG FIX: size() returns size_t, which does not match the %d
	// conversion when passed through varargs (undefined behavior on
	// LP64 platforms where size_t is wider than int); cast explicitly.
	debug(LOG_DEBUG, DEBUG_LOG, 0, "get %d devices",
		static_cast<int>(cameras.size()));
	CPPUNIT_ASSERT(cameras.size() == 10);
	CameraPtr camera = cl->getCamera("camera:mock1/5");
	// for every CCD, take an image
	for (unsigned int i = 0; i < camera->nCcds(); i++) {
		CcdPtr ccd = camera->getCcd(i);
		Exposure exposure;
		// expose a frame inset by one pixel on every side
		ImageRectangle frame(ImagePoint(1,1),
			ImageSize(ccd->getSize().width() - 2,
				ccd->getSize().height() - 2));
		exposure.frame(frame);
		ccd->startExposure(exposure);
		// poll until the exposure completes
		while (ccd->exposureStatus() == Exposure::exposing) {
			sleep(1);
		}
		if (ccd->exposureStatus() == Exposure::exposed) {
			ImagePtr image = ccd->getImage();
			debug(LOG_DEBUG, DEBUG_LOG, 0,
				"result image size: %d x %d",
				image->size().width(), image->size().height());
		}
	}
	debug(LOG_DEBUG, DEBUG_LOG, 0, "Mock1Test end");
}
示例10: TEST
// Create a single Image on the server, then delete it through the Delete2
// request API and verify the request completes without an ERR response.
TEST(DeleteTest, testSimpleDelete ) {
    Fixture f;
    f.login();
    ServiceFactoryPrx session = f.client->getSession();
    IQueryPrx queryService = session->getQueryService();
    IUpdatePrx updateService = session->getUpdateService();

    // Persist a fresh image so there is something to delete.
    ImagePtr persisted = new ImageI();
    persisted->setName( rstring("testSimpleDelete") );
    persisted = ImagePtr::dynamicCast( updateService->saveAndReturnObject( persisted ) );

    // Build a delete request targeting exactly that image id.
    omero::api::LongList targetIds;
    targetIds.push_back( persisted->getId()->getValue() );
    omero::api::StringLongListMap targets;
    targets["Image"] = targetIds;
    ChildOptions childOpts;

    Delete2Ptr request = new Delete2();
    request->targetObjects = targets;
    request->childOptions = childOpts;

    // Submit and wait for completion
    HandlePrx handle = session->submit( request );
    CmdCallbackIPtr callback = new CmdCallbackI(f.client, handle);
    ResponsePtr response = callback->loop(10, 500);

    ERRPtr err = ERRPtr::dynamicCast(response);
    if (err) {
        FAIL() << "Failed to delete image: " << err->category << ", " << err->name << endl;
    }
}
示例11: dataPicked
/**
 * Pick a point by tracing a ray from clickPosition along the camera view
 * direction and intersecting the scene.
 * \param[in] clickPosition the click position in DISPLAY coordinates
 * \param[in] renderer the renderer from which to get the camera
 *
 * Communicates via side effects rather than a return value: emits
 * dataPicked(uid) for every mesh/image whose vtk data set was hit, enters
 * dragging mode when the picker/tool geometry itself was clicked, and emits
 * pointPicked() with the intersection point when snap-to-surface is on.
 */
void PickerRep::pickLandmark(const Vector3D& clickPosition, vtkRendererPtr renderer)
{
if (!this->mEnabled)
return;
vtkMultiVolumePickerPtr picker = vtkMultiVolumePickerPtr::New();
int hit = picker->Pick(clickPosition[0], clickPosition[1], 0, renderer);
// Nothing under the cursor: cancel any drag in progress.
if (!hit)
{
mIsDragging = false;
return;
}
// search for picked data in manager, emit uid if found.
vtkDataSetPtr data = picker->GetDataSet();
if (data)
{
std::map<QString, DataPtr> allData = mDataManager->getData();
for (std::map<QString, DataPtr>::iterator iter = allData.begin(); iter != allData.end(); ++iter)
{
MeshPtr mesh = boost::dynamic_pointer_cast<Mesh>(iter->second);
if (mesh && mesh->getVtkPolyData() == data)
emit dataPicked(iter->first);
ImagePtr image = boost::dynamic_pointer_cast<Image>(iter->second);
if (image && image->getBaseVtkImageData() == data)
emit dataPicked(iter->first);
}
}
Vector3D pick_w(picker->GetPickPosition());
// Did the ray hit the picker/tool geometry itself (not patient data)?
if ( data &&
((mGraphicalPoint && (data == mGraphicalPoint->getPolyData() ))
||(mGlyph && (data == mGlyph->getVtkPolyData() ))
||(mTool && (data == mTool->getGraphicsPolyData() )))
)
{
// We have clicked the picker/tool itself.
// Store click pos and wait for dragging.
mClickedPoint = pick_w;
mIsDragging = true;
mCallbackCommand->SetAbortFlag(1); // abort this event: interactor does not receive it.
return;
}
else
{
mIsDragging = false;
}
// Snap-to-surface: publish the intersection point and move the markers.
if (hit && mSnapToSurface)
{
mPickedPoint = pick_w;
if (mGraphicalPoint)
mGraphicalPoint->setValue(mPickedPoint);
this->setGlyphCenter(mPickedPoint);
emit pointPicked(mPickedPoint);
}
}
示例12: if
// Resolve `filename` against `basepath` and return the bitmap from the
// cache, loading and caching it on first use. Returns an empty pointer when
// the URI scheme is unsupported or loading fails.
shared_ptr<Gdiplus::Bitmap> ImageCache::LoadImageFile(const String& basepath, const String& filename)
{
    URI uri = URI::Parse(filename);
    uri = URI::MakeAbsolute(basepath, uri);

    // Cache key: lower-cased absolute URI string.
    String cacheKey = LibCC::StringToLower(uri.ToString());
    CacheMap::iterator found = m_cache.find(cacheKey);
    if (found != m_cache.end())
        return found->second;

    ImagePtr bitmap;
    if (uri.scheme == URI::SchemeFile)
    {
        bitmap.reset(Gdiplus::Bitmap::FromFile(uri.path.c_str()));
    }
    else if (uri.scheme == URI::SchemeRes)
    {
        bitmap.reset(BitmapFromResource(_Module.GetResourceInstance(), uri.path.c_str(), L"FILE"));
    }

    // Only successful loads are cached.
    if (bitmap)
        m_cache[cacheKey] = bitmap;
    return bitmap;
}
示例13: greenptr
/**
 * \brief Combine image, mask and center into a color image
 *
 * Builds an RGB diagnostic image: red channel = the FWHM mask, green
 * channel = the original image, blue channel = a crosshair at the maximum
 * point merged with a circle at the measured center/radius.
 */
ImagePtr VCurveFocusWork::combine(ImagePtr image, FWHMInfo& fwhminfo) {
// first build the red channel from the mask
Image<unsigned char> *red
= dynamic_cast<Image<unsigned char> *>(&*fwhminfo.mask);
if (NULL == red) {
throw std::logic_error("internal error, mask has not 8bit pixel type");
}
// then build the green channel from the original image
Image<unsigned char> *green = FocusWork::green(image);
// greenptr presumably takes ownership of the freshly allocated channel
// so it is released on return — confirm ImagePtr semantics
ImagePtr greenptr(green);
// create the blue image
CrosshairAdapter<unsigned char> crosshair(image->size(), fwhminfo.maxpoint, 20);
CircleAdapter<unsigned char> circle(image->size(), fwhminfo.center,
fwhminfo.radius);
MaxAdapter<unsigned char> blue(crosshair, circle);
// now use a combination adapter to put all these images together
// into a single color image
CombinationAdapter<unsigned char> combinator(*red, *green, blue);
Image<RGB<unsigned char> > *result
= new Image<RGB<unsigned char> >(combinator);
return ImagePtr(result);
}
示例14: scaleTime
// Render one frame of the light animation: pick the animation frame for
// the current (scaled) elapsed time, center it on the anchor point, and
// draw it with the blend/stencil setup required by the lighting model.
void LightRendererAnimationInfo::render(Camera* cam, Layer* layer, RenderList& instances, RenderBackend* renderbackend) {
Point p = m_anchor.getCalculatedPoint(cam, layer, true);
if(m_anchor.getLayer() == layer) {
// wrap the scaled elapsed time into the animation's duration
int32_t animtime = scaleTime(m_time_scale, TimeManager::instance()->getTime() - m_start_time) % m_animation->getDuration();
ImagePtr img = m_animation->getFrameByTimestamp(animtime);
Rect r;
Rect viewport = cam->getViewPort();
// scale the frame by the camera zoom and center it on the anchor point
uint32_t width = static_cast<uint32_t>(round(img->getWidth() * cam->getZoom()));
uint32_t height = static_cast<uint32_t>(round(img->getHeight() * cam->getZoom()));
r.x = p.x-width/2;
r.y = p.y-height/2;
r.w = width;
r.h = height;
// only draw (and touch render state) when the frame is on screen
if(r.intersects(viewport)) {
uint8_t lm = renderbackend->getLightingModel();
img->render(r);
// stencil mode takes precedence; otherwise lighting model 1 gets
// its own blend/stencil configuration
if (m_stencil) {
renderbackend->changeRenderInfos(RENDER_DATA_WITHOUT_Z, 1, m_src, m_dst, false, true, m_stencil_ref, INCR, GEQUAL);
} else if (lm == 1) {
renderbackend->changeRenderInfos(RENDER_DATA_WITHOUT_Z, 1, m_src, m_dst, false, true, 255, KEEP, NOTEQUAL);
}
}
}
}
示例15: calculateGlyphsWidthsAutomatically
void BitmapFont::calculateGlyphsWidthsAutomatically(const ImagePtr& image, const Size& glyphSize)
{
if(!image)
return;
int numHorizontalGlyphs = image->getSize().width() / glyphSize.width();
auto texturePixels = image->getPixels();
// small AI to auto calculate pixels widths
for(int glyph = m_firstGlyph; glyph< 256; ++glyph) {
Rect glyphCoords(((glyph - m_firstGlyph) % numHorizontalGlyphs) * glyphSize.width(),
((glyph - m_firstGlyph) / numHorizontalGlyphs) * glyphSize.height(),
glyphSize.width(),
m_glyphHeight);
int width = glyphSize.width();
for(int x = glyphCoords.left(); x <= glyphCoords.right(); ++x) {
int filledPixels = 0;
// check if all vertical pixels are alpha
for(int y = glyphCoords.top(); y <= glyphCoords.bottom(); ++y) {
if(texturePixels[(y * image->getSize().width() * 4) + (x*4) + 3] != 0)
filledPixels++;
}
if(filledPixels > 0)
width = x - glyphCoords.left() + 1;
}
// store glyph size
m_glyphsSize[glyph].resize(width, m_glyphHeight);
}
}