

C++ DataContainer::addData Method Code Examples

This article collects typical usage examples of the C++ DataContainer::addData method. If you are wondering what exactly DataContainer::addData does in C++, how to call it, or what real uses of it look like, the selected code examples below may help. You can also explore further usage examples of the DataContainer class that this method belongs to.


The following presents 15 code examples of the DataContainer::addData method, sorted by popularity by default.
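
All 15 examples share the same basic pattern: a processor reads its inputs from the DataContainer through scoped accessors (ImageRepresentationGL::ScopedRepresentation, ScopedTypedData<...>), produces a new data object, and publishes it under a string ID via addData. The minimal sketch below distills that pattern; the class, property, and member names are hypothetical, and only the accessor types and the addData call mirror the usage shown in the examples.

    // Minimal sketch of the recurring pattern (hypothetical processor; only the
    // scoped accessors and DataContainer::addData mirror the examples below).
    void MyProcessor::updateResult(DataContainer& data) {
        // look up the input image; the scoped representation handles the DataContainer lookup
        ImageRepresentationGL::ScopedRepresentation img(data, p_inputImage.getValue());

        if (img != nullptr) {
            // ... render into _fbo or compute a new ImageData here ...

            // publish the result under a string ID; every example passes a freshly
            // allocated object, so the container is expected to take ownership of it
            data.addData(p_outputImage.getValue(), new RenderData(_fbo));
        }
        else {
            LDEBUG("No suitable input image found.");
        }
    }

A downstream processor then retrieves the result by the matching ID, for instance via ScopedTypedData<RenderData>, as shown in Example 8 (ViewportSplitter).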

Example 1: renderImageImpl

    void SliceExtractor::renderImageImpl(DataContainer& dataContainer, const ImageRepresentationGL::ScopedRepresentation& img) {

        // prepare OpenGL
        _shader->activate();
        cgt::TextureUnit inputUnit, tfUnit;
        img->bind(_shader, inputUnit);
        p_transferFunction.getTF()->bind(_shader, tfUnit);

        cgt::mat4 identity = cgt::mat4::identity;

        _shader->setUniform("_texCoordsMatrix", _texCoordMatrix);
        _shader->setUniform("_modelMatrix", identity);
        _shader->setUniform("_viewMatrix", _viewMatrix);
        _shader->setUniform("_projectionMatrix", identity);
        _shader->setUniform("_useTexturing", true);
        _shader->setUniform("_useSolidColor", true);

        // render slice
        FramebufferActivationGuard fag(this);
        createAndAttachColorTexture();
        createAndAttachDepthTexture();
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
        QuadRdr.renderQuad();

        if (p_renderCrosshair.getValue())
            renderCrosshair(img);

        renderGeometry(dataContainer, img);

        _shader->deactivate();
        cgt::TextureUnit::setZeroUnit();

        dataContainer.addData(p_targetImageID.getValue(), new RenderData(_fbo));
    }
Developer: tusharuiit, Project: 2014-2015_HiwiMedicalXTTVisualization, Lines: 34, Source: sliceextractor.cpp

Example 2: updateResult

    void MultiVolumeRaycaster::updateResult(DataContainer& dataContainer) {
        ImageRepresentationGL::ScopedRepresentation image1(dataContainer, p_sourceImage1.getValue());
        ImageRepresentationGL::ScopedRepresentation image2(dataContainer, p_sourceImage2.getValue());
        ImageRepresentationGL::ScopedRepresentation image3(dataContainer, p_sourceImage3.getValue());
        ScopedTypedData<CameraData> camera(dataContainer, p_camera.getValue());
        ScopedTypedData<RenderData> geometryImage(dataContainer, p_geometryImageId.getValue(), true);
        ScopedTypedData<LightSourceData> light(dataContainer, p_lightId.getValue());

        std::vector<const ImageRepresentationGL*> images;
        if (image1) {
            images.push_back(image1);

            if (getInvalidationLevel() & INVALID_VOXEL_HIERARCHY1){
                _vhm1->createHierarchy(image1, p_transferFunction1.getTF());
                validate(INVALID_VOXEL_HIERARCHY1);
            }
        }
        if (image2) {
            images.push_back(image2);

            if (getInvalidationLevel() & INVALID_VOXEL_HIERARCHY2){
                _vhm2->createHierarchy(image2, p_transferFunction2.getTF());
                validate(INVALID_VOXEL_HIERARCHY2);
            }
        }
        if (image3) {
            images.push_back(image3);

            if (getInvalidationLevel() & INVALID_VOXEL_HIERARCHY3){
                _vhm3->createHierarchy(image3, p_transferFunction3.getTF());
                validate(INVALID_VOXEL_HIERARCHY3);
            }
        }
        

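        // proceed only when all three input volumes and a camera are available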
        if (images.size() >= 3 && camera != nullptr) {
            auto eepp = computeEntryExitPoints(images, camera, geometryImage);
            dataContainer.addData(p_outputImageId.getValue() + ".entrypoints", eepp.first);
            dataContainer.addData(p_outputImageId.getValue() + ".exitpoints", eepp.second);

            auto rc = performRaycasting(dataContainer, images, camera, eepp.first, eepp.second, light);
            dataContainer.addData(p_outputImageId.getValue(), rc);
        }
        else {
            LDEBUG("No suitable input data found!");
        }
    }
Developer: tusharuiit, Project: 2014-2015_HiwiMedicalXTTVisualization, Lines: 47, Source: multivolumeraycaster.cpp

Example 3: updateResult

void FiberReader::updateResult(DataContainer& dataContainer) {
    const std::string& fileName = p_url.getValue();
    if (cgt::FileSystem::fileExtension(fileName) == "trk") {
        dataContainer.addData(p_outputId.getValue(), readTrkFile(fileName));
    }
    else {
        LERROR("Unknown file extension.");
    }
}
Developer: tusharuiit, Project: 2014-2015_HiwiMedicalXTTVisualization, Lines: 9, Source: fiberreader.cpp

Example 4: updateResult

    void RawImageReader::updateResult(DataContainer& data) {
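        // derive the dimensionality from the configured size: z == 1 collapses to 2D, and y == 1 further to 1D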
        size_t dimensionality = 3;
        if (p_size.getValue().z == 1) {
            dimensionality = (p_size.getValue().y == 1) ? 1 : 2;
        }

        ImageData* image = new ImageData(dimensionality, p_size.getValue(), p_numChannels.getValue());
        ImageRepresentationDisk::create(image, p_url.getValue(), p_baseType.getOptionValue(), p_offset.getValue(), p_endianness.getOptionValue());
        image->setMappingInformation(ImageMappingInformation(p_size.getValue(), p_imageOffset.getValue(), p_voxelSize.getValue()));
        data.addData(p_targetImageID.getValue(), image);
    }
Developer: tusharuiit, Project: 2014-2015_HiwiMedicalXTTVisualization, Lines: 11, Source: rawimagereader.cpp

Example 5: updateResult

    void TextRenderer::updateResult(DataContainer& data) {
        if (_atlas == nullptr)
            return;

        FramebufferActivationGuard fag(this);
        createAndAttachColorTexture();
        createAndAttachDepthTexture();

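        // trafoMatrix maps pixel coordinates to normalized device coordinates (scale by 2/viewport size, then translate by -1)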
        const cgt::mat4 trafoMatrix = cgt::mat4::createTranslation(cgt::vec3(-1.f, -1.f, 0.f)) * cgt::mat4::createScale(cgt::vec3(2.f / _viewportSizeProperty->getValue().x, 2.f / _viewportSizeProperty->getValue().y, 1.f));
        cgt::vec2 pos(static_cast<float>(p_position.getValue().x), static_cast<float>(_viewportSizeProperty->getValue().y - p_position.getValue().y));
        _atlas->renderText(p_text.getValue(), pos, p_color.getValue(), trafoMatrix);

        data.addData(p_outputImage.getValue(), new RenderData(_fbo));
    }
Developer: tusharuiit, Project: 2014-2015_HiwiMedicalXTTVisualization, Lines: 14, Source: textrenderer.cpp

Example 6: updateResult

void GlGradientVolumeGenerator::updateResult(DataContainer& data) {
    ImageRepresentationGL::ScopedRepresentation img(data, p_inputImage.getValue());

    if (img != 0) {
        const cgt::svec3& size = img->getSize();

        cgt::TextureUnit inputUnit;
        inputUnit.activate();

        // create texture for result
        cgt::Texture* resultTexture = new cgt::Texture(GL_TEXTURE_3D, cgt::ivec3(size), GL_RGB16F, cgt::Texture::LINEAR);

        // activate shader and bind textures
        _shader->activate();
        img->bind(_shader, inputUnit);

        // activate FBO and attach texture
        _fbo->activate();
        glViewport(0, 0, static_cast<GLsizei>(size.x), static_cast<GLsizei>(size.y));

        // render one quad per slice; the shader computes the gradient for each slice
        for (int z = 0; z < static_cast<int>(size.z); ++z) {
            float zTexCoord = static_cast<float>(z)/static_cast<float>(size.z) + .5f/static_cast<float>(size.z);
            _shader->setUniform("_zTexCoord", zTexCoord);
            _fbo->attachTexture(resultTexture, GL_COLOR_ATTACHMENT0, 0, z);
            QuadRdr.renderQuad();
        }
        _fbo->detachAll();
        _fbo->deactivate();
        _shader->deactivate();

        // put resulting image into DataContainer
        ImageData* id = new ImageData(3, size, 3);
        ImageRepresentationGL::create(id, resultTexture);
        id->setMappingInformation(img->getParent()->getMappingInformation());
        data.addData(p_outputImage.getValue(), id);

        cgt::TextureUnit::setZeroUnit();
        LGL_ERROR;
    }
    else {
        LDEBUG("No suitable input image found.");
    }
}
Developer: tusharuiit, Project: 2014-2015_HiwiMedicalXTTVisualization, Lines: 44, Source: glgradientvolumegenerator.cpp

Example 7: updateResult

    void CudaConfidenceMapsSolver::updateResult(DataContainer& data) {

        ImageRepresentationLocal::ScopedRepresentation img(data, p_inputImage.getValue());
        if (img != 0) {
            bool use8Neighbourhood = p_use8Neighbourhood.getValue();
            float gradientScaling = p_gradientScaling.getValue();
            float alpha = p_paramAlpha.getValue();
            float beta = p_paramBeta.getValue();
            float gamma = p_paramGamma.getValue();

            // Setup the solver with the current Alpha-Beta-Filter settings
            _solver.enableAlphaBetaFilter(p_useAlphaBetaFilter.getValue());
            _solver.setAlphaBetaFilterParameters(p_filterAlpha.getValue(), p_filterBeta.getValue());

            cgt::ivec3 size = img->getSize();
            auto image = (unsigned char*)img->getWeaklyTypedPointer()._pointer;

            // Copy the image on the GPU and generate the equation system
            _solver.uploadImage(image, size.x, size.y, gradientScaling, alpha, beta, gamma, use8Neighbourhood);

            // Solve the equation system using Conjugate Gradient
            if (p_useFixedIterationCount.getValue()) {
                _solver.solveWithFixedIterationCount(p_iterationBudget.getValue());
            }
            else {
                _solver.solveWithFixedTimeBudget(p_millisecondBudget.getValue());
            }

            const float *solution = _solver.getSolution(size.x, size.y);

            // FIXME: Instead of copying the solution to a local representation first it would make
            // sense to directly create an opengl representation!
            ImageData *id = new ImageData(img->getParent()->getDimensionality(), size, img->getParent()->getNumChannels());
            cgt::Texture* resultTexture = new cgt::Texture(GL_TEXTURE_2D, size, GL_R32F, cgt::Texture::LINEAR);
            resultTexture->setWrapping(cgt::Texture::MIRRORED_REPEAT);
            resultTexture->uploadTexture(reinterpret_cast<const GLubyte*>(solution), GL_RED, GL_FLOAT);
            ImageRepresentationGL::create(id, resultTexture);
            id->setMappingInformation(img->getParent()->getMappingInformation());
            data.addData(p_outputConfidenceMap.getValue(), id);
        }
    }
Developer: tusharuiit, Project: 2014-2015_HiwiMedicalXTTVisualization, Lines: 41, Source: cudaconfidencemapssolver.cpp

Example 8: render

    void ViewportSplitter::render(DataContainer& dataContainer) {
        cgt::vec2 vps(p_viewportSizeProperty->getValue());
        cgt::vec2 evps(p_subViewViewportSize.getValue());

        cgt::TextureUnit rtUnit, colorUnit, depthUnit;
        rtUnit.activate();
        cgt::Texture* tex = new cgt::Texture(GL_TEXTURE_2D, cgt::ivec3(p_viewportSizeProperty->getValue(), 1), GL_RGBA8, cgt::Texture::LINEAR);
        tex->setWrapping(cgt::Texture::CLAMP_TO_EDGE);

        _fbo->activate();
        _fbo->attachTexture(tex, GL_COLOR_ATTACHMENT0);
        glViewport(0, 0, static_cast<GLsizei>(vps.x), static_cast<GLsizei>(vps.y));

        _copyShader->activate();
        _copyShader->setUniform("_projectionMatrix", cgt::mat4::createOrtho(0, vps.x, vps.y, 0, -1, 1));
        glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);

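        // copy each sub-view's render target into its tile of the combined output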
        for (size_t i = 0; i < _numSubViews; ++i) {
            if (p_inputImageIds[i] != nullptr) {
                ScopedTypedData<RenderData> rd(dataContainer, p_inputImageIds[i]->getValue());
                if (rd != nullptr) {
                    rd->bind(_copyShader, colorUnit, depthUnit);

                    _copyShader->setUniform("_modelMatrix", cgt::mat4::createScale(cgt::vec3(evps.x, evps.y, .5f)));
                    if (_splitMode == HORIZONTAL)
                        _copyShader->setUniform("_viewMatrix", cgt::mat4::createTranslation(cgt::vec3(float(i) * evps.x, 0.f, 0.f)));
                    else if (_splitMode == VERTICAL)
                        _copyShader->setUniform("_viewMatrix", cgt::mat4::createTranslation(cgt::vec3(0.f, float(_numSubViews - i - 1) * evps.y, 0.f)));

                    _quad->render(GL_TRIANGLE_FAN);
                }
            }
        }

        _copyShader->deactivate();
        dataContainer.addData(p_outputImageId.getValue(), new RenderData(_fbo));

        _fbo->detachAll();
        _fbo->deactivate();
    }
Developer: tusharuiit, Project: 2014-2015_HiwiMedicalXTTVisualization, Lines: 40, Source: viewportsplitter.cpp

Example 9: processImpl

    void SimpleRaycaster::processImpl(DataContainer& data, ImageRepresentationGL::ScopedRepresentation& image) {
        ScopedTypedData<LightSourceData> light(data, p_lightId.getValue());

        if (p_enableShading.getValue() == false || light != nullptr) {
            FramebufferActivationGuard fag(this);
            createAndAttachTexture(GL_RGBA8);
            createAndAttachTexture(GL_RGBA32F);
            createAndAttachTexture(GL_RGBA32F);
            createAndAttachDepthTexture();

            static const GLenum buffers[] = { GL_COLOR_ATTACHMENT0, GL_COLOR_ATTACHMENT1 , GL_COLOR_ATTACHMENT2 };
            glDrawBuffers(3, buffers);

            if (p_enableShading.getValue() && light != nullptr) {
                light->bind(_shader, "_lightSource");
            }
            if (p_enableShadowing.getValue()) {
                _shader->setUniform("_shadowIntensity", p_shadowIntensity.getValue());
            }

            glEnable(GL_DEPTH_TEST);
            glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
            QuadRdr.renderQuad();

            // restore state
            glDrawBuffers(1, buffers);
            glDisable(GL_DEPTH_TEST);
            LGL_ERROR;

            data.addData(p_targetImageID.getValue(), new RenderData(_fbo));
        }
        else {
            LDEBUG("Could not load light source from DataContainer.");
        }
    }
Developer: tusharuiit, Project: 2014-2015_HiwiMedicalXTTVisualization, Lines: 38, Source: simpleraycaster.cpp

Example 10: updateResult

    void VirtualMirrorCombine::updateResult(DataContainer& data) {
        ScopedTypedData<RenderData> normalImage(data, p_normalImageID.getValue());
        ScopedTypedData<RenderData> mirrorImage(data, p_mirrorImageID.getValue());
        ScopedTypedData<RenderData> mirrorRendered(data, p_mirrorRenderID.getValue());

        if (normalImage != 0 && mirrorImage != 0 && mirrorRendered != 0) {
            glEnable(GL_DEPTH_TEST);
            glDepthFunc(GL_ALWAYS);

            FramebufferActivationGuard fag(this);
            createAndAttachColorTexture();
            createAndAttachDepthTexture();

            _shader->activate();
            decorateRenderProlog(data, _shader);

            cgt::TextureUnit normalColorUnit, normalDepthUnit, mirrorColorUnit, mirrorDepthUnit, mirrorRenderedDepthUnit;
            normalImage->bind(_shader, normalColorUnit, normalDepthUnit, "_normalColor", "_normalDepth", "_normalTexParams");
            mirrorImage->bind(_shader, mirrorColorUnit, mirrorDepthUnit, "_mirrorColor", "_mirrorDepth", "_mirrorTexParams");
            mirrorRendered->bindDepthTexture(_shader, mirrorRenderedDepthUnit, "_mirrorRenderedDepth", "_mirrorRenderedTexParams");

            glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
            QuadRdr.renderQuad();

            _shader->deactivate();
            cgt::TextureUnit::setZeroUnit();
            glDepthFunc(GL_LESS);
            glDisable(GL_DEPTH_TEST);
            LGL_ERROR;

            data.addData(p_targetImageID.getValue(), new RenderData(_fbo));
        }
        else {
            LDEBUG("No suitable input images found.");
        }
    }
Developer: tusharuiit, Project: 2014-2015_HiwiMedicalXTTVisualization, Lines: 36, Source: virtualmirrorcombine.cpp

Example 11: ReadImageDirect


//......... part of the code has been omitted here .........

            LDEBUG("Reading Image with Reader " << imageIO->GetNameOfClass());
            LDEBUG("Pixel Type is " << imageIO->GetComponentTypeAsString(pixelType));
            LDEBUG("numDimensions: " << numDimensions);

            if (numDimensions > 3) {
                LERROR("Error: Dimensions higher than 3 not supported!");
                return;
            }

            itk::ImageIORegion ioRegion(numDimensions);
            itk::ImageIORegion::IndexType ioStart = ioRegion.GetIndex();
            itk::ImageIORegion::SizeType ioSize = ioRegion.GetSize();

            cgt::vec3 imageOffset(0.f);
            cgt::vec3 voxelSize(1.f);
            cgt::ivec3 size_i(1);

            // we ensured above that numDimensions is <= 3
            for (int i = 0; i < static_cast<int>(numDimensions); i++) {
                size_i[i] = imageIO->GetDimensions(i);
                imageOffset[i] = imageIO->GetOrigin(i);
                voxelSize[i] = imageIO->GetSpacing(i);
                ioStart[i] = 0;
                ioSize[i] = size_i[i];
            }

            cgt::svec3 size(size_i);
            size_t dimensionality = (size_i[2] == 1) ? ((size_i[1] == 1) ? 1 : 2) : 3;

            LDEBUG("Image Size is " << size);
            LDEBUG("Voxel Size is " << voxelSize);
            LDEBUG("Image Offset is " << imageOffset);
            LDEBUG("component size: " << imageIO->GetComponentSize());
            LDEBUG("components: " << imageIO->GetNumberOfComponents());
            LDEBUG("pixel type (string): " << imageIO->GetPixelTypeAsString(imageIO->GetPixelType())); // 'vector'
            LDEBUG("pixel type: " << imageIO->GetPixelType()); // '5'

            switch (pixelType) {
            case itk::ImageIOBase::CHAR:
                wtp._baseType = WeaklyTypedPointer::INT8; break;
            case itk::ImageIOBase::UCHAR:
                wtp._baseType = WeaklyTypedPointer::UINT8; break;
            case itk::ImageIOBase::SHORT:
                wtp._baseType = WeaklyTypedPointer::INT16; break;
            case itk::ImageIOBase::USHORT:
                wtp._baseType = WeaklyTypedPointer::UINT16; break;
            case itk::ImageIOBase::INT:
                wtp._baseType = WeaklyTypedPointer::INT32; break;
            case itk::ImageIOBase::UINT:
                wtp._baseType = WeaklyTypedPointer::UINT32; break;
            case itk::ImageIOBase::DOUBLE:
                LWARNING("Pixel Type is DOUBLE. Conversion to float may result in loss of precision!");
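                // intentional fall-through: DOUBLE volumes are read and converted to float below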
            case itk::ImageIOBase::FLOAT:
                wtp._baseType = WeaklyTypedPointer::FLOAT; break;


            default:
                LERROR("Error while loading ITK image: unsupported type: " << pixelType);
                return;
            }

            wtp._numChannels = imageIO->GetNumberOfComponents();

            //Setup the image region to read
            ioRegion.SetIndex(ioStart);
            ioRegion.SetSize(ioSize);
            imageIO->SetIORegion(ioRegion);

            if (pixelType != itk::ImageIOBase::DOUBLE) {
                //Finally, allocate buffer and read the image data
                wtp._pointer = new uint8_t[imageIO->GetImageSizeInBytes()];
                imageIO->Read(wtp._pointer);
            }
            else {
                // convert the double volume to a float volume
                double * inputBuf = new double[imageIO->GetImageSizeInComponents()];
                wtp._pointer = new uint8_t[imageIO->GetImageSizeInComponents() * sizeof(float)];
                imageIO->Read(inputBuf);

                double * dptr = inputBuf;
                float * fptr = static_cast<float*>(wtp._pointer);
                for (int i = 0, s = imageIO->GetImageSizeInComponents(); i < s; ++i) {
                    *fptr = *dptr;
                    fptr++;
                    dptr++;
                }
                delete[] inputBuf;
            }

            ImageData* image = new ImageData(dimensionality, size, wtp._numChannels);
            ImageRepresentationLocal::create(image, wtp);

            image->setMappingInformation(ImageMappingInformation(size, imageOffset/* + p_imageOffset.getValue()*/, voxelSize /** p_voxelSize.getValue()*/));
            data.addData(p_targetImageID.getValue(), image);
        }
        else {
            LWARNING("Unable to create ImageIO Instance; No suitable reader found!");
        }
    }
Developer: tusharuiit, Project: 2014-2015_HiwiMedicalXTTVisualization, Lines: 101, Source: itkreader.cpp

Example 12: ReadImageSeries


//......... part of the code has been omitted here .........

            cgt::svec3 size(size_i);
            size_t dimensionality = (size_i[2] == 1) ? ((size_i[1] == 1) ? 1 : 2) : 3;
            if (dimensionality > 2) {
                LERROR("Error: Cannot load image series with more than two dimensions!");
                return;
            }

            LDEBUG("Image Size is " << size);
            LDEBUG("Voxel Size is " << voxelSize);
            LDEBUG("Image Offset is " << imageOffset);
            LDEBUG("component size: " << imageIO->GetComponentSize());
            LDEBUG("components: " << imageIO->GetNumberOfComponents());
            LDEBUG("pixel type (string): " << imageIO->GetPixelTypeAsString(imageIO->GetPixelType()));
            LDEBUG("pixel type: " << imageIO->GetPixelType());

            switch (pixelType) {
            case itk::ImageIOBase::CHAR:
                wtp._baseType = WeaklyTypedPointer::INT8; break;
            case itk::ImageIOBase::UCHAR:
                wtp._baseType = WeaklyTypedPointer::UINT8; break;
            case itk::ImageIOBase::SHORT:
                wtp._baseType = WeaklyTypedPointer::INT16; break;
            case itk::ImageIOBase::USHORT:
                wtp._baseType = WeaklyTypedPointer::UINT16; break;
            case itk::ImageIOBase::INT:
                wtp._baseType = WeaklyTypedPointer::INT32; break;
            case itk::ImageIOBase::UINT:
                wtp._baseType = WeaklyTypedPointer::UINT32; break;
            case itk::ImageIOBase::DOUBLE:
                LWARNING("Pixel Type is DOUBLE. Conversion to float may result in loss of precision!");
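                // intentional fall-through: DOUBLE volumes are read and converted to float below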
            case itk::ImageIOBase::FLOAT:
                wtp._baseType = WeaklyTypedPointer::FLOAT; break;


            default:
                LERROR("Error while loading ITK image: unsupported type: " << pixelType);
                return;
            }

            wtp._numChannels = imageIO->GetNumberOfComponents();

            //Setup the image region to read
            ioRegion.SetIndex(ioStart);
            ioRegion.SetSize(ioSize);
            imageIO->SetIORegion(ioRegion);

            //allocate a temporary buffer if necessary
            double* inputBuf = (pixelType == itk::ImageIOBase::DOUBLE) ? new double[imageIO->GetImageSizeInComponents()] : nullptr;
            size_t sliceSize = (pixelType == itk::ImageIOBase::DOUBLE) ? imageIO->GetImageSizeInComponents() * sizeof(float) : imageIO->GetImageSizeInBytes();
            wtp._pointer = new uint8_t[numSlices * sliceSize];
            for (int idx = 0; idx < numSlices; ++idx) {
                itk::ImageIOBase::Pointer fileIO = imageIO;
                    //itk::ImageIOFactory::CreateImageIO(imageFileNames[idx].c_str(), itk::ImageIOFactory::ReadMode);
                fileIO->SetFileName(imageFileNames[idx]);
                fileIO->ReadImageInformation();
                fileIO->SetIORegion(ioRegion);

                size_t currentSliceSize = (pixelType == itk::ImageIOBase::DOUBLE) ? imageIO->GetImageSizeInComponents() * sizeof(float) : fileIO->GetImageSizeInBytes();
                if (currentSliceSize != sliceSize) {
                    LERROR("Image " << imageFileNames[idx] << " has different dimensionality or data type!");
                    delete[] static_cast<uint8_t*>(wtp._pointer);
                    delete[] inputBuf;
                    wtp._pointer = nullptr;
                    return;
                }

                uint8_t* sliceBuffer = static_cast<uint8_t*>(wtp._pointer) + idx * sliceSize;

                if (pixelType != itk::ImageIOBase::DOUBLE) {
                    // directly read slice into buffer
                    fileIO->Read(sliceBuffer);
                }
                else {
                    // convert the double slice to a float slice
                    fileIO->Read(inputBuf);

                    double* dptr = inputBuf;
                    float* fptr = reinterpret_cast<float*>(sliceBuffer);
                    for (int i = 0, s = fileIO->GetImageSizeInComponents(); i < s; ++i) {
                        *fptr = static_cast<float>(*dptr);
                        fptr++;
                        dptr++;
                    }
                }
            }
            delete[] inputBuf;

            size[2] = numSlices;
            //series adds one dimension
            ImageData* image = new ImageData(dimensionality+1, size, wtp._numChannels);
            ImageRepresentationLocal::create(image, wtp);

            image->setMappingInformation(ImageMappingInformation(size, imageOffset/* + p_imageOffset.getValue()*/, voxelSize /** p_voxelSize.getValue()*/));
            data.addData(p_targetImageID.getValue(), image);
        }
        else {
            LWARNING("Unable to create ImageIO Instance; No suitable reader found!");
        }
    }
Developer: tusharuiit, Project: 2014-2015_HiwiMedicalXTTVisualization, Lines: 101, Source: itkreader.cpp

Example 13: updateResult

    void GlGaussianFilter::updateResult(DataContainer& data) {
        ImageRepresentationGL::ScopedRepresentation img(data, p_inputImage.getValue());

        if (img != 0) {
            if (img->getParent()->getDimensionality() > 1) {
                cgt::ivec3 size = img->getSize();
                int halfKernelSize = static_cast<int>(2.5 * p_sigma.getValue());
                cgtAssert(halfKernelSize < MAX_HALF_KERNEL_SIZE, "halfKernelSize too big -> kernel uniform buffer will be out of bounds!");

                cgt::TextureUnit inputUnit, kernelUnit;
                inputUnit.activate();

                // create texture for result
                cgt::Texture* resultTextures[2];
                for (size_t i = 0; i < 2; ++i) {
                    resultTextures[i] = new cgt::Texture(img->getTexture()->getType(), size, img->getTexture()->getInternalFormat(), cgt::Texture::LINEAR);
                }

                // we need to distinguish 2D and 3D case
                cgt::Shader* leShader = (size.z == 1) ? _shader2D : _shader3D;

                // activate shader
                leShader->activate();
                leShader->setUniform("_halfKernelSize", halfKernelSize);

                // bind kernel buffer texture
                kernelUnit.activate();
                glBindTexture(GL_TEXTURE_BUFFER, _kernelBufferTexture);
                glTexBuffer(GL_TEXTURE_BUFFER, GL_R32F, _kernelBuffer->getId());
                leShader->setUniform("_kernel", kernelUnit.getUnitNumber());
                LGL_ERROR;

                // activate FBO and attach texture
                _fbo->activate();
                glViewport(0, 0, static_cast<GLsizei>(size.x), static_cast<GLsizei>(size.y));

                // start 3 passes of convolution: in X, Y and Z direction:
                {
                    // X pass
                    leShader->setUniform("_direction", cgt::ivec3(1, 0, 0));
                    img->bind(leShader, inputUnit);

                    // render one quad per slice; the shader convolves this slice along X
                    for (int z = 0; z < size.z; ++z) {
                        float zTexCoord = static_cast<float>(z)/static_cast<float>(size.z) + .5f/static_cast<float>(size.z);
                        if (size.z > 1)
                            leShader->setUniform("_zTexCoord", zTexCoord);
                        _fbo->attachTexture(resultTextures[0], GL_COLOR_ATTACHMENT0, 0, z);
                        LGL_ERROR;
                        QuadRdr.renderQuad();
                    }
                }
                {
                    // Y pass
                    leShader->setUniform("_direction", cgt::ivec3(0, 1, 0));
                    inputUnit.activate();
                    resultTextures[0]->bind();

                    // render one quad per slice; the shader convolves this slice along Y
                    for (int z = 0; z < size.z; ++z) {
                        float zTexCoord = static_cast<float>(z)/static_cast<float>(size.z) + .5f/static_cast<float>(size.z);
                        if (size.z > 1)
                            leShader->setUniform("_zTexCoord", zTexCoord);
                        _fbo->attachTexture(resultTextures[1], GL_COLOR_ATTACHMENT0, 0, z);
                        LGL_ERROR;
                        QuadRdr.renderQuad();
                    }
                }
                // we need the third pass only in the 3D case
                if (size.z > 1) {
                    // Z pass
                    leShader->setUniform("_direction", cgt::ivec3(0, 0, 1));
                    inputUnit.activate();
                    resultTextures[1]->bind();

                    // render one quad per slice; the shader convolves this slice along Z
                    for (int z = 0; z < size.z; ++z) {
                        float zTexCoord = static_cast<float>(z)/static_cast<float>(size.z) + .5f/static_cast<float>(size.z);
                        leShader->setUniform("_zTexCoord", zTexCoord);
                        _fbo->attachTexture(resultTextures[0], GL_COLOR_ATTACHMENT0, 0, z);
                        LGL_ERROR;
                        QuadRdr.renderQuad();
                    }
                }
                else {
                    // in the 2D case we just swap the result textures, so that we write the correct image out in the lines below.
                    std::swap(resultTextures[0], resultTextures[1]);
                }

                _fbo->detachAll();
                _fbo->deactivate();
                leShader->deactivate();

                // put resulting image into DataContainer
                ImageData* id = new ImageData(img->getParent()->getDimensionality(), size, img->getParent()->getNumChannels());
                ImageRepresentationGL::create(id, resultTextures[0]);
                id->setMappingInformation(img->getParent()->getMappingInformation());
                data.addData(p_outputImage.getValue(), id);

                delete resultTextures[1];
//......... part of the code has been omitted here .........
Developer: tusharuiit, Project: 2014-2015_HiwiMedicalXTTVisualization, Lines: 101, Source: glgaussianfilter.cpp

Example 14: img

    void SliceRenderer3D::updateResult(DataContainer& data) {
        std::cout << "Entering updateResult of SliceRenderer3D " << std::endl;
        ImageRepresentationGL::ScopedRepresentation img(data, p_sourceImageID.getValue());
        ScopedTypedData<CameraData> camera(data, p_camera.getValue());

        if (img != nullptr && camera != nullptr) {
            if (img->getDimensionality() == 3) {
                const cgt::Camera& cam = camera->getCamera();

                // Creating the slice proxy geometry works as follows:
                // Create the cube proxy geometry for the volume, then clip the cube against the slice plane.
                // The closing face is the slice proxy geometry.
                // This is probably not the fastest, but an elegant solution, which also supports arbitrary slice orientations. :)
                cgt::Bounds volumeExtent = img->getParent()->getWorldBounds();
                std::unique_ptr<MeshGeometry> cube = GeometryDataFactory::createCube(volumeExtent, cgt::Bounds(cgt::vec3(0.f), cgt::vec3(1.f)));

                cgt::vec3 normal(0.f, 0.f, 0.f);
                float p = 0.0f;

                switch (p_sliceOrientation.getOptionValue()) {
                case XY_PLANE:
                    normal = cgt::vec3(0.f, 0.f, 1.f);
                    p = img->getParent()->getMappingInformation().getOffset().z + (p_sliceNumber.getValue() * img->getParent()->getMappingInformation().getVoxelSize().z);
                    break;
                case XZ_PLANE:
                    normal = cgt::vec3(0.f, 1.f, 0.f);
                    p = img->getParent()->getMappingInformation().getOffset().y + (p_sliceNumber.getValue() * img->getParent()->getMappingInformation().getVoxelSize().y);
                    break;
                case YZ_PLANE:
                    normal = cgt::vec3(1.f, 0.f, 0.f);
                    p = img->getParent()->getMappingInformation().getOffset().x + (p_sliceNumber.getValue() * img->getParent()->getMappingInformation().getVoxelSize().x);
                    break;
                }
                MeshGeometry clipped = cube->clipAgainstPlane(p, normal, true);
                const FaceGeometry& slice = clipped.getFaces().back(); // the last face is the closing face

                glEnable(GL_DEPTH_TEST);
                _shader->activate();

                _shader->setIgnoreUniformLocationError(true);
                _shader->setUniform("_viewportSizeRCP", 1.f / cgt::vec2(getEffectiveViewportSize()));
                _shader->setUniform("_projectionMatrix", cam.getProjectionMatrix());
                _shader->setUniform("_viewMatrix", cam.getViewMatrix());

                cgt::TextureUnit inputUnit, tfUnit;
                img->bind(_shader, inputUnit);
                p_transferFunction.getTF()->bind(_shader, tfUnit);

                FramebufferActivationGuard fag(this);
                createAndAttachColorTexture();
                createAndAttachDepthTexture();
                glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
                slice.render(GL_TRIANGLE_FAN);

                _shader->deactivate();
                cgt::TextureUnit::setZeroUnit();
                glDisable(GL_DEPTH_TEST);

                data.addData(p_targetImageID.getValue(), new RenderData(_fbo));
            }
            else {
                LERROR("Input image must have dimensionality of 3.");
            }
        }
        else {
            LDEBUG("No suitable input image found.");
        }
        std::cout << "Exiting updateResult of SliceRenderer3D " << std::endl;
    }
Developer: tusharuiit, Project: 2014-2015_HiwiMedicalXTTVisualization, Lines: 69, Source: slicerenderer3d.cpp

Example 15: img

    void SliceRenderer2D::updateResult(DataContainer& data) {
        ImageRepresentationGL::ScopedRepresentation img(data, p_sourceImageID.getValue());

        if (img != 0) {
            if (img->getDimensionality() == 2) {
                cgt::vec3 imgSize(img->getSize());
             
                float renderTargetRatio = static_cast<float>(getEffectiveViewportSize().x) / static_cast<float>(getEffectiveViewportSize().y);

                cgt::vec2 topLeft_px(static_cast<float>(p_cropLeft.getValue()), static_cast<float>(p_cropTop.getValue()));
                cgt::vec2 bottomRight_px(static_cast<float>(imgSize.x - p_cropRight.getValue()), static_cast<float>(imgSize.y - p_cropBottom.getValue()));
                cgt::vec2 croppedSize = bottomRight_px - topLeft_px;

                float sliceRatio =
                    (static_cast<float>(croppedSize.x) * img.getImageData()->getMappingInformation().getVoxelSize().x)
                    / (static_cast<float>(croppedSize.y) * img.getImageData()->getMappingInformation().getVoxelSize().y);
       
                // configure the view matrix so that the slice is rendered with the correct aspect ratio
                float ratioRatio = sliceRatio / renderTargetRatio;
                cgt::mat4 viewMatrix = (ratioRatio > 1) ? cgt::mat4::createScale(cgt::vec3(1.f, 1.f / ratioRatio, 1.f)) : cgt::mat4::createScale(cgt::vec3(ratioRatio, 1.f, 1.f));
                viewMatrix.t11 *= -1;

                // prepare OpenGL
                _shader->activate();
                cgt::TextureUnit inputUnit, tfUnit;
                img->bind(_shader, inputUnit);
                p_transferFunction.getTF()->bind(_shader, tfUnit);

                if (p_invertXAxis.getValue())
                    viewMatrix *= cgt::mat4::createScale(cgt::vec3(-1, 1, 1));

                if (p_invertYAxis.getValue())
                    viewMatrix *= cgt::mat4::createScale(cgt::vec3(1, -1, 1));


                cgt::vec2 topLeft = topLeft_px / imgSize.xy();
                cgt::vec2 bottomRight = bottomRight_px / imgSize.xy();

                _shader->setUniform("_viewMatrix", viewMatrix);
                _shader->setUniform("_topLeft", topLeft);
                _shader->setUniform("_bottomRight", bottomRight);

                // render slice
                FramebufferActivationGuard fag(this);
                createAndAttachColorTexture();
                createAndAttachDepthTexture();
                glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT);
                QuadRdr.renderQuad();


                _shader->deactivate();
                cgt::TextureUnit::setZeroUnit();

                data.addData(p_targetImageID.getValue(), new RenderData(_fbo));
            }
            else {
                LERROR("Input image must have dimensionality of 2.");
            }
        }
        else {
            LDEBUG("No suitable input image found.");
        }
    }
Developer: tusharuiit, Project: 2014-2015_HiwiMedicalXTTVisualization, Lines: 63, Source: slicerenderer2d.cpp


Note: The DataContainer::addData examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Please consult the corresponding project's license before distributing or using the code; do not republish without permission.