C++ FImage Class Code Examples

This article collects typical usage examples of the C++ FImage class. If you are looking for concrete examples of how the FImage class is used in practice, the curated snippets below may help.


The following presents 14 code examples of the FImage class, listed by popularity by default.

Example 1: image

void MainWindow::on_bSobelXY_clicked() {
    if(original != nullImage){
        EdgeType type = (EdgeType)ui->cbEdgeTypes->currentIndex();
        FImage image(original);
        Kernel sobelX = Kernel::createSobelKernelX();
        Convolution convX = Convolution(sobelX, type);
        FImage resultX = convX.apply(image);
        resultX.normalize();

        Kernel sobelY = Kernel::createSobelKernelY();
        Convolution convY = Convolution(sobelY, type);
        FImage resultY = convY.apply(image);
        resultY.normalize();

        FImage result(resultX.getWidth(), resultY.getHeight());
        for(int x = 0; x < resultX.getWidth(); ++x) {
            for(int y = 0; y < resultX.getHeight(); ++y) {
                float valX = resultX.getValue(x, y);
                float valY = resultY.getValue(x, y);
                result.setValue(x, y, sqrtf(valX * valX + valY * valY));
            }
        }
        result.normalize();
        ui->imageLabel->setPixmap(QPixmap::fromImage(result.toQImage()));
    }
}
Developer: alexey8pcd, Project: CyberVision_Lab1, Lines: 26, Source: mainwindow.cpp
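
The sample above combines the two Sobel responses into a per-pixel gradient magnitude, sqrt(Gx^2 + Gy^2). Below is a minimal standalone sketch of just that combination step; it is not part of the original project and assumes only the FImage accessors already used above (getWidth, getHeight, getValue, setValue).

// Hedged sketch: merge two gradient images into a magnitude image.
// Assumes gx and gy have identical dimensions, as in the sample above.
static FImage combineGradients(const FImage& gx, const FImage& gy)
{
    FImage magnitude(gx.getWidth(), gx.getHeight());
    for (int x = 0; x < gx.getWidth(); ++x) {
        for (int y = 0; y < gx.getHeight(); ++y) {
            float vx = gx.getValue(x, y);
            float vy = gy.getValue(x, y);
            magnitude.setValue(x, y, sqrtf(vx * vx + vy * vy));
        }
    }
    return magnitude;
}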

Example 2: ApplyInverseRealFourierTransform_1

static void ApplyInverseRealFourierTransform_1( GenericImage<P>& image, const FComplexImage& dft, bool parallel, int maxProcessors )
{
   FImage tmp;
   tmp.Status() = image.Status();
   image.FreeData();

   ApplyInverseRealFourierTransform_2( tmp, dft, parallel, maxProcessors );

   image.SetStatusCallback( 0 );
   image.Assign( tmp );
   image.Status() = tmp.Status();
}
Developer: aleixpuig, Project: PCL, Lines: 12, Source: FourierTransform.cpp

Example 3: minLambdasStore

FImage InterestPointsDetector::calculateMinLambdasStore(
        const FImage& derXStore, const FImage& derYStore)
{
    FImage minLambdasStore(source.getWidth(), source.getHeight());
    int areaSize = 2;
    for (int x = 0; x < source.getWidth(); ++x) {
        for (int y = 0; y < source.getHeight(); ++y) {
            float a = 0;
            float b = 0;
            float c = 0;
            for (int u = -areaSize; u <= areaSize; ++u) {
                for (int v = -areaSize; v <= areaSize; ++v) {
                    if (u == 0 && v == 0){
                        continue;
                    }
                    int xi = ImageUtil::handleEdgeEffect(
                                 x + u, source.getWidth(), type);
                    int yi = ImageUtil::handleEdgeEffect(
                                 y + v, source.getHeight(), type);
                    float ix = 0;
                    float iy = 0;
                    if(ImageUtil::insideImage(xi, yi)){
                         ix = derXStore.getValue(xi, yi);
                         iy = derYStore.getValue(xi, yi);
                    }
                    a += ix * ix;
                    b += ix * iy;
                    c += iy * iy;

                }
            }
            float trA = a + c;
            float detA = a * c - b * b;
            float rad = sqrtf(trA * trA - 4 * detA);
            float lambda1 = (trA - rad) / 2;
            float lambda2 = (trA + rad) / 2;
            float lambdaMin = lambda1 < lambda2 ? lambda1 : lambda2;
            minLambdasStore.setValue(x, y, lambdaMin);
        }
    }

    return minLambdasStore;
}
Developer: alexey8pcd, Project: CyberVision_Lab1, Lines: 43, Source: interestpointsdetector.cpp
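
The value stored per pixel above is the smaller eigenvalue of the 2x2 structure tensor M = [a b; b c] accumulated from the gradient products. The trA/detA/rad lines implement its closed form, lambda = (trA +/- sqrt(trA^2 - 4*detA)) / 2. A compact sketch of the same computation (for illustration, not code from the project):

// Smaller eigenvalue of the symmetric 2x2 matrix [a b; b c].
// The minimum of the two eigenvalues is the Shi-Tomasi corner response.
static inline float minEigenvalue(float a, float b, float c)
{
    float tr  = a + c;                     // trace
    float det = a * c - b * b;             // determinant
    float rad = sqrtf(tr * tr - 4 * det);  // discriminant, never negative for a symmetric matrix
    return (tr - rad) * 0.5f;
}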

Example 4: mexFunction

void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
	FImage Im;
	Im.LoadMatlabImage(prhs[0]);	// height * width * channels (in that order)

	cv::Mat img = MxArray(prhs[0]).toMat(CV_32F);	// Convert to single floating point grayscale image
	int rows = img.rows;
	int cols = img.cols;
	int channels = img.channels();

	cv::Mat img2, img3;
	cv::GaussianBlur(img, img2, cv::Size(5, 5), 0.67, 0, cv::BORDER_REPLICATE);

	int w = (int)ceil(img.cols / 2.0f);
	int h = (int)ceil(img.rows / 2.0f);
	cv::resize(img, img3, cv::Size(w, h), 0, 0, cv::INTER_CUBIC);

	plhs[0] = MxArray(img2);
	plhs[1] = MxArray(img3);
}
Developer: YuvalNirkin, Project: DenseCorrespondences, Lines: 20, Source: Pyramids.cpp

Example 5: point

vector<InterestPoint> InterestPointsDetector::determinePointsByRadius(
        float threshold, int radius, const  FImage &minValuesStore)
{
    int width = minValuesStore.getWidth();
    int height = minValuesStore.getHeight();

    vector<InterestPoint> points = vector<InterestPoint>();
    for (int x = 0; x < width; ++x) {
        for (int y = 0; y < height; ++y) {
            float localMax = minValuesStore.getValue(x, y);
            if(localMax < threshold) {
                continue;
            }

            bool addPoint = true;
            for (int dpx = -radius; dpx <= radius; ++dpx) {
                for (int dpy = -radius; dpy <= radius && addPoint; ++dpy) {
                    if(dpx == 0 && dpy == 0){
                        continue;
                    }
                    int xi = ImageUtil::handleEdgeEffect(x + dpx, width, type);
                    int yi = ImageUtil::handleEdgeEffect(y + dpy, height, type);
                    float neiValue = 1e9;
                    if(ImageUtil::insideImage(xi, yi)){
                        neiValue = minValuesStore.getValue(xi, yi);
                    }
                    if (neiValue >= localMax){
                        addPoint = false;
                        break;
                    }
                }
            }
            if(addPoint){
                InterestPoint point(x, y, localMax);
                points.push_back(point);
            }
        }
    }
    return points;
}
Developer: alexey8pcd, Project: CyberVision_Lab1, Lines: 40, Source: interestpointsdetector.cpp
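
A hedged usage sketch of how the two detector methods shown here might be chained (the object name, inputs, and parameter values are assumptions for illustration, not code from the project): build the minimum-eigenvalue map first, then keep only points that exceed the threshold and are strict local maxima within the given radius.

// Hypothetical call sequence; detector, derX and derY are placeholders.
FImage lambdaMap = detector.calculateMinLambdasStore(derX, derY);
vector<InterestPoint> points =
        detector.determinePointsByRadius(0.1f /*threshold*/, 5 /*radius*/, lambdaMap);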

Example 6: UseOriginal

/**
 * Fills the output structure with the original uncompressed mip information
 *
 * @param InImage The mip to compress
 * @param OutCompressedImage The output image (uncompressed in this case)
 */
static void UseOriginal(const FImage& InImage, FCompressedImage2D& OutCompressedImage, EPixelFormat CompressedPixelFormat, bool bSRGB)
{   
	// Get Raw Data
	FImage Image;
	InImage.CopyTo(Image, ERawImageFormat::BGRA8, bSRGB);

    // Fill out the output information
    OutCompressedImage.SizeX = Image.SizeX;
    OutCompressedImage.SizeY = Image.SizeY;
    OutCompressedImage.PixelFormat = CompressedPixelFormat;
	
	// Output Data
	OutCompressedImage.RawData.Init(Image.SizeX * Image.SizeY * 4);
    // Copy the raw BGRA8 texels into the output buffer allocated above
    void* MipData = OutCompressedImage.RawData.GetData();
    FMemory::Memcpy(MipData, Image.RawData.GetData(), Image.SizeX * Image.SizeY * 4);
}
Developer: Codermay, Project: Unreal4, Lines: 22, Source: TextureFormatPVR.cpp

Example 7: CopyImage

/**
 * Copies an image accounting for format differences. Sizes must match.
 *
 * @param SrcImage - The source image to copy from.
 * @param DestImage - The destination image to copy to.
 */
static void CopyImage(const FImage& SrcImage, FImage& DestImage)
{
    check(SrcImage.SizeX == DestImage.SizeX);
    check(SrcImage.SizeY == DestImage.SizeY);
    check(SrcImage.NumSlices == DestImage.NumSlices);

    const int32 NumTexels = SrcImage.SizeX * SrcImage.SizeY * SrcImage.NumSlices;

    if (SrcImage.Format == DestImage.Format &&
            SrcImage.GammaSpace == DestImage.GammaSpace)
    {
        DestImage.RawData = SrcImage.RawData;
    }
    else if (SrcImage.Format == ERawImageFormat::RGBA32F)
    {
        // Convert from 32-bit linear floating point.
        const FLinearColor* SrcColors = SrcImage.AsRGBA32F();

        switch (DestImage.Format)
        {
        case ERawImageFormat::G8:
        {
            uint8* DestLum = DestImage.AsG8();
            for (int32 TexelIndex = 0; TexelIndex < NumTexels; ++TexelIndex)
            {
                DestLum[TexelIndex] = SrcColors[TexelIndex].ToFColor(DestImage.IsGammaCorrected()).R;
            }
        }
        break;

        case ERawImageFormat::BGRA8:
        {
            FColor* DestColors = DestImage.AsBGRA8();
            for (int32 TexelIndex = 0; TexelIndex < NumTexels; ++TexelIndex)
            {
                DestColors[TexelIndex] = SrcColors[TexelIndex].ToFColor(DestImage.IsGammaCorrected());
            }
        }
        break;

        case ERawImageFormat::BGRE8:
        {
            FColor* DestColors = DestImage.AsBGRE8();
            for (int32 TexelIndex = 0; TexelIndex < NumTexels; ++TexelIndex)
            {
                DestColors[TexelIndex] = SrcColors[TexelIndex].ToRGBE();
            }
        }
        break;

        case ERawImageFormat::RGBA16:
        {
            uint16* DestColors = DestImage.AsRGBA16();
            for (int32 TexelIndex = 0; TexelIndex < NumTexels; ++TexelIndex)
            {
                int32 DestIndex = TexelIndex * 4;
                DestColors[DestIndex + 0] = FMath::Clamp(FMath::FloorToInt(SrcColors[TexelIndex].R * 65535.999f), 0, 65535);
                DestColors[DestIndex + 1] = FMath::Clamp(FMath::FloorToInt(SrcColors[TexelIndex].G * 65535.999f), 0, 65535);
                DestColors[DestIndex + 2] = FMath::Clamp(FMath::FloorToInt(SrcColors[TexelIndex].B * 65535.999f), 0, 65535);
                DestColors[DestIndex + 3] = FMath::Clamp(FMath::FloorToInt(SrcColors[TexelIndex].A * 65535.999f), 0, 65535);
            }
        }
        break;

        case ERawImageFormat::RGBA16F:
        {
            FFloat16Color* DestColors = DestImage.AsRGBA16F();
            for (int32 TexelIndex = 0; TexelIndex < NumTexels; ++TexelIndex)
            {
                DestColors[TexelIndex] = FFloat16Color(SrcColors[TexelIndex]);
            }
        }
        break;
        }
    }
    else if (DestImage.Format == ERawImageFormat::RGBA32F)
    {
        // Convert to 32-bit linear floating point.
        FLinearColor* DestColors = DestImage.AsRGBA32F();
        switch (SrcImage.Format)
        {
        case ERawImageFormat::G8:
        {
            const uint8* SrcLum = SrcImage.AsG8();
            for (int32 TexelIndex = 0; TexelIndex < NumTexels; ++TexelIndex)
            {
                FColor SrcColor(SrcLum[TexelIndex],SrcLum[TexelIndex],SrcLum[TexelIndex],255);

                switch ( SrcImage.GammaSpace )
                {
                case EGammaSpace::Linear:
                    DestColors[TexelIndex] = SrcColor.ReinterpretAsLinear();
                    break;
                case EGammaSpace::sRGB:
//......... (rest of this function omitted) .........
Developer: zhaoyizheng0930, Project: UnrealEngine, Lines: 101, Source: ImageCore.cpp
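
A note on the RGBA16 branch above: the 65535.999f multiplier combined with FMath::FloorToInt is the usual trick for mapping the closed range [0.0, 1.0] onto 0..65535 without a special case at 1.0, and the surrounding FMath::Clamp guards against any rounding that would push a result past 65535.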

Example 8: InitImageStorage

/**
 * Initializes storage for an image.
 *
 * @param Image - The image to initialize storage for.
 */
static void InitImageStorage(FImage& Image)
{
    int32 NumBytes = Image.SizeX * Image.SizeY * Image.NumSlices * Image.GetBytesPerPixel();
    Image.RawData.Empty(NumBytes);
    Image.RawData.AddUninitialized(NumBytes);
}
Developer: zhaoyizheng0930, Project: UnrealEngine, Lines: 11, Source: ImageCore.cpp
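
A hedged sketch of how InitImageStorage and the CopyImage helper from Example 7 might be used together. The direct member assignments are an assumption made for illustration; the engine provides its own FImage initialization helpers.

// Hypothetical: size a destination image, then convert into it.
FImage Dest;
Dest.SizeX      = SrcImage.SizeX;
Dest.SizeY      = SrcImage.SizeY;
Dest.NumSlices  = SrcImage.NumSlices;
Dest.Format     = ERawImageFormat::BGRA8;
Dest.GammaSpace = SrcImage.GammaSpace;
InitImageStorage(Dest);    // allocates SizeX * SizeY * NumSlices * bytes-per-pixel
CopyImage(SrcImage, Dest); // per-texel conversion as shown in Example 7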

Example 9: CompressImage

	virtual bool CompressImage(
			const FImage& InImage,
			const struct FTextureBuildSettings& BuildSettings,
			bool bImageHasAlphaChannel,
			FCompressedImage2D& OutCompressedImage
		) const override
	{
		// Get Raw Image Data from passed in FImage
		FImage Image;
		InImage.CopyTo(Image, ERawImageFormat::BGRA8, BuildSettings.bSRGB);

		// Determine the compressed pixel format and compression parameters
		EPixelFormat CompressedPixelFormat = PF_Unknown;
		FString CompressionParameters = TEXT("");

		if (BuildSettings.TextureFormatName == GTextureFormatNameASTC_RGB ||
		  ((BuildSettings.TextureFormatName == GTextureFormatNameASTC_RGBAuto) && !bImageHasAlphaChannel))
		{
			CompressedPixelFormat = GetQualityFormat();
			CompressionParameters = FString::Printf(TEXT("%s -esw bgra -ch 1 1 1 0"), *GetQualityString());
		}
		else if (BuildSettings.TextureFormatName == GTextureFormatNameASTC_RGBA ||
			   ((BuildSettings.TextureFormatName == GTextureFormatNameASTC_RGBAuto) && bImageHasAlphaChannel))
		{
			CompressedPixelFormat = GetQualityFormat();
			CompressionParameters = FString::Printf(TEXT("%s -esw bgra -ch 1 1 1 1 -alphablend"), *GetQualityString());
		}
		else if (BuildSettings.TextureFormatName == GTextureFormatNameASTC_NormalAG)
		{
			CompressedPixelFormat = GetQualityFormat(FORCED_NORMAL_MAP_COMPRESSION_SIZE_VALUE);
			CompressionParameters = FString::Printf(TEXT("%s -esw 0g0b -ch 0 1 0 1 -oplimit 1000 -mincorrel 0.99 -dblimit 60 -b 2.5 -v 3 1 1 0 50 0 -va 1 1 0 50"), *GetQualityString(FORCED_NORMAL_MAP_COMPRESSION_SIZE_VALUE, -1));
		}
		else if (BuildSettings.TextureFormatName == GTextureFormatNameASTC_NormalRG)
		{
			CompressedPixelFormat = GetQualityFormat(FORCED_NORMAL_MAP_COMPRESSION_SIZE_VALUE);
			CompressionParameters = FString::Printf(TEXT("%s -esw bg00 -ch 1 1 0 0 -oplimit 1000 -mincorrel 0.99 -dblimit 60 -b 2.5 -v 3 1 1 0 50 0 -va 1 1 0 50"), *GetQualityString(FORCED_NORMAL_MAP_COMPRESSION_SIZE_VALUE, -1));
		}

		// Compress the image, slice by slice
		bool bCompressionSucceeded = true;
		int32 SliceSizeInTexels = Image.SizeX * Image.SizeY;
		for (int32 SliceIndex = 0; SliceIndex < Image.NumSlices && bCompressionSucceeded; ++SliceIndex)
		{
			TArray<uint8> CompressedSliceData;
			bCompressionSucceeded = CompressSliceToASTC(
				Image.AsBGRA8() + (SliceIndex * SliceSizeInTexels),
				Image.SizeX,
				Image.SizeY,
				CompressionParameters,
				CompressedSliceData
			);
			OutCompressedImage.RawData.Append(CompressedSliceData);
		}

		if (bCompressionSucceeded)
		{
			OutCompressedImage.SizeX = Image.SizeX;
			OutCompressedImage.SizeY = Image.SizeY;
			OutCompressedImage.PixelFormat = CompressedPixelFormat;
		}
		return bCompressionSucceeded;
	}
Developer: Codermay, Project: Unreal4, Lines: 62, Source: TextureFormatASTC.cpp

Example 10: DEBUG_DEBUG

SmallRemappedImageCache::MRemappedImage *
SmallRemappedImageCache::getRemapped(const PanoramaData& pano,
                                     const PanoramaOptions & popts,
                                     unsigned int imgNr,
                                     vigra::Rect2D outputROI,
                                     AppBase::MultiProgressDisplay& progress)
{
    // always map to HDR mode. curve and exposure is applied in preview window, for speed
    PanoramaOptions opts = popts;
    opts.outputMode = PanoramaOptions::OUTPUT_HDR;
    opts.outputExposureValue = 0.0;

    // return old image, if already in cache and if it has changed since the last rendering
    if (set_contains(m_images, imgNr)) {
        // return cached image if the parameters of the image have not changed
        SrcPanoImage oldParam = m_imagesParam[imgNr];
        if (oldParam == pano.getSrcImage(imgNr)
                && m_panoOpts[imgNr].getHFOV() == opts.getHFOV()
                && m_panoOpts[imgNr].getWidth() == opts.getWidth()
                && m_panoOpts[imgNr].getHeight() == opts.getHeight()
                && m_panoOpts[imgNr].getProjection() == opts.getProjection()
                && m_panoOpts[imgNr].getProjectionParameters() == opts.getProjectionParameters()
           )
        {
            DEBUG_DEBUG("using cached remapped image " << imgNr);
            return m_images[imgNr];
        }
    }

    ImageCache::getInstance().softFlush();

    typedef  BasicImageView<RGBValue<unsigned char> > BRGBImageView;

//    typedef NumericTraits<PixelType>::RealPromote RPixelType;

    // remap image
    DEBUG_DEBUG("remapping image " << imgNr);

    // load image
    const SrcPanoImage & img = pano.getImage(imgNr);

    ImageCache::EntryPtr e = ImageCache::getInstance().getSmallImage(img.getFilename().c_str());
    if ( (e->image8->width() == 0) && (e->image16->width() == 0) && (e->imageFloat->width() == 0) ) {
        throw std::runtime_error("could not retrieve small source image for preview generation");
    }
    Size2D srcImgSize;
    if (e->image8->width() > 0)
        srcImgSize = e->image8->size();
    else if (e->image16->width() > 0)
        srcImgSize = e->image16->size();
    else
        srcImgSize = e->imageFloat->size();

    MRemappedImage *remapped = new MRemappedImage;
    SrcPanoImage srcPanoImg = pano.getSrcImage(imgNr);
    // adjust distortion parameters for small preview image
    srcPanoImg.resize(srcImgSize);

    FImage srcFlat;
    // use complete image, by supplying an empty mask image
    BImage srcMask;

    if (img.getVigCorrMode() & SrcPanoImage::VIGCORR_FLATFIELD) {
        ImageCache::EntryPtr e = ImageCache::getInstance().getSmallImage(img.getFlatfieldFilename().c_str());
        if (!e) {
            throw std::runtime_error("could not retrieve flatfield image for preview generation");
        }
        if (e->image8->width()) {
            srcFlat.resize(e->image8->size());
            copyImage(srcImageRange(*(e->image8),
                                    RGBToGrayAccessor<RGBValue<UInt8> >()),
                      destImage(srcFlat));
        } else if (e->image16->width()) {
            srcFlat.resize(e->image16->size());
            copyImage(srcImageRange(*(e->image16),
                                           RGBToGrayAccessor<RGBValue<vigra::UInt16> >()),
                             destImage(srcFlat));
        } else {
            srcFlat.resize(e->imageFloat->size());
            copyImage(srcImageRange(*(e->imageFloat),
                                    RGBToGrayAccessor<RGBValue<float> >()),
                      destImage(srcFlat));
        }
    }
    progress.pushTask(AppBase::ProgressTask("remapping", "", 0));

    // compute the bounding output rectangle here!
    vigra::Rect2D outROI = estimateOutputROI(pano, opts, imgNr);
    DEBUG_DEBUG("srcPanoImg size: " << srcPanoImg.getSize() << " pano roi:" << outROI);

    if (e->imageFloat->width()) {
        // remap image
        remapImage(*(e->imageFloat),
                   srcMask,
                   srcFlat,
                   srcPanoImg,
                   opts,
                   outROI,
                   *remapped,
                   progress);
//......... (rest of this function omitted) .........
Developer: TopPano, Project: hugin_lite, Lines: 101, Source: CachedImageRemapper.cpp

Example 11: inputState

void FRecord::record()
{
	FStateInput inputState(0);

	if(getFlag(RECORD_FLASH_R) == true)
	{	
		inputState[FLASH_CHANNEL_R] = flashIntensityR;
		(*dmx)->setSystemState(inputState);
	}

	if(getFlag(RECORD_FLASH_G) == true)
	{	
		inputState[FLASH_CHANNEL_G] = flashIntensityG;
		(*dmx)->setSystemState(inputState);
	}

	if(getFlag(RECORD_FLASH_B) == true)
	{	
		inputState[FLASH_CHANNEL_B] = flashIntensityB;
		(*dmx)->setSystemState(inputState);
	}
	msleep(1000);

	nSamples++;

	if(*camera == NULL)
		return;

	//cimg_library::CImg<unsigned char>* image;
	FImage<unsigned char> image;
	(*camera)->grab(&image());

	image.traceEdges();

	unsigned char color[] = {255,0,0};

	image.paintEdges(color);

	QApplication::beep();

	//image.speckImage()->display("title");

	dateTime = QDateTime::currentDateTime();
	int year = dateTime.date().year();
	int month = dateTime.date().month();
	int day = dateTime.date().day();
	int hour = dateTime.time().hour();
	int min = dateTime.time().minute();
	int sec = dateTime.time().second();
	int msec = dateTime.time().msec();

	FStateSens sensState;
	(*camera)->getSensorState(&sensState);

	FStateSysLed ledState;
	(*dmx)->getSystemState(&ledState);

	FStateSysRgb rgbState;
	(*camera)->getSystemState(&rgbState);

	FStateLab labState;
	//(*color)->rgb2lab(rgbState[F_STATE_SYS_RGB_R](), rgbState[F_STATE_SYS_RGB_G](), rgbState[F_STATE_SYS_RGB_B]());
	//(*color)->getSensorState(labState);
	
	image.blobs();
	
	if(getFlag(RECORD_IMAGES_ON) == true)
	{
		char saveString[255];
		sprintf(saveString, "%s\\%s_%d%02d%02d_%02d%02d%02d.bmp",mImagePath, mBaseName, year, month, day, hour, min, sec);

		image()->save(saveString);
	}

	if(getFlag(RECORD_CSV_ON) == true)
	{
		char saveString[255];
		sprintf(saveString, "%s\\%s_%d%02d%02d.csv", mCsvPath, mBaseName, year, month, day);
		
		QFile file(saveString);
		bool bHeaderWrite = false;
		bHeaderWrite = !file.exists();

		if(bHeaderWrite == true)
		{
			if(file.open(QIODevice::WriteOnly | QIODevice::Text))
			{
				std::string lineBuffer;
				lineBuffer.empty();

				writeTimeStampHeader(file);

				std::string label;
				label.empty();
				char separator = ';';

				sensState.labelString(label,separator);
				ledState.labelString(label,separator);
				rgbState.labelString(label,separator);
				refState.labelString(label,separator);
//......... (rest of this function omitted) .........
Developer: eberlid, Project: flouryzer, Lines: 101, Source: frecord.cpp

Example 12: CompressImage

	virtual bool CompressImage(
		const FImage& InImage,
		const struct FTextureBuildSettings& BuildSettings,
		bool bImageHasAlphaChannel,
		FCompressedImage2D& OutCompressedImage
		) const override
	{
		FImage Image;
		InImage.CopyTo(Image, ERawImageFormat::BGRA8, BuildSettings.GetGammaSpace());

		EPixelFormat CompressedPixelFormat = PF_Unknown;

		if (BuildSettings.TextureFormatName == GTextureFormatNameAutoETC1)
		{
			if (bImageHasAlphaChannel)
			{
				// ETC1 can't support an alpha channel, store uncompressed
				OutCompressedImage.SizeX = Image.SizeX;
				OutCompressedImage.SizeY = Image.SizeY;
				OutCompressedImage.PixelFormat = PF_B8G8R8A8;
				OutCompressedImage.RawData = Image.RawData;
				return true;
			}
			else
			{
				CompressedPixelFormat = PF_ETC1;
			}
		}
		else		
		if (BuildSettings.TextureFormatName == GTextureFormatNameETC1)
		{
			CompressedPixelFormat = PF_ETC1;
		}
		else
		if (BuildSettings.TextureFormatName == GTextureFormatNameETC2_RGB ||
		   (BuildSettings.TextureFormatName == GTextureFormatNameAutoETC2 && !bImageHasAlphaChannel))
		{
			CompressedPixelFormat = PF_ETC2_RGB;
		}
		else
		if (BuildSettings.TextureFormatName == GTextureFormatNameETC2_RGBA ||
		   (BuildSettings.TextureFormatName == GTextureFormatNameAutoETC2 && bImageHasAlphaChannel))
		{
			CompressedPixelFormat = PF_ETC2_RGBA;
		}
		else
		if (BuildSettings.TextureFormatName == GTextureFormatNameATC_RGB || 
		   (BuildSettings.TextureFormatName == GTextureFormatNameAutoATC && !bImageHasAlphaChannel))
		{
			CompressedPixelFormat = PF_ATC_RGB;
		}
		else
		if (BuildSettings.TextureFormatName == GTextureFormatNameATC_RGBA_I ||
		   (BuildSettings.TextureFormatName == GTextureFormatNameAutoATC && bImageHasAlphaChannel) )
		{
			CompressedPixelFormat = PF_ATC_RGBA_I;
		}
		else
		if (BuildSettings.TextureFormatName == GTextureFormatNameATC_RGBA_E)
		{
			CompressedPixelFormat = PF_ATC_RGBA_E;
		}

		check(CompressedPixelFormat != PF_Unknown);

		bool bCompressionSucceeded = true;
		int32 SliceSize = Image.SizeX * Image.SizeY;
		for (int32 SliceIndex = 0; SliceIndex < Image.NumSlices && bCompressionSucceeded; ++SliceIndex)
		{
			TArray<uint8> CompressedSliceData;
			bCompressionSucceeded = CompressImageUsingQonvert(
				Image.AsBGRA8() + SliceIndex * SliceSize,
				CompressedPixelFormat,
				Image.SizeX,
				Image.SizeY,
				CompressedSliceData
				);
			OutCompressedImage.RawData.Append(CompressedSliceData);
		}

		if (bCompressionSucceeded)
		{
			OutCompressedImage.SizeX = FMath::Max(Image.SizeX, 4);
			OutCompressedImage.SizeY = FMath::Max(Image.SizeY, 4);
			OutCompressedImage.PixelFormat = CompressedPixelFormat;
		}

		return bCompressionSucceeded;
	}
Developer: PopCap, Project: GameIdea, Lines: 89, Source: TextureFormatAndroid.cpp
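
The FMath::Max(..., 4) clamp on the reported output dimensions reflects the block layout of these formats: ETC1, ETC2 and ATC all encode fixed 4x4 texel blocks, so the smallest compressed mip that can be addressed is 4x4 even when the source image is smaller.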

Example 13: CompressImage

	virtual bool CompressImage(
		const FImage& InImage,
		const struct FTextureBuildSettings& BuildSettings,
		bool bImageHasAlphaChannel,
		FCompressedImage2D& OutCompressedImage
		) const override
	{
		bool bCompressionSucceeded = false;

		const int iWidthInBlocks	= ((InImage.SizeX + 3) & ~ 3) / 4;
		const int iHeightInBlocks	= ((InImage.SizeY + 3) & ~ 3) / 4;
		const int iOutputBytes		= iWidthInBlocks * iHeightInBlocks * 16;
		OutCompressedImage.RawData.AddUninitialized(iOutputBytes);

		// When we allow async tasks to execute we do so with 4 lines of the image per task
		// This isn't optimal for long thin textures, but works well with how ISPC works
		const int iScansPerTask = 4;
		const int iNumTasks = FMath::Max((InImage.SizeY / iScansPerTask) - 1, 0);
		const bool bUseTasks = true;

		EPixelFormat CompressedPixelFormat = PF_Unknown;
		if ( BuildSettings.TextureFormatName == GTextureFormatNameBC6H )
		{
			FImage Image;
			InImage.CopyTo(Image, ERawImageFormat::RGBA16F, false);

			bc6h_enc_settings settings;
			GetProfile_bc6h_basic(&settings);

			if ( bUseTasks )
			{
				class FIntelCompressWorker : public FNonAbandonableTask
				{
				public:
					FIntelCompressWorker(bc6h_enc_settings* pEncSettings, FImage* pInImage, FCompressedImage2D* pOutImage, int yStart, int yEnd)
						: mpEncSettings(pEncSettings)
						, mpInImage(pInImage)
						, mpOutImage(pOutImage)
						, mYStart(yStart)
						, mYEnd(yEnd)
					{
					}

					void DoWork()
					{
						IntelBC6HCompressScans(mpEncSettings, mpInImage, mpOutImage, mYStart, mYEnd);
					}

					FORCEINLINE TStatId GetStatId() const
					{
						RETURN_QUICK_DECLARE_CYCLE_STAT(FIntelCompressWorker, STATGROUP_ThreadPoolAsyncTasks);
					}

					bc6h_enc_settings*	mpEncSettings;
					FImage*				mpInImage;
					FCompressedImage2D*	mpOutImage;
					int					mYStart;
					int					mYEnd;
				};
				typedef FAsyncTask<FIntelCompressWorker> FIntelCompressTask;

				// One less task because we'll do the final + non multiple of 4 inside this task
				TIndirectArray<FIntelCompressTask> CompressionTasks;
				CompressionTasks.Reserve(iNumTasks);
				for ( int iTask=0; iTask < iNumTasks; ++iTask )
				{
					auto* AsyncTask = new(CompressionTasks) FIntelCompressTask(&settings, &Image, &OutCompressedImage, iTask * iScansPerTask, (iTask + 1) * iScansPerTask);
					AsyncTask->StartBackgroundTask();
				}

				IntelBC6HCompressScans(&settings, &Image, &OutCompressedImage, iScansPerTask * iNumTasks, InImage.SizeY);

				// Wait completion
				for (int32 TaskIndex = 0; TaskIndex < CompressionTasks.Num(); ++TaskIndex)
				{
					CompressionTasks[TaskIndex].EnsureCompletion();
				}
			}
			else
			{
				IntelBC6HCompressScans(&settings, &Image, &OutCompressedImage, 0, InImage.SizeY);
			}

			CompressedPixelFormat = PF_BC6H;
			bCompressionSucceeded = true;
		}
		else if ( BuildSettings.TextureFormatName == GTextureFormatNameBC7 )
		{
			FImage Image;
			InImage.CopyTo(Image, ERawImageFormat::BGRA8, BuildSettings.bSRGB);

			bc7_enc_settings settings;
			if ( bImageHasAlphaChannel )
			{
				GetProfile_alpha_basic(&settings);
			}
			else
			{
				GetProfile_basic(&settings);
			}
//......... (rest of this function omitted) .........
Developer: Codermay, Project: Unreal4, Lines: 101, Source: TextureFormatIntelISPCTexComp.cpp
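
The output allocation at the top of this example follows from the block layout of BC6H and BC7: both formats store every 4x4 texel block in 16 bytes. A small sketch of that sizing math (an illustration, not engine code):

// Bytes needed for a BC6H/BC7 compressed image of SizeX by SizeY texels.
static int32 ComputeBlockCompressedBytes(int32 SizeX, int32 SizeY)
{
	const int32 WidthInBlocks  = ((SizeX + 3) & ~3) / 4;  // round up to whole 4x4 blocks
	const int32 HeightInBlocks = ((SizeY + 3) & ~3) / 4;
	return WidthInBlocks * HeightInBlocks * 16;           // 16 bytes per block
}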

Example 14: CompressImage

	virtual bool CompressImage(
		const FImage& InImage,
		const struct FTextureBuildSettings& BuildSettings,
		bool bImageHasAlphaChannel,
		FCompressedImage2D& OutCompressedImage
		) const override
	{
		if (BuildSettings.TextureFormatName == GTextureFormatNameG8)
		{
			FImage Image;
			InImage.CopyTo(Image, ERawImageFormat::G8, BuildSettings.GetGammaSpace());

			OutCompressedImage.SizeX = Image.SizeX;
			OutCompressedImage.SizeY = Image.SizeY;
			OutCompressedImage.PixelFormat = PF_G8;
			OutCompressedImage.RawData = Image.RawData;

			return true;
		}
		else if (BuildSettings.TextureFormatName == GTextureFormatNameVU8)
		{
			FImage Image;
			InImage.CopyTo(Image, ERawImageFormat::BGRA8, BuildSettings.GetGammaSpace());

			OutCompressedImage.SizeX = Image.SizeX;
			OutCompressedImage.SizeY = Image.SizeY;
			OutCompressedImage.PixelFormat = PF_V8U8;

			uint32 NumTexels = Image.SizeX * Image.SizeY * Image.NumSlices;
			OutCompressedImage.RawData.Empty(NumTexels * 2);
			OutCompressedImage.RawData.AddUninitialized(NumTexels * 2);
			const FColor* FirstColor = Image.AsBGRA8();
			const FColor* LastColor = FirstColor + NumTexels;
			int8* Dest = (int8*)OutCompressedImage.RawData.GetData();

			for (const FColor* Color = FirstColor; Color < LastColor; ++Color)
			{
				*Dest++ = (int32)Color->R - 128;
				*Dest++ = (int32)Color->G - 128;
			}

			return true;
		}
		else if (BuildSettings.TextureFormatName == GTextureFormatNameBGRA8)
		{
			FImage Image;
			InImage.CopyTo(Image, ERawImageFormat::BGRA8, BuildSettings.GetGammaSpace());

			OutCompressedImage.SizeX = Image.SizeX;
			OutCompressedImage.SizeY = Image.SizeY;
			OutCompressedImage.PixelFormat = PF_B8G8R8A8;
			OutCompressedImage.RawData = Image.RawData;

			return true;
		}
		else if (BuildSettings.TextureFormatName == GTextureFormatNameRGBA8)
		{
			FImage Image;
			InImage.CopyTo(Image, ERawImageFormat::BGRA8, BuildSettings.GetGammaSpace());

			OutCompressedImage.SizeX = Image.SizeX;
			OutCompressedImage.SizeY = Image.SizeY;
			OutCompressedImage.PixelFormat = PF_B8G8R8A8;

			// swizzle each texel
			uint32 NumTexels = Image.SizeX * Image.SizeY * Image.NumSlices;
			OutCompressedImage.RawData.Empty(NumTexels * 4);
			OutCompressedImage.RawData.AddUninitialized(NumTexels * 4);
			const FColor* FirstColor = Image.AsBGRA8();
			const FColor* LastColor = FirstColor + NumTexels;
			int8* Dest = (int8*)OutCompressedImage.RawData.GetData();

			for (const FColor* Color = FirstColor; Color < LastColor; ++Color)
			{
				*Dest++ = (int32)Color->R;
				*Dest++ = (int32)Color->G;
				*Dest++ = (int32)Color->B;
				*Dest++ = (int32)Color->A;
			}

			return true;
		}
		else if (BuildSettings.TextureFormatName == GTextureFormatNameXGXR8)
		{
			FImage Image;
			InImage.CopyTo(Image, ERawImageFormat::BGRA8, BuildSettings.GetGammaSpace());

			OutCompressedImage.SizeX = Image.SizeX;
			OutCompressedImage.SizeY = Image.SizeY;
			OutCompressedImage.PixelFormat = PF_B8G8R8A8;

			// swizzle each texel
			uint32 NumTexels = Image.SizeX * Image.SizeY * Image.NumSlices;
			OutCompressedImage.RawData.Empty(NumTexels * 4);
			OutCompressedImage.RawData.AddUninitialized(NumTexels * 4);
			const FColor* FirstColor = Image.AsBGRA8();
			const FColor* LastColor = FirstColor + NumTexels;
			int8* Dest = (int8*)OutCompressedImage.RawData.GetData();

			for (const FColor* Color = FirstColor; Color < LastColor; ++Color)
//......... (rest of this function omitted) .........
Developer: PickUpSU, Project: UnrealEngine4, Lines: 101, Source: TextureFormatUncompressed.cpp
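
The V8U8 branch above converts each unsigned BGRA8 channel to a signed 8-bit value by subtracting 128, re-centering the 0..255 range around zero (0 becomes -128, 128 becomes 0, 255 becomes 127), which is the usual encoding for two-component normal maps. A minimal sketch of that per-channel mapping (for illustration only):

// Re-bias an unsigned 8-bit channel into a signed 8-bit value,
// as done in the V8U8 conversion loop above.
static inline int8 ToSignedChannel(uint8 Channel)
{
	return (int8)((int32)Channel - 128);
}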


Note: The FImage class examples in this article were compiled by 纯净天空 from open-source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors, and distribution and use should follow each project's license. Do not reproduce without permission.