

C++ shared_ptr::GetImage Method Code Examples

This article collects typical code examples of the C++ tr1::shared_ptr::GetImage method. If you are wondering what shared_ptr::GetImage does or how to use it, the curated examples below may help. You can also explore other usage examples of tr1::shared_ptr.


Three code examples of the shared_ptr::GetImage method are shown below, sorted by popularity by default.

Example 1: deinterlaced_image

// This method is required for all derived classes of EffectBase, and returns a
// modified openshot::Frame object
tr1::shared_ptr<Frame> Deinterlace::GetFrame(tr1::shared_ptr<Frame> frame, int frame_number)
{
	// Get original size of frame's image
	int original_width = frame->GetImage()->width();
	int original_height = frame->GetImage()->height();

	// Get the frame's image
	tr1::shared_ptr<QImage> image = frame->GetImage();
	const unsigned char* pixels = image->bits();

	// Create a new half-height image to hold one field of scanlines
	QImage deinterlaced_image(image->width(), image->height() / 2, QImage::Format_RGBA8888);
	unsigned char* deinterlaced_pixels = deinterlaced_image.bits();

	// Copy every other scanline, keeping either the even or the odd field
	// (isOdd is a property of this effect; an even frame height is assumed)
	int start = 0;
	if (isOdd)
		start = 1;
	for (int row = start; row < image->height(); row += 2) {
		memcpy(deinterlaced_pixels, pixels + (row * image->bytesPerLine()), image->bytesPerLine());
		deinterlaced_pixels += image->bytesPerLine();
	}

	// Resize deinterlaced image back to original size, and update frame's image
	image = tr1::shared_ptr<QImage>(new QImage(deinterlaced_image.scaled(original_width, original_height, Qt::IgnoreAspectRatio, Qt::FastTransformation)));

	// Update image on frame
	frame->AddImage(image);

	// return the modified frame
	return frame;
}
Developer ID: bryanagee, Project: libopenshot, Lines: 34, Source file: Deinterlace.cpp
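
For context, a hypothetical call site for this effect might look like the sketch below. The reader variable, the constructor argument, and the frame number are illustrative assumptions, not part of the excerpt above.

// Hypothetical usage sketch; assumes a constructor Deinterlace(bool isOdd)
// matching the isOdd property used above, and an already-open reader
Deinterlace deinterlace(false);                     // keep the even field
tr1::shared_ptr<Frame> frame = reader.GetFrame(1);  // 'reader' is assumed
frame = deinterlace.GetFrame(frame, 1);             // frame is modified in place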

Example 2:

// This method is required for all derived classes of EffectBase, and returns a
// modified openshot::Frame object
tr1::shared_ptr<Frame> ChromaKey::GetFrame(tr1::shared_ptr<Frame> frame, int frame_number)
{
	// Get the current chroma key threshold (fuzz) and the mask color's RGB components
	int threshold = fuzz.GetInt(frame_number);
	long mask_R = color.red.GetInt(frame_number);
	long mask_G = color.green.GetInt(frame_number);
	long mask_B = color.blue.GetInt(frame_number);

	// Get source image's pixels
	tr1::shared_ptr<QImage> image = frame->GetImage();
	unsigned char *pixels = (unsigned char *) image->bits();

	// Loop through pixels
	for (int pixel = 0, byte_index=0; pixel < image->width() * image->height(); pixel++, byte_index+=4)
	{
		// Get the RGB values from the pixel (byte 3 is alpha, written below)
		unsigned char R = pixels[byte_index];
		unsigned char G = pixels[byte_index + 1];
		unsigned char B = pixels[byte_index + 2];

		// Get distance between mask color and pixel color
		long distance = Color::GetDistance((long)R, (long)G, (long)B, mask_R, mask_G, mask_B);

		// Alpha out the pixel (if color similar)
		if (distance <= threshold)
			// MATCHED - Make pixel transparent
			pixels[byte_index + 3] = 0;
	}

	// return the modified frame
	return frame;
}
Developer ID: bryanagee, Project: libopenshot, Lines: 35, Source file: ChromaKey.cpp
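
The Color::GetDistance helper used above is not shown in this excerpt. Below is a minimal sketch of a plausible implementation, assuming a Euclidean distance in RGB space; the actual libopenshot implementation may differ.

#include <cmath>

// Plausible sketch of the RGB distance helper (an assumption, not verified
// libopenshot source): Euclidean distance between two colors in RGB space
long Color::GetDistance(long R1, long G1, long B1, long R2, long G2, long B2)
{
	return (long) std::sqrt(std::pow(R1 - R2, 2) + std::pow(G1 - G2, 2) + std::pow(B1 - B2, 2));
}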

Example 3: add_layer

// Process a new layer of video or audio
void Timeline::add_layer(tr1::shared_ptr<Frame> new_frame, Clip* source_clip, long int clip_frame_number, long int timeline_frame_number, bool is_top_clip)
{
	// Get the clip's frame & image
	tr1::shared_ptr<Frame> source_frame = GetOrCreateFrame(source_clip, clip_frame_number);

	// No frame found... so bail
	if (!source_frame)
		return;

	// Debug output
	AppendDebugMethod("Timeline::add_layer", "new_frame->number", new_frame->number, "clip_frame_number", clip_frame_number, "timeline_frame_number", timeline_frame_number, "", -1, "", -1, "", -1);

	/* REPLACE IMAGE WITH WAVEFORM IMAGE (IF NEEDED) */
	if (source_clip->Waveform())
	{
		// Debug output
		AppendDebugMethod("Timeline::add_layer (Generate Waveform Image)", "source_frame->number", source_frame->number, "source_clip->Waveform()", source_clip->Waveform(), "clip_frame_number", clip_frame_number, "", -1, "", -1, "", -1);

		// Get the color of the waveform
		int red = source_clip->wave_color.red.GetInt(clip_frame_number);
		int green = source_clip->wave_color.green.GetInt(clip_frame_number);
		int blue = source_clip->wave_color.blue.GetInt(clip_frame_number);
		int alpha = source_clip->wave_color.alpha.GetInt(clip_frame_number);

		// Generate Waveform Dynamically (the size of the timeline)
		tr1::shared_ptr<QImage> source_image = source_frame->GetWaveform(info.width, info.height, red, green, blue, alpha);
		source_frame->AddImage(source_image);
	}

	/* Apply effects to the source frame (if any). If multiple clips are overlapping, only process the
	 * effects on the top clip. */
	if (is_top_clip)
		source_frame = apply_effects(source_frame, timeline_frame_number, source_clip->Layer());

	// Declare an image to hold the source frame's image
	tr1::shared_ptr<QImage> source_image;

	/* COPY AUDIO - with correct volume */
	if (source_clip->Reader()->info.has_audio) {

		// Debug output
		AppendDebugMethod("Timeline::add_layer (Copy Audio)", "source_clip->Reader()->info.has_audio", source_clip->Reader()->info.has_audio, "source_frame->GetAudioChannelsCount()", source_frame->GetAudioChannelsCount(), "info.channels", info.channels, "clip_frame_number", clip_frame_number, "timeline_frame_number", timeline_frame_number, "", -1);

		if (source_frame->GetAudioChannelsCount() == info.channels)
			for (int channel = 0; channel < source_frame->GetAudioChannelsCount(); channel++)
			{
				float initial_volume = 1.0f;
				float previous_volume = source_clip->volume.GetValue(clip_frame_number - 1); // previous frame's percentage of volume (0 to 1)
				float volume = source_clip->volume.GetValue(clip_frame_number); // percentage of volume (0 to 1)

				// If no ramp is needed, start at the clip's current volume;
				// otherwise, ramp the gain from the previous frame's volume to this frame's
				if (isEqual(previous_volume, volume))
					initial_volume = volume;
				else
					source_frame->ApplyGainRamp(channel, 0, source_frame->GetAudioSamplesCount(), previous_volume, volume);

				// TODO: Improve FrameMapper (or Timeline) to always get the correct number of samples per frame.
				// Currently, the ResampleContext sometimes leaves behind a few samples for the next call, and the
				// number of samples returned is variable... and does not match the number expected.
				// This is a crude solution at best. =)
				if (new_frame->GetAudioSamplesCount() != source_frame->GetAudioSamplesCount())
					// Force timeline frame to match the source frame
					new_frame->ResizeAudio(info.channels, source_frame->GetAudioSamplesCount(), info.sample_rate, info.channel_layout);

				// Copy audio samples (and set initial volume), mixing them with any existing samples.
				// The gains are added together, so set them carefully: if the sum exceeds 1.0, audio distortion will occur.
				new_frame->AddAudio(false, channel, 0, source_frame->GetAudioSamples(channel), source_frame->GetAudioSamplesCount(), initial_volume);

			}
		else
			// Debug output
			AppendDebugMethod("Timeline::add_layer (No Audio Copied - Wrong # of Channels)", "source_clip->Reader()->info.has_audio", source_clip->Reader()->info.has_audio, "source_frame->GetAudioChannelsCount()", source_frame->GetAudioChannelsCount(), "info.channels", info.channels, "clip_frame_number", clip_frame_number, "timeline_frame_number", timeline_frame_number, "", -1);

	}

	// Skip out if only an audio frame
	if (!source_clip->Waveform() && !source_clip->Reader()->info.has_video)
		// Skip the rest of the image processing for performance reasons
		return;

	// Debug output
	AppendDebugMethod("Timeline::add_layer (Get Source Image)", "source_frame->number", source_frame->number, "source_clip->Waveform()", source_clip->Waveform(), "clip_frame_number", clip_frame_number, "", -1, "", -1, "", -1);

	// Get actual frame image data
	source_image = source_frame->GetImage();

	// Get some basic image properties
	int source_width = source_image->width();
	int source_height = source_image->height();

	/* ALPHA & OPACITY */
	if (source_clip->alpha.GetValue(clip_frame_number) != 1.0)
	{
		float alpha = source_clip->alpha.GetValue(clip_frame_number);

		// Get source image's pixels
		unsigned char *pixels = (unsigned char *) source_image->bits();
//......... the rest of this code is omitted .........
Developer ID: nwgat, Project: libopenshot, Lines: 101, Source file: Timeline.cpp
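
The gain-ramp branch in the audio-copy loop above smooths volume changes between frames by interpolating the gain across the frame's samples, which avoids audible clicks. Below is a minimal standalone sketch of that idea; it illustrates a linear gain ramp and is not libopenshot's actual ApplyGainRamp implementation.

// Standalone sketch of a linear gain ramp (illustration only, not the real ApplyGainRamp)
void apply_gain_ramp(float* samples, int sample_count, float start_gain, float end_gain)
{
	for (int i = 0; i < sample_count; i++) {
		// Interpolate the gain linearly from start_gain to end_gain across the buffer
		float t = (sample_count > 1) ? (float) i / (sample_count - 1) : 0.0f;
		samples[i] *= start_gain + t * (end_gain - start_gain);
	}
}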


Note: The tr1::shared_ptr::GetImage method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by many developers, and copyright remains with the original authors. Refer to each project's license before distributing or using the code, and do not reproduce this article without permission.