This article collects typical usage examples of the C++ tr1::shared_ptr::ResizeAudio method. If you have been wondering what shared_ptr::ResizeAudio does in C++, how to use it, or what it looks like in practice, the curated code examples here may help. You can also explore further usage examples of the containing class, tr1::shared_ptr.
A total of 2 code examples of the shared_ptr::ResizeAudio method are shown below; by default they are sorted by popularity. You can upvote the examples you like or find useful; your ratings help the system recommend better C++ code examples.
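As a quick orientation before the examples, here is a minimal sketch of how ResizeAudio is typically called, based only on the calls that appear in the examples below; the frame and info variables and the sample count are placeholder assumptions, not library-verified details:

	// Minimal sketch (assumptions: 'frame' is a tr1::shared_ptr<Frame>, and
	// 'info' is a reader/timeline info struct, as in the examples below).
	int samples_per_channel = 1470;          // hypothetical value, e.g. 44100 Hz / 30 fps
	frame->ResizeAudio(info.channels,        // number of audio channels
	                   samples_per_channel,  // samples per channel
	                   info.sample_rate,     // sample rate (Hz)
	                   info.channel_layout); // channel layout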
Example 1: ResampleMappedAudio
//......... part of the code omitted here .........

	// Force the audio resampling to happen in order (1st thread to last thread), so the waveform
	// is smooth and continuous.
	#pragma omp ordered
	{
		// Set up the resample context
		if (!avr) {
			avr = avresample_alloc_context();
			av_opt_set_int(avr, "in_channel_layout", channel_layout_in_frame, 0);
			av_opt_set_int(avr, "out_channel_layout", info.channel_layout, 0);
			av_opt_set_int(avr, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
			av_opt_set_int(avr, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0);
			av_opt_set_int(avr, "in_sample_rate", sample_rate_in_frame, 0);
			av_opt_set_int(avr, "out_sample_rate", info.sample_rate, 0);
			av_opt_set_int(avr, "in_channels", channels_in_frame, 0);
			av_opt_set_int(avr, "out_channels", info.channels, 0);
			avresample_open(avr);
		}

		// Convert audio samples
		nb_samples = avresample_convert(avr,     // audio resample context
			audio_converted->data,               // output data pointers
			audio_converted->linesize[0],        // output plane size, in bytes (0 if unknown)
			audio_converted->nb_samples,         // maximum number of samples that the output buffer can hold
			audio_frame->data,                   // input data pointers
			audio_frame->linesize[0],            // input plane size, in bytes (0 if unknown)
			audio_frame->nb_samples);            // number of input samples to convert
	}

	// Create a new array (to hold all resampled S16 audio samples)
	int16_t* resampled_samples = new int16_t[(nb_samples * info.channels)];

	// Copy audio samples over original samples
	memcpy(resampled_samples, audio_converted->data[0], (nb_samples * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16) * info.channels));

	// Free frames
	free(audio_frame->data[0]); // TODO: Determine why av_free crashes on Windows
	AV_FREE_FRAME(&audio_frame);
	av_free(audio_converted->data[0]);
	AV_FREE_FRAME(&audio_converted);
	frame_samples = NULL;

	// Resize the frame to hold the right # of channels and samples
	int channel_buffer_size = nb_samples;
	frame->ResizeAudio(info.channels, channel_buffer_size, info.sample_rate, info.channel_layout);

	AppendDebugMethod("FrameMapper::ResampleMappedAudio (Audio successfully resampled)", "nb_samples", nb_samples, "total_frame_samples", total_frame_samples, "info.sample_rate", info.sample_rate, "channels_in_frame", channels_in_frame, "info.channels", info.channels, "info.channel_layout", info.channel_layout);

	// Array of floats (to hold samples for each channel)
	float *channel_buffer = new float[channel_buffer_size];

	// Divide audio into channels. Loop through each channel
	for (int channel_filter = 0; channel_filter < info.channels; channel_filter++)
	{
		// Init array
		for (int z = 0; z < channel_buffer_size; z++)
			channel_buffer[z] = 0.0f;

		// Loop through all samples and add them to our Frame based on channel.
		// Toggle through each channel number, since channel data is stored interleaved (left, right, left, right)
		int channel = 0;
		int position = 0;
		for (int sample = 0; sample < (nb_samples * info.channels); sample++)
		{
			// Only add samples for the current channel
			if (channel_filter == channel)
			{
				// Add sample (convert from the S16 range (-32768 to 32767) to (-1.0 to 1.0))
				channel_buffer[position] = resampled_samples[sample] * (1.0f / (1 << 15));

				// Increment audio position
				position++;
			}

			// Increment channel (if needed)
			if ((channel + 1) < info.channels)
				// Move to next channel
				channel ++;
			else
				// Reset channel
				channel = 0;
		}

		// Add samples to frame for this channel
		frame->AddAudio(true, channel_filter, 0, channel_buffer, position, 1.0f);

		AppendDebugMethod("FrameMapper::ResampleMappedAudio (Add audio to channel)", "number of samples", position, "channel_filter", channel_filter, "", -1, "", -1, "", -1, "", -1);
	}

	// Update frame's audio meta data
	frame->SampleRate(info.sample_rate);
	frame->ChannelsLayout(info.channel_layout);

	// Clear channel buffer
	delete[] channel_buffer;
	channel_buffer = NULL;

	// Delete arrays
	delete[] resampled_samples;
	resampled_samples = NULL;
}
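The heart of Example 1 is de-interleaving the resampled S16 buffer into per-channel float arrays before handing them to AddAudio. The standalone sketch below distills that step; the function name is hypothetical, and modulo indexing replaces the channel-toggling loop above:

	#include <stdint.h>
	#include <vector>

	// Sketch: split interleaved S16 samples (left, right, left, right, ...) into
	// per-channel float buffers normalized to -1.0..1.0, as in Example 1.
	std::vector<std::vector<float> > deinterleave_s16(const int16_t* samples,
	                                                  int nb_samples, int channels)
	{
		std::vector<std::vector<float> > out(channels, std::vector<float>(nb_samples, 0.0f));
		const float scale = 1.0f / (1 << 15); // map -32768..32767 onto roughly -1.0..1.0
		for (int sample = 0; sample < nb_samples * channels; sample++)
			out[sample % channels][sample / channels] = samples[sample] * scale;
		return out;
	}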
Example 2: add_layer
// Process a new layer of video or audio
void Timeline::add_layer(tr1::shared_ptr<Frame> new_frame, Clip* source_clip, long int clip_frame_number, long int timeline_frame_number, bool is_top_clip)
{
	// Get the clip's frame & image
	tr1::shared_ptr<Frame> source_frame = GetOrCreateFrame(source_clip, clip_frame_number);

	// No frame found... so bail
	if (!source_frame)
		return;

	// Debug output
	AppendDebugMethod("Timeline::add_layer", "new_frame->number", new_frame->number, "clip_frame_number", clip_frame_number, "timeline_frame_number", timeline_frame_number, "", -1, "", -1, "", -1);

	/* REPLACE IMAGE WITH WAVEFORM IMAGE (IF NEEDED) */
	if (source_clip->Waveform())
	{
		// Debug output
		AppendDebugMethod("Timeline::add_layer (Generate Waveform Image)", "source_frame->number", source_frame->number, "source_clip->Waveform()", source_clip->Waveform(), "clip_frame_number", clip_frame_number, "", -1, "", -1, "", -1);

		// Get the color of the waveform
		int red = source_clip->wave_color.red.GetInt(clip_frame_number);
		int green = source_clip->wave_color.green.GetInt(clip_frame_number);
		int blue = source_clip->wave_color.blue.GetInt(clip_frame_number);
		int alpha = source_clip->wave_color.alpha.GetInt(clip_frame_number);

		// Generate Waveform Dynamically (the size of the timeline)
		tr1::shared_ptr<QImage> source_image = source_frame->GetWaveform(info.width, info.height, red, green, blue, alpha);
		source_frame->AddImage(tr1::shared_ptr<QImage>(source_image));
	}

	/* Apply effects to the source frame (if any). If multiple clips are overlapping, only process the
	 * effects on the top clip. */
	if (is_top_clip)
		source_frame = apply_effects(source_frame, timeline_frame_number, source_clip->Layer());

	// Declare an image to hold the source frame's image
	tr1::shared_ptr<QImage> source_image;

	/* COPY AUDIO - with correct volume */
	if (source_clip->Reader()->info.has_audio) {
		// Debug output
		AppendDebugMethod("Timeline::add_layer (Copy Audio)", "source_clip->Reader()->info.has_audio", source_clip->Reader()->info.has_audio, "source_frame->GetAudioChannelsCount()", source_frame->GetAudioChannelsCount(), "info.channels", info.channels, "clip_frame_number", clip_frame_number, "timeline_frame_number", timeline_frame_number, "", -1);

		if (source_frame->GetAudioChannelsCount() == info.channels)
			for (int channel = 0; channel < source_frame->GetAudioChannelsCount(); channel++)
			{
				float initial_volume = 1.0f;
				float previous_volume = source_clip->volume.GetValue(clip_frame_number - 1); // previous frame's percentage of volume (0 to 1)
				float volume = source_clip->volume.GetValue(clip_frame_number); // percentage of volume (0 to 1)

				// If no ramp is needed, set initial volume = clip's volume
				if (isEqual(previous_volume, volume))
					initial_volume = volume;

				// Apply ramp to source frame (if needed)
				if (!isEqual(previous_volume, volume))
					source_frame->ApplyGainRamp(channel, 0, source_frame->GetAudioSamplesCount(), previous_volume, volume);

				// TODO: Improve FrameMapper (or Timeline) to always get the correct number of samples per frame.
				// Currently, the ResampleContext sometimes leaves behind a few samples for the next call, and the
				// number of samples returned is variable... and does not match the number expected.
				// This is a crude solution at best. =)
				if (new_frame->GetAudioSamplesCount() != source_frame->GetAudioSamplesCount())
					// Force the timeline frame to match the source frame
					new_frame->ResizeAudio(info.channels, source_frame->GetAudioSamplesCount(), info.sample_rate, info.channel_layout);

				// Copy audio samples (and set initial volume). Mix samples with existing audio samples. The gains are
				// added together, so set the gains carefully, so the sum does not exceed 1.0 (or audio distortion will happen).
				new_frame->AddAudio(false, channel, 0, source_frame->GetAudioSamples(channel), source_frame->GetAudioSamplesCount(), initial_volume);
			}
		else
			// Debug output
			AppendDebugMethod("Timeline::add_layer (No Audio Copied - Wrong # of Channels)", "source_clip->Reader()->info.has_audio", source_clip->Reader()->info.has_audio, "source_frame->GetAudioChannelsCount()", source_frame->GetAudioChannelsCount(), "info.channels", info.channels, "clip_frame_number", clip_frame_number, "timeline_frame_number", timeline_frame_number, "", -1);
	}

	// Skip out if this is only an audio frame
	if (!source_clip->Waveform() && !source_clip->Reader()->info.has_video)
		// Skip the rest of the image processing for performance reasons
		return;

	// Debug output
	AppendDebugMethod("Timeline::add_layer (Get Source Image)", "source_frame->number", source_frame->number, "source_clip->Waveform()", source_clip->Waveform(), "clip_frame_number", clip_frame_number, "", -1, "", -1, "", -1);

	// Get actual frame image data
	source_image = source_frame->GetImage();

	// Get some basic image properties
	int source_width = source_image->width();
	int source_height = source_image->height();

	/* ALPHA & OPACITY */
	if (source_clip->alpha.GetValue(clip_frame_number) != 1.0)
	{
		float alpha = source_clip->alpha.GetValue(clip_frame_number);

		// Get source image's pixels
		unsigned char *pixels = (unsigned char *) source_image->bits();

//......... part of the code omitted here .........
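Example 2 is truncated here by the source. Purely for illustration, the sketch below shows what a per-pixel opacity pass over the pixels buffer might look like; it is a hypothetical reconstruction, not the library's actual code, and it assumes a 4-bytes-per-pixel layout where byte 3 is the alpha channel:

	// Hypothetical sketch only -- the elided code above is NOT reproduced here.
	// Scale each pixel's alpha byte by the clip's alpha keyframe value,
	// assuming 4 bytes per pixel with alpha at byte 3 (an assumption).
	for (int pixel = 0; pixel < source_width * source_height; pixel++)
		pixels[pixel * 4 + 3] = (unsigned char) (pixels[pixel * 4 + 3] * alpha);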