This article collects typical usage examples of the C++ method Mixer::MixGetCurrentTime. If you are wondering what Mixer::MixGetCurrentTime does, how to call it, or what real code that uses it looks like, the hand-picked examples here should help. You can also explore further usage examples of its containing class, Mixer.
Nine code examples of Mixer::MixGetCurrentTime are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code examples.
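Every example below follows the same basic pattern: a Mixer is constructed over the requested time range [t0, t1], Mixer::Process() is called in a loop to pull mixed audio, and MixGetCurrentTime() is read after each block to report progress. The following is a minimal sketch of that pattern, simplified rather than verbatim Audacity code; ReportProgress is a hypothetical callback standing in for the various progress dialogs used below.
// The per-mille progress convention matches Examples 1, 3, 7 and 9; the newer
// code paths instead pass (elapsed, total) to ProgressDialog::Update().
bool MixWithProgress(Mixer *mixer, double t0, double t1, int maxBlockLen)
{
   bool cancelled = false;
   while (!cancelled) {
      sampleCount blockLen = mixer->Process(maxBlockLen);
      if (blockLen == 0)
         break;   // the whole [t0, t1] range has been mixed
      // ... consume mixer->GetBuffer() / mixer->GetBuffer(channel) here ...
      // MixGetCurrentTime() reports how far, in seconds, the mix has advanced,
      // so normalising against the requested range gives a 0..1000 value.
      int progressvalue = int(1000 * ((mixer->MixGetCurrentTime() - t0) / (t1 - t0)));
      cancelled = !ReportProgress(progressvalue);   // hypothetical UI callback
   }
   return !cancelled;
}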
Example 1: MixAndRender
bool MixAndRender(TrackList *tracks, TrackFactory *trackFactory,
double rate, sampleFormat format,
double startTime, double endTime,
WaveTrack **newLeft, WaveTrack **newRight)
{
// This function was formerly known as "Quick Mix". It takes one or
// more tracks as input; of all tracks that are selected, it mixes
// them together, applying any envelopes, amplitude gain, panning,
// and real-time effects in the process. The resulting pair of
// tracks (stereo) are "rendered" and have no effects, gain, panning,
// or envelopes.
WaveTrack **waveArray;
Track *t;
int numWaves = 0;
int numMono = 0;
bool mono = false;
int w;
TrackListIterator iter(tracks);
t = iter.First();
while (t) {
if (t->GetSelected() && t->GetKind() == Track::Wave) {
numWaves++;
float pan = ((WaveTrack*)t)->GetPan();
if (t->GetChannel() == Track::MonoChannel && pan == 0)
numMono++;
}
t = iter.Next();
}
if (numMono == numWaves)
mono = true;
double totalTime = 0.0;
waveArray = new WaveTrack *[numWaves];
w = 0;
t = iter.First();
while (t) {
if (t->GetSelected() && t->GetKind() == Track::Wave) {
waveArray[w++] = (WaveTrack *) t;
if (t->GetEndTime() > totalTime)
totalTime = t->GetEndTime();
}
t = iter.Next();
}
WaveTrack *mixLeft = trackFactory->NewWaveTrack(format, rate);
mixLeft->SetName(_("Mix"));
WaveTrack *mixRight = 0;
if (mono) {
mixLeft->SetChannel(Track::MonoChannel);
}
else {
mixRight = trackFactory->NewWaveTrack(format, rate);
mixRight->SetName(_("Mix"));
mixLeft->SetChannel(Track::LeftChannel);
mixRight->SetChannel(Track::RightChannel);
mixLeft->SetLinked(true);
mixRight->SetTeamed(true);
}
int maxBlockLen = mixLeft->GetIdealBlockSize();
if (startTime == endTime) {
startTime = 0.0;
endTime = totalTime;
}
Mixer *mixer = new Mixer(numWaves, waveArray, tracks->GetTimeTrack(),
startTime, endTime, mono ? 1 : 2, maxBlockLen, false,
rate, format);
wxYield();
GetActiveProject()->ProgressShow(_NoAcc("&Mix and Render"),
_("Mixing and rendering tracks"));
wxBusyCursor busy;
bool cancelling = false;
while(!cancelling) {
sampleCount blockLen = mixer->Process(maxBlockLen);
if (blockLen == 0)
break;
if (mono) {
samplePtr buffer = mixer->GetBuffer();
mixLeft->Append(buffer, format, blockLen);
}
else {
samplePtr buffer;
buffer = mixer->GetBuffer(0);
mixLeft->Append(buffer, format, blockLen);
buffer = mixer->GetBuffer(1);
mixRight->Append(buffer, format, blockLen);
}
int progressvalue = int (1000 * (mixer->MixGetCurrentTime() / totalTime));
//......... part of the code omitted here .........
Example 2: Export
//......... part of the code omitted here .........
if (levelPref < 0 || levelPref > 8) {
levelPref = 5;
}
encoder.set_do_exhaustive_model_search(flacLevels[levelPref].do_exhaustive_model_search);
encoder.set_do_escape_coding(flacLevels[levelPref].do_escape_coding);
if (numChannels != 2) {
encoder.set_do_mid_side_stereo(false);
encoder.set_loose_mid_side_stereo(false);
}
else {
encoder.set_do_mid_side_stereo(flacLevels[levelPref].do_mid_side_stereo);
encoder.set_loose_mid_side_stereo(flacLevels[levelPref].loose_mid_side_stereo);
}
encoder.set_qlp_coeff_precision(flacLevels[levelPref].qlp_coeff_precision);
encoder.set_min_residual_partition_order(flacLevels[levelPref].min_residual_partition_order);
encoder.set_max_residual_partition_order(flacLevels[levelPref].max_residual_partition_order);
encoder.set_rice_parameter_search_dist(flacLevels[levelPref].rice_parameter_search_dist);
encoder.set_max_lpc_order(flacLevels[levelPref].max_lpc_order);
#ifdef LEGACY_FLAC
encoder.init();
#else
wxFFile f; // will be closed when it goes out of scope
if (!f.Open(fName, wxT("w+b"))) {
wxMessageBox(wxString::Format(_("FLAC export couldn't open %s"), fName.c_str()));
return false;
}
// Even though there is an init() method that takes a filename, use the one that
// takes a file handle because wxWidgets can open a file with a Unicode name and
// libflac can't (under Windows).
int status = encoder.init(f.fp());
if (status != FLAC__STREAM_ENCODER_INIT_STATUS_OK) {
wxMessageBox(wxString::Format(_("FLAC encoder failed to initialize\nStatus: %d"), status));
return false;
}
#endif
if (mMetadata) {
::FLAC__metadata_object_delete(mMetadata);
}
int numWaveTracks;
WaveTrack **waveTracks;
tracks->GetWaveTracks(selectionOnly, &numWaveTracks, &waveTracks);
Mixer *mixer = CreateMixer(numWaveTracks, waveTracks,
tracks->GetTimeTrack(),
t0, t1,
numChannels, SAMPLES_PER_RUN, false,
rate, format, true, mixerSpec);
delete [] waveTracks;
int i, j;
FLAC__int32 **tmpsmplbuf = new FLAC__int32*[numChannels];
for (i = 0; i < numChannels; i++) {
tmpsmplbuf[i] = (FLAC__int32 *) calloc(SAMPLES_PER_RUN, sizeof(FLAC__int32));
}
{
ProgressDialog progress(wxFileName(fName).GetName(),
selectionOnly ?
_("Exporting the selected audio as FLAC") :
_("Exporting the entire project as FLAC"));
while (updateResult == eProgressSuccess) {
sampleCount samplesThisRun = mixer->Process(SAMPLES_PER_RUN);
if (samplesThisRun == 0) { //stop encoding
break;
}
else {
for (i = 0; i < numChannels; i++) {
samplePtr mixed = mixer->GetBuffer(i);
if (format == int24Sample) {
for (j = 0; j < samplesThisRun; j++) {
tmpsmplbuf[i][j] = ((int *)mixed)[j];
}
}
else {
for (j = 0; j < samplesThisRun; j++) {
tmpsmplbuf[i][j] = ((short *)mixed)[j];
}
}
}
encoder.process(tmpsmplbuf, samplesThisRun);
}
updateResult = progress.Update(mixer->MixGetCurrentTime() - t0, t1 - t0);
}
f.Detach(); // libflac closes the file
encoder.finish();
}
for (i = 0; i < numChannels; i++) {
free(tmpsmplbuf[i]);
}
delete mixer;
delete[] tmpsmplbuf;
return updateResult;
}
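Example 2 indexes a compression-level table, flacLevels[levelPref], whose declaration lies outside the excerpt. Inferring purely from the fields accessed above, one entry plausibly looks like the following; this is a reconstruction under assumptions, not the verbatim Audacity declaration.
// Hypothetical shape of a flacLevels[] entry; the field names mirror the
// libFLAC++ encoder setters called in Example 2, and the integer fields use
// unsigned to match the set_*() parameter types.
struct FLACOption {
   bool     do_exhaustive_model_search;
   bool     do_escape_coding;
   bool     do_mid_side_stereo;
   bool     loose_mid_side_stereo;
   unsigned qlp_coeff_precision;
   unsigned min_residual_partition_order;
   unsigned max_residual_partition_order;
   unsigned rice_parameter_search_dist;
   unsigned max_lpc_order;
};
// levelPref selects one of nine presets (0 = fastest, 8 = best compression),
// which is why the code above resets out-of-range values to 5.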
Example 3: Export
//......... part of the code omitted here .........
format = int16Sample;
int maxBlockLen = 44100 * 5;
wxProgressDialog *progress = NULL;
wxYield();
wxStartTimer();
wxBusyCursor busy;
bool cancelling = false;
int numWaveTracks;
WaveTrack **waveTracks;
tracks->GetWaveTracks(mExportSelection, &numWaveTracks, &waveTracks);
Mixer *mixer = new Mixer(numWaveTracks, waveTracks,
tracks->GetTimeTrack(),
m_t0, m_t1,
info.channels, maxBlockLen, true,
mInRate, format);
while(!cancelling) {
sampleCount numSamples = mixer->Process(maxBlockLen);
if (numSamples == 0)
break;
samplePtr mixed = mixer->GetBuffer();
if (format == int16Sample)
sf_writef_short(sf, (short *)mixed, numSamples);
else
sf_writef_float(sf, (float *)mixed, numSamples);
if (!progress && wxGetElapsedTime(false) > 500) {
wxString message;
if (mExportSelection)
message = wxString::Format(_("Exporting the selected audio as a %s file"),
(const char *) formatStr);
else
message = wxString::Format(_("Exporting the entire project as a %s file"),
(const char *) formatStr);
progress = new wxProgressDialog(_("Export"),
message,
1000,
parent,
wxPD_CAN_ABORT | wxPD_REMAINING_TIME | wxPD_AUTO_HIDE);
}
if (progress) {
int progressvalue = int (1000 * ((mixer->MixGetCurrentTime()-m_t0) /
(m_t1-m_t0)));
cancelling = !progress->Update(progressvalue);
}
}
delete mixer;
delete[] waveTracks;
err = sf_close(sf);
if (err) {
char buffer[1000];
sf_error_str(sf, buffer, 1000);
wxMessageBox(wxString::Format(_("Error (file may not have been written): %s"),
buffer));
}
#ifdef __WXMAC__
FSSpec spec;
wxMacFilename2FSSpec(fName, &spec);
FInfo finfo;
if (FSpGetFInfo(&spec, &finfo) == noErr) {
finfo.fdType = sf_header_mactype(mSoundFileFormat & SF_FORMAT_TYPEMASK);
finfo.fdCreator = AUDACITY_CREATOR;
FSpSetFInfo(&spec, &finfo);
}
#endif
if (progress)
delete progress;
return true;
}
Example 4: EncodeAudioFrame
//......... part of the code omitted here .........
if (mEncAudioCodecCtx->frame_size == 1) { wxASSERT(pkt.size == mEncAudioEncodedBufSiz); }
if (pkt.size < 0)
{
wxLogMessage(wxT("FFmpeg : ERROR - Can't encode audio frame."));
return false;
}
// Rescale from the codec time_base to the AVStream time_base.
if (mEncAudioCodecCtx->coded_frame && mEncAudioCodecCtx->coded_frame->pts != int64_t(AV_NOPTS_VALUE))
pkt.pts = FFmpegLibsInst->av_rescale_q(mEncAudioCodecCtx->coded_frame->pts, mEncAudioCodecCtx->time_base, mEncAudioStream->time_base);
//wxLogMessage(wxT("FFmpeg : (%d) Writing audio frame with PTS: %lld."), mEncAudioCodecCtx->frame_number, pkt.pts);
pkt.stream_index = mEncAudioStream->index;
pkt.data = mEncAudioEncodedBuf;
pkt.flags |= PKT_FLAG_KEY;
// Write the encoded audio frame to the output file.
if ((ret = FFmpegLibsInst->av_interleaved_write_frame(mEncFormatCtx, &pkt)) != 0)
{
wxLogMessage(wxT("FFmpeg : ERROR - Failed to write audio frame to file."));
return false;
}
}
return true;
}
int ExportFFmpeg::Export(AudacityProject *project,
int channels, wxString fName,
bool selectionOnly, double t0, double t1, MixerSpec *mixerSpec, Tags *metadata, int subformat)
{
if (!CheckFFmpegPresence())
return false;
mChannels = channels;
// subformat index may not correspond directly to fmts[] index, convert it
mSubFormat = AdjustFormatIndex(subformat);
if (channels > ExportFFmpegOptions::fmts[mSubFormat].maxchannels)
{
wxLogMessage(wxT("Attempted to export %d channels, but max. channels = %d"),channels,ExportFFmpegOptions::fmts[mSubFormat].maxchannels);
wxMessageBox(wxString::Format(_("Attempted to export %d channels, but max. channels for selected output format is %d"),channels,ExportFFmpegOptions::fmts[mSubFormat].maxchannels),_("Error"));
return false;
}
mName = fName;
TrackList *tracks = project->GetTracks();
bool ret = true;
if (mSubFormat >= FMT_LAST) return false;
wxString shortname(ExportFFmpegOptions::fmts[mSubFormat].shortname);
if (mSubFormat == FMT_OTHER)
shortname = gPrefs->Read(wxT("/FileFormats/FFmpegFormat"),wxT("matroska"));
ret = Init(shortname.mb_str(),project, metadata);
if (!ret) return false;
int pcmBufferSize = 1024;
int numWaveTracks;
WaveTrack **waveTracks;
tracks->GetWaveTracks(selectionOnly, &numWaveTracks, &waveTracks);
Mixer *mixer = new Mixer(numWaveTracks, waveTracks,
tracks->GetTimeTrack(),
t0, t1,
channels, pcmBufferSize, true,
mSampleRate, int16Sample, true, mixerSpec);
delete [] waveTracks;
ProgressDialog *progress = new ProgressDialog(wxFileName(fName).GetName(),
selectionOnly ?
wxString::Format(_("Exporting selected audio as %s"), ExportFFmpegOptions::fmts[mSubFormat].description) :
wxString::Format(_("Exporting entire file as %s"), ExportFFmpegOptions::fmts[mSubFormat].description));
int updateResult = eProgressSuccess;
while(updateResult == eProgressSuccess) {
sampleCount pcmNumSamples = mixer->Process(pcmBufferSize);
if (pcmNumSamples == 0)
break;
short *pcmBuffer = (short *)mixer->GetBuffer();
EncodeAudioFrame(pcmBuffer,(pcmNumSamples)*sizeof(int16_t)*mChannels);
updateResult = progress->Update(mixer->MixGetCurrentTime()-t0, t1-t0);
}
delete progress;
delete mixer;
Finalize();
return updateResult;
}
void AddStringTagUTF8(char field[], int size, wxString value)
{
memset(field,0,size);
memcpy(field,value.ToUTF8(),(int)strlen(value.ToUTF8()) > size -1 ? size -1 : strlen(value.ToUTF8()));
}
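The comment inside EncodeAudioFrame notes that packet timestamps are rescaled from the codec's time_base to the stream's time_base (the call goes through FFmpegLibsInst because Audacity loads FFmpeg dynamically). av_rescale_q(a, bq, cq) returns a * bq / cq with rounding, so with some purely hypothetical time bases the conversion works out as follows:
// Illustrative values only; the real time bases depend on the codec and container.
// If the codec counts PTS in 1/44100-second units and the muxer's stream clock
// ticks at 1/90000 second, one second of audio (PTS 44100) becomes 90000.
AVRational codec_tb  = { 1, 44100 };   // stands in for mEncAudioCodecCtx->time_base
AVRational stream_tb = { 1, 90000 };   // stands in for mEncAudioStream->time_base
int64_t pts_codec  = 44100;
int64_t pts_stream = av_rescale_q(pts_codec, codec_tb, stream_tb);   // == 90000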
Example 5: Export
int ExportFFmpeg::Export(AudacityProject *project,
int channels, const wxString &fName,
bool selectionOnly, double t0, double t1, MixerSpec *mixerSpec, const Tags *metadata, int subformat)
{
if (!CheckFFmpegPresence())
return false;
mChannels = channels;
// subformat index may not correspond directly to fmts[] index, convert it
mSubFormat = AdjustFormatIndex(subformat);
if (channels > ExportFFmpegOptions::fmts[mSubFormat].maxchannels)
{
wxMessageBox(
wxString::Format(
_("Attempted to export %d channels, but maximum number of channels for selected output format is %d"),
channels,
ExportFFmpegOptions::fmts[mSubFormat].maxchannels),
_("Error"));
return false;
}
mName = fName;
TrackList *tracks = project->GetTracks();
bool ret = true;
if (mSubFormat >= FMT_LAST) return false;
wxString shortname(ExportFFmpegOptions::fmts[mSubFormat].shortname);
if (mSubFormat == FMT_OTHER)
shortname = gPrefs->Read(wxT("/FileFormats/FFmpegFormat"),wxT("matroska"));
ret = Init(shortname.mb_str(),project, metadata, subformat);
if (!ret) return false;
int pcmBufferSize = 1024;
int numWaveTracks;
WaveTrack **waveTracks;
tracks->GetWaveTracks(selectionOnly, &numWaveTracks, &waveTracks);
Mixer *mixer = CreateMixer(numWaveTracks, waveTracks,
tracks->GetTimeTrack(),
t0, t1,
channels, pcmBufferSize, true,
mSampleRate, int16Sample, true, mixerSpec);
delete[] waveTracks;
int updateResult = eProgressSuccess;
{
ProgressDialog progress(wxFileName(fName).GetName(),
selectionOnly ?
wxString::Format(_("Exporting selected audio as %s"), ExportFFmpegOptions::fmts[mSubFormat].description) :
wxString::Format(_("Exporting entire file as %s"), ExportFFmpegOptions::fmts[mSubFormat].description));
while (updateResult == eProgressSuccess) {
sampleCount pcmNumSamples = mixer->Process(pcmBufferSize);
if (pcmNumSamples == 0)
break;
short *pcmBuffer = (short *)mixer->GetBuffer();
EncodeAudioFrame(pcmBuffer, (pcmNumSamples)*sizeof(int16_t)*mChannels);
updateResult = progress.Update(mixer->MixGetCurrentTime() - t0, t1 - t0);
}
}
delete mixer;
Finalize();
return updateResult;
}
Example 6: MixAndRender
//......... part of the code omitted here .........
mixLeft->SetName(usefulIter.First()->GetName()); /* set name of output track to be the same as the sole input track */
else
mixLeft->SetName(_("Mix"));
mixLeft->SetOffset(mixStartTime);
WaveTrack *mixRight = 0;
if (mono) {
mixLeft->SetChannel(Track::MonoChannel);
}
else {
mixRight = trackFactory->NewWaveTrack(format, rate);
if (oneinput) {
if (usefulIter.First()->GetLink() != NULL) // we have linked track
mixLeft->SetName(usefulIter.First()->GetLink()->GetName()); /* set name to match input track's right channel!*/
else
mixLeft->SetName(usefulIter.First()->GetName()); /* set name to that of sole input channel */
}
else
mixRight->SetName(_("Mix"));
mixLeft->SetChannel(Track::LeftChannel);
mixRight->SetChannel(Track::RightChannel);
mixRight->SetOffset(mixStartTime);
mixLeft->SetLinked(true);
}
int maxBlockLen = mixLeft->GetIdealBlockSize();
// If the caller didn't specify a time range, use the whole range in which
// any input track had clips in it.
if (startTime == endTime) {
startTime = mixStartTime;
endTime = mixEndTime;
}
Mixer *mixer = new Mixer(numWaves, waveArray,
Mixer::WarpOptions(tracks->GetTimeTrack()),
startTime, endTime, mono ? 1 : 2, maxBlockLen, false,
rate, format);
::wxSafeYield();
ProgressDialog *progress = new ProgressDialog(_("Mix and Render"),
_("Mixing and rendering tracks"));
int updateResult = eProgressSuccess;
while(updateResult == eProgressSuccess) {
sampleCount blockLen = mixer->Process(maxBlockLen);
if (blockLen == 0)
break;
if (mono) {
samplePtr buffer = mixer->GetBuffer();
mixLeft->Append(buffer, format, blockLen);
}
else {
samplePtr buffer;
buffer = mixer->GetBuffer(0);
mixLeft->Append(buffer, format, blockLen);
buffer = mixer->GetBuffer(1);
mixRight->Append(buffer, format, blockLen);
}
updateResult = progress->Update(mixer->MixGetCurrentTime() - startTime, endTime - startTime);
}
delete progress;
mixLeft->Flush();
if (!mono)
mixRight->Flush();
if (updateResult == eProgressCancelled || updateResult == eProgressFailed)
{
delete mixLeft;
if (!mono)
delete mixRight;
} else {
*newLeft = mixLeft;
if (!mono)
*newRight = mixRight;
#if 0
int elapsedMS = wxGetElapsedTime();
double elapsedTime = elapsedMS * 0.001;
double maxTracks = totalTime / (elapsedTime / numWaves);
// Note: these shouldn't be translated - they're for debugging
// and profiling only.
printf(" Tracks: %d\n", numWaves);
printf(" Mix length: %f sec\n", totalTime);
printf("Elapsed time: %f sec\n", elapsedTime);
printf("Max number of tracks to mix in real time: %f\n", maxTracks);
#endif
}
delete[] waveArray;
delete mixer;
return (updateResult == eProgressSuccess || updateResult == eProgressStopped);
}
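The profiling block disabled with #if 0 above estimates how many tracks could be mixed in real time from the measured throughput. With purely hypothetical figures, the arithmetic reads:
// Illustrative numbers only: 4 tracks, a 60-second mix, 3 seconds of wall-clock time.
// Per-track cost is 3 s / 4 = 0.75 s per 60 s of audio, so roughly
// 60 / 0.75 = 80 such tracks could be mixed without falling behind real time.
int    numWaves    = 4;
double totalTime   = 60.0;
double elapsedTime = 3.0;
double maxTracks   = totalTime / (elapsedTime / numWaves);   // 80.0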
Example 7: ExportOGG
//......... part of the code omitted here .........
WaveTrack **waveTracks;
tracks->GetWaveTracks(selectionOnly, &numWaveTracks, &waveTracks);
Mixer *mixer = new Mixer(numWaveTracks, waveTracks,
tracks->GetTimeTrack(),
t0, t1,
stereo? 2: 1, SAMPLES_PER_RUN, false,
rate, floatSample);
while(!cancelling && !eos) {
float **vorbis_buffer = vorbis_analysis_buffer(&dsp, SAMPLES_PER_RUN);
sampleCount samplesThisRun = mixer->Process(SAMPLES_PER_RUN);
if (samplesThisRun == 0) {
// Tell the library that we wrote 0 bytes - signalling the end.
vorbis_analysis_wrote(&dsp, 0);
}
else {
float *left = (float *)mixer->GetBuffer(0);
memcpy(vorbis_buffer[0], left, sizeof(float)*SAMPLES_PER_RUN);
if(stereo) {
float *right = (float *)mixer->GetBuffer(1);
memcpy(vorbis_buffer[1], right, sizeof(float)*SAMPLES_PER_RUN);
}
// tell the encoder how many samples we have
vorbis_analysis_wrote(&dsp, samplesThisRun);
}
// I don't understand what this call does, so here is the comment
// from the example, verbatim:
//
// vorbis does some data preanalysis, then divvies up blocks
// for more involved (potentially parallel) processing. Get
// a single block for encoding now
while(vorbis_analysis_blockout(&dsp, &block) == 1) {
// analysis, assume we want to use bitrate management
vorbis_analysis(&block, NULL);
vorbis_bitrate_addblock(&block);
while(vorbis_bitrate_flushpacket(&dsp, &packet)) {
// add the packet to the bitstream
ogg_stream_packetin(&stream, &packet);
// From vorbis-tools-1.0/oggenc/encode.c:
// If we've gone over a page boundary, we can do actual output,
// so do so (for however many pages are available).
while (!eos) {
int result = ogg_stream_pageout(&stream, &page);
if (!result)
break;
outFile.Write(page.header, page.header_len);
outFile.Write(page.body, page.body_len);
if (ogg_page_eos(&page))
eos = 1;
}
}
}
if(progress) {
int progressvalue = int (1000 * ((mixer->MixGetCurrentTime()-t0) /
(t1-t0)));
cancelling = !progress->Update(progressvalue);
}
else if(wxGetElapsedTime(false) > 500) {
wxString message = selectionOnly ?
_("Exporting the selected audio as Ogg Vorbis") :
_("Exporting the entire project as Ogg Vorbis");
progress = new wxProgressDialog(
_("Export"),
message,
1000,
parent,
wxPD_CAN_ABORT | wxPD_REMAINING_TIME | wxPD_AUTO_HIDE);
}
}
delete mixer;
ogg_stream_clear(&stream);
vorbis_block_clear(&block);
vorbis_dsp_clear(&dsp);
vorbis_info_clear(&info);
outFile.Close();
if(progress)
delete progress;
return true;
}
Example 8: Export
//......... part of the code omitted here .........
ogg_stream_packetin(&stream, &bitstream_header);
ogg_stream_packetin(&stream, &comment_header);
ogg_stream_packetin(&stream, &codebook_header);
// Flushing these headers now guarantees that audio data will
// start on a NEW page, which apparently makes streaming easier
while (ogg_stream_flush(&stream, &page)) {
outFile.Write(page.header, page.header_len);
outFile.Write(page.body, page.body_len);
}
int numWaveTracks;
WaveTrack **waveTracks;
tracks->GetWaveTracks(selectionOnly, &numWaveTracks, &waveTracks);
Mixer *mixer = CreateMixer(numWaveTracks, waveTracks,
tracks->GetTimeTrack(),
t0, t1,
numChannels, SAMPLES_PER_RUN, false,
rate, floatSample, true, mixerSpec);
delete [] waveTracks;
ProgressDialog *progress = new ProgressDialog(wxFileName(fName).GetName(),
selectionOnly ?
_("Exporting the selected audio as Ogg Vorbis") :
_("Exporting the entire project as Ogg Vorbis"));
while (updateResult == eProgressSuccess && !eos) {
float **vorbis_buffer = vorbis_analysis_buffer(&dsp, SAMPLES_PER_RUN);
sampleCount samplesThisRun = mixer->Process(SAMPLES_PER_RUN);
if (samplesThisRun == 0) {
// Tell the library that we wrote 0 bytes - signalling the end.
vorbis_analysis_wrote(&dsp, 0);
}
else {
for (int i = 0; i < numChannels; i++) {
float *temp = (float *)mixer->GetBuffer(i);
memcpy(vorbis_buffer[i], temp, sizeof(float)*SAMPLES_PER_RUN);
}
// tell the encoder how many samples we have
vorbis_analysis_wrote(&dsp, samplesThisRun);
}
// I don't understand what this call does, so here is the comment
// from the example, verbatim:
//
// vorbis does some data preanalysis, then divvies up blocks
// for more involved (potentially parallel) processing. Get
// a single block for encoding now
while (vorbis_analysis_blockout(&dsp, &block) == 1) {
// analysis, assume we want to use bitrate management
vorbis_analysis(&block, NULL);
vorbis_bitrate_addblock(&block);
while (vorbis_bitrate_flushpacket(&dsp, &packet)) {
// add the packet to the bitstream
ogg_stream_packetin(&stream, &packet);
// From vorbis-tools-1.0/oggenc/encode.c:
// If we've gone over a page boundary, we can do actual output,
// so do so (for however many pages are available).
while (!eos) {
int result = ogg_stream_pageout(&stream, &page);
if (!result) {
break;
}
outFile.Write(page.header, page.header_len);
outFile.Write(page.body, page.body_len);
if (ogg_page_eos(&page)) {
eos = 1;
}
}
}
}
updateResult = progress->Update(mixer->MixGetCurrentTime()-t0, t1-t0);
}
delete progress;
delete mixer;
ogg_stream_clear(&stream);
vorbis_block_clear(&block);
vorbis_dsp_clear(&dsp);
vorbis_info_clear(&info);
vorbis_comment_clear(&comment);
outFile.Close();
return updateResult;
}
Example 9: ExportCL
bool ExportCL(AudacityProject *project, bool stereo, wxString fName,
bool selectionOnly, double t0, double t1, MixerSpec *mixerSpec)
{
int rate = int(project->GetRate() + 0.5);
wxWindow *parent = project;
TrackList *tracks = project->GetTracks();
wxString command = gPrefs->Read(wxT("/FileFormats/ExternalProgramExportCommand"), wxT("lame - '%f'"));
command.Replace(wxT("%f"), fName);
/* establish parameters */
int channels = stereo ? 2 : 1;
unsigned long totalSamples = (unsigned long)((t1 - t0) * rate + 0.5);
unsigned long sampleBytes = totalSamples * channels * SAMPLE_SIZE(int16Sample);
/* fill up the wav header */
wav_header header;
header.riffID[0] = 'R';
header.riffID[1] = 'I';
header.riffID[2] = 'F';
header.riffID[3] = 'F';
header.riffType[0] = 'W';
header.riffType[1] = 'A';
header.riffType[2] = 'V';
header.riffType[3] = 'E';
header.lenAfterRiff = sampleBytes + 32;
header.fmtID[0] = 'f';
header.fmtID[1] = 'm';
header.fmtID[2] = 't';
header.fmtID[3] = ' ';
header.formatChunkLen = 16;
header.formatTag = 1;
header.channels = channels;
header.sampleRate = rate;
header.bitsPerSample = SAMPLE_SIZE(int16Sample) * 8;
header.blockAlign = header.channels * (header.bitsPerSample / 8);
header.avgBytesPerSec = header.sampleRate * header.blockAlign;
header.dataID[0] = 'd';
header.dataID[1] = 'a';
header.dataID[2] = 't';
header.dataID[3] = 'a';
header.dataLen = sampleBytes;
FILE *pipe = popen(OSFILENAME(command), "w");
/* write the header */
fwrite( &header, sizeof(wav_header), 1, pipe );
sampleCount maxBlockLen = 44100 * 5;
bool cancelling = false;
int numWaveTracks;
WaveTrack **waveTracks;
tracks->GetWaveTracks(selectionOnly, &numWaveTracks, &waveTracks);
Mixer *mixer = new Mixer(numWaveTracks, waveTracks,
tracks->GetTimeTrack(),
t0, t1,
channels, maxBlockLen, true,
rate, int16Sample, true, mixerSpec);
GetActiveProject()->ProgressShow(_("Export"),
selectionOnly ?
_("Exporting the selected audio using command-line encoder") :
_("Exporting the entire project using command-line encoder"));
while(!cancelling) {
sampleCount numSamples = mixer->Process(maxBlockLen);
if (numSamples == 0)
break;
samplePtr mixed = mixer->GetBuffer();
char *buffer = new char[numSamples * SAMPLE_SIZE(int16Sample) * channels];
wxASSERT(buffer);
// Byte-swapping is necessary on big-endian machines, since
// WAV files are little-endian
#if wxBYTE_ORDER == wxBIG_ENDIAN
{
short *buffer = (short*)mixed;
for( int i = 0; i < numSamples; i++ )
buffer[i] = wxINT16_SWAP_ON_BE(buffer[i]);
}
#endif
fwrite( mixed, numSamples * channels * SAMPLE_SIZE(int16Sample), 1, pipe );
int progressvalue = int (1000 * ((mixer->MixGetCurrentTime()-t0) /
(t1-t0)));
cancelling = !GetActiveProject()->ProgressUpdate(progressvalue);
delete[]buffer;
}
GetActiveProject()->ProgressHide();
//......... part of the code omitted here .........
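Example 9 fills a wav_header struct that is declared elsewhere in ExportCL.cpp before piping the result to the external encoder. A layout consistent with the fields written above would be the following; it is an assumed reconstruction of the canonical 44-byte RIFF/WAVE header, not the verbatim Audacity declaration.
// Hypothetical reconstruction; the members happen to align naturally, so the
// struct is 44 bytes with no padding on common compilers.
typedef struct {
   char     riffID[4];        // "RIFF"
   wxUint32 lenAfterRiff;     // bytes in the file after this field
   char     riffType[4];      // "WAVE"
   char     fmtID[4];         // "fmt "
   wxUint32 formatChunkLen;   // 16 for plain PCM
   wxUint16 formatTag;        // 1 = PCM
   wxUint16 channels;
   wxUint32 sampleRate;
   wxUint32 avgBytesPerSec;   // sampleRate * blockAlign
   wxUint16 blockAlign;       // channels * bytes per sample
   wxUint16 bitsPerSample;
   char     dataID[4];        // "data"
   wxUint32 dataLen;          // raw sample bytes that follow
} wav_header;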