

C++ AudioTrack Class Code Examples

This article collects typical usage examples of the AudioTrack class in C++. If you are wondering how the C++ AudioTrack class is used in practice, or looking for concrete AudioTrack examples, the selected code samples below may help.


A total of 15 AudioTrack class code examples are shown below, sorted by popularity by default.
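Before the examples, here is a minimal, self-contained sketch of driving the class directly. It assumes the legacy AOSP C++ AudioTrack API from libmedia (<media/AudioTrack.h>), the same API used in Examples 1, 2, 7 and 13 below; the helper name playSilenceOnce() and the exact constructor parameter list are illustrative assumptions, since signatures vary between Android releases.

// Minimal usage sketch (not taken from the projects below); must be built
// inside the AOSP tree against libmedia, and API details vary by version.
#include <media/AudioTrack.h>       // legacy native AudioTrack
#include <utils/Errors.h>           // NO_ERROR
#include <utils/StrongPointer.h>    // android::sp

using namespace android;

// Hypothetical helper: play 100 ms of stereo silence on AUDIO_STREAM_MUSIC.
static bool playSilenceOnce()
{
    sp<AudioTrack> track = new AudioTrack(
        AUDIO_STREAM_MUSIC,           // stream type
        44100,                        // sample rate in Hz
        AUDIO_FORMAT_PCM_16_BIT,      // sample format
        AUDIO_CHANNEL_OUT_STEREO,     // channel mask
        0);                           // frameCount: 0 lets AudioTrack pick a minimum

    if (track->initCheck() != NO_ERROR) {
        return false;                 // bad parameters or no audio output available
    }

    track->setVolume(1.0f, 1.0f);     // left / right, as in Examples 2 and 13
    track->start();

    int16_t buffer[4410 * 2] = {};    // 100 ms of 16-bit stereo silence at 44.1 kHz
    track->write(buffer, sizeof(buffer));

    track->stop();
    return true;
}

The examples that follow use the same lifecycle: construct the track (or call AudioTrack::set()), verify initCheck(), then start(), write() or feed a callback, and finally stop() and release.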

Example 1: android_mute

/**
 * \brief mute output
 * \param audec pointer to audec
 * \param en  1 = mute, 0 = unmute
 * \return 0 on success otherwise negative error code
 */
extern "C" int android_mute(struct aml_audio_dec* audec, adec_bool_t en)
{
    adec_print("android out mute");

    audio_out_operations_t *out_ops = &audec->aout_ops;
    AudioTrack *track = (AudioTrack *)out_ops->private_data;

    Mutex::Autolock _l(mLock);

    if (!track) {
        adec_print("No track instance!\n");
        return -1;
    }

    track->mute(en);

    return 0;
}
Developer ID: VanirAOSP, Project: packages_amlogic, Lines of code: 24, Source: android-out.cpp

Example 2: android_set_volume

/**
 * \brief set output volume
 * \param audec pointer to audec
 * \param vol volume value
 * \return 0 on success otherwise negative error code
 */
extern "C" int android_set_volume(struct aml_audio_dec* audec, float vol)
{
    adec_print("android set volume");

    audio_out_operations_t *out_ops = &audec->aout_ops;
    AudioTrack *track = (AudioTrack *)out_ops->private_data;

    Mutex::Autolock _l(mLock);

    if (!track) {
        adec_print("No track instance!\n");
        return -1;
    }

    track->setVolume(vol, vol);

    return 0;
}
Developer ID: VanirAOSP, Project: packages_amlogic, Lines of code: 24, Source: android-out.cpp

Example 3: aluChannelsFromFormat

status_t APlaybackDevice::open()
{
    status_t err;
    int sampleRateInHz;
    int channelConfig;
    int audioFormat;
    int bufferSizeInBytes;

    LOG_FUNC_START

    sampleRateInHz = mDevice->Frequency;
    channelConfig = aluChannelsFromFormat(mDevice->Format) == 1 ?
        AUDIO_CHANNEL_OUT_MONO : AUDIO_CHANNEL_OUT_STEREO;

    audioFormat = aluBytesFromFormat(mDevice->Format) == 1 ?
        AUDIO_FORMAT_PCM_8_BIT : AUDIO_FORMAT_PCM_16_BIT;

    err = AudioTrack::getMinFrameCount(&bufferSizeInBytes, audioFormat, sampleRateInHz);
    RETURN_IF(err);

    LOGV("rate(%i), channel(%i), format(%i), buffSize(%i), numUpdates(%i)",
        sampleRateInHz, channelConfig, audioFormat, bufferSizeInBytes, mDevice->NumUpdates);

    err = mAudioTrack.set(AUDIO_STREAM_MUSIC,
                          sampleRateInHz,
                          audioFormat,
                          channelConfig,
                          bufferSizeInBytes,     // frameCount
                          0,                     // flags
                          0, 0);                 // callback, callback data (user)
    RETURN_IF(err);

    err = mAudioTrack.initCheck();
    RETURN_IF(err);

    if(mBuffer) {
        delete mBuffer;
    }
    mBuffer = new AAudioBuffer(bufferSizeInBytes);

    LOG_FUNC_END

    return NO_ERROR;
}
Developer ID: dexmas, Project: WaloEngine, Lines of code: 44, Source: android.cpp

Example 4: getAudioTrackVolume

PyObject* getAudioTrackVolume(PyObject*, PyObject* args)
{
	const char* trackname;

	if (!PyArg_ParseTuple(args, "s", &trackname))
	{
		return NULL;
	}

	Track* t = song->findTrack(QString(trackname));
	if (t == NULL)
		return NULL;

	if (t->type() == Track::DRUM || t->type() == Track::MIDI)
		return NULL;

	AudioTrack* track = (AudioTrack*) t;
	return Py_BuildValue("d", track->volume());
}
Developer ID: OpenGanesh, Project: oom, Lines of code: 19, Source: pyapi.cpp

Example 5: aluFrameSizeFromFormat

int APlaybackDevice::handlePlayback()
{
    int bufferSizeInSamples;

    bufferSizeInSamples = mBuffer->size() / aluFrameSizeFromFormat(mDevice->Format);

    mAudioTrack.start();

    while(mPlaybackEnabled) {
        aluMixData(mDevice, mBuffer->data(), bufferSizeInSamples);
        if(!write(mBuffer)) {
            LOGE("Can't write audio buffer into audio track");
            mPlaybackEnabled = false;
        }
    }

    mAudioTrack.stop();
    mAudioTrack.flush();
    return 0;
}
Developer ID: dexmas, Project: WaloEngine, Lines of code: 20, Source: android.cpp

Example 6: append

void AudioTrackList::append(PassRefPtr<AudioTrack> prpTrack)
{
    RefPtr<AudioTrack> track = prpTrack;

    // Insert tracks in the media file order.
    size_t index = track->inbandTrackIndex();
    size_t insertionIndex;
    for (insertionIndex = 0; insertionIndex < m_inbandTracks.size(); ++insertionIndex) {
        AudioTrack* otherTrack = static_cast<AudioTrack*>(m_inbandTracks[insertionIndex].get());
        if (otherTrack->inbandTrackIndex() > index)
            break;
    }
    m_inbandTracks.insert(insertionIndex, track);


    ASSERT(!track->mediaElement() || track->mediaElement() == mediaElement());
    track->setMediaElement(mediaElement());

    scheduleAddTrackEvent(track.release());
}
Developer ID: AndriyKalashnykov, Project: webkit, Lines of code: 20, Source: AudioTrackList.cpp

Example 7: android_stop

/**
 * \brief stop output
 * \param audec pointer to audec
 * \return 0 on success otherwise negative error code
 */
extern "C" int android_stop(struct aml_audio_dec* audec)
{
    adec_print("android out stop");

    audio_out_operations_t *out_ops = &audec->aout_ops;
    AudioTrack *track = (AudioTrack *)out_ops->private_data;

    Mutex::Autolock _l(mLock);

    if (!track) {
        adec_print("No track instance!\n");
        return -1;
    }

    track->stop();

    /* release AudioTrack */
    delete track;
    out_ops->private_data = NULL;

    return 0;
}
Developer ID: VanirAOSP, Project: packages_amlogic, Lines of code: 27, Source: android-out.cpp

Example 8: while

bool APlaybackDevice::write(AAudioBuffer* buffer)
{
    ssize_t length, size;

    length = 0;
    while(length < buffer->size()) {
        size = mAudioTrack.write(buffer->data() + length, buffer->size() - length);
        if(size < 0) {
            return false;
        }
        length += size;
    }
    return true;
}
Developer ID: dexmas, Project: WaloEngine, Lines of code: 14, Source: android.cpp

Example 9: routingChanged

void AudioPortConfig::routingChanged()
{
    //---------------------------------------------------
    //  populate lists
    //---------------------------------------------------

    routeList->clear();
    newSrcList->clear();
    newDstList->clear();
    tracksList->clear();
    btnConnectOut->setEnabled(false);
    connectButton->setEnabled(false);
    removeButton->setEnabled(false);

    TrackList* tl = song->tracks();
    for (ciTrack i = tl->begin(); i != tl->end(); ++i)
    {
        if ((*i)->isMidiTrack())
            continue;
        AudioTrack* track = (AudioTrack*) (*i);
        if (track->type() == Track::WAVE_OUTPUT_HELPER || track->type() == Track::WAVE_INPUT_HELPER)
        {
            for (int channel = 0; channel < track->channels(); ++channel)
            {
                Route r(track, channel);
                tracksList->addItem(r.name());
            }
        }
        else
            tracksList->addItem(Route(track, -1).name());
    }
    if(selectedIndex < tracksList->count())
        tracksList->setCurrentRow(selectedIndex, QItemSelectionModel::ClearAndSelect);
    //if(_selected)
    //	setSelected(_selected->name());
}
Developer ID: Adamiko, Project: los, Lines of code: 36, Source: apconfig.cpp

Example 10: LOGD

status_t MediaPlayerService::AudioOutput::open(uint32_t sampleRate, int channelCount, int format, int bufferCount)
{
    // Check argument "bufferCount" against the minimum buffer count
    if (bufferCount < mMinBufferCount) {
        LOGD("bufferCount (%d) is too small and increased to %d", bufferCount, mMinBufferCount);
        bufferCount = mMinBufferCount;

    }
    LOGV("open(%u, %d, %d, %d)", sampleRate, channelCount, format, bufferCount);
    if (mTrack) close();
    int afSampleRate;
    int afFrameCount;
    int frameCount;

    if (AudioSystem::getOutputFrameCount(&afFrameCount, mStreamType) != NO_ERROR) {
        return NO_INIT;
    }
    if (AudioSystem::getOutputSamplingRate(&afSampleRate, mStreamType) != NO_ERROR) {
        return NO_INIT;
    }

    frameCount = (sampleRate*afFrameCount*bufferCount)/afSampleRate;
    AudioTrack *t = new AudioTrack(mStreamType, sampleRate, format, channelCount, frameCount);
    if ((t == 0) || (t->initCheck() != NO_ERROR)) {
        LOGE("Unable to create audio track");
        delete t;
        return NO_INIT;
    }

    LOGV("setVolume");
    t->setVolume(mLeftVolume, mRightVolume);
    mMsecsPerFrame = 1.e3 / (float) sampleRate;
    mLatency = t->latency() + kAudioVideoDelayMs;
    mTrack = t;
    return NO_ERROR;
}
Developer ID:, Project:, Lines of code: 36, Source:

Example 11: LOG

void MediaSource::removeSourceBuffer(SourceBuffer& buffer, ExceptionCode& ec)
{
    LOG(MediaSource, "MediaSource::removeSourceBuffer() %p", this);
    Ref<SourceBuffer> protect(buffer);

    // 2. If sourceBuffer specifies an object that is not in sourceBuffers then
    // throw a NOT_FOUND_ERR exception and abort these steps.
    if (!m_sourceBuffers->length() || !m_sourceBuffers->contains(buffer)) {
        ec = NOT_FOUND_ERR;
        return;
    }

    // 3. If the sourceBuffer.updating attribute equals true, then run the following steps: ...
    buffer.abortIfUpdating();

    // 4. Let SourceBuffer audioTracks list equal the AudioTrackList object returned by sourceBuffer.audioTracks.
    RefPtr<AudioTrackList> audioTracks = buffer.audioTracks();

    // 5. If the SourceBuffer audioTracks list is not empty, then run the following steps:
    if (audioTracks->length()) {
        // 5.1 Let HTMLMediaElement audioTracks list equal the AudioTrackList object returned by the audioTracks
        // attribute on the HTMLMediaElement.
        // 5.2 Let the removed enabled audio track flag equal false.
        bool removedEnabledAudioTrack = false;

        // 5.3 For each AudioTrack object in the SourceBuffer audioTracks list, run the following steps:
        while (audioTracks->length()) {
            AudioTrack* track = audioTracks->lastItem();

            // 5.3.1 Set the sourceBuffer attribute on the AudioTrack object to null.
            track->setSourceBuffer(nullptr);

            // 5.3.2 If the enabled attribute on the AudioTrack object is true, then set the removed enabled
            // audio track flag to true.
            if (track->enabled())
                removedEnabledAudioTrack = true;

            // 5.3.3 Remove the AudioTrack object from the HTMLMediaElement audioTracks list.
            // 5.3.4 Queue a task to fire a trusted event named removetrack, that does not bubble and is not
            // cancelable, and that uses the TrackEvent interface, at the HTMLMediaElement audioTracks list.
            if (mediaElement())
                mediaElement()->removeAudioTrack(track);

            // 5.3.5 Remove the AudioTrack object from the SourceBuffer audioTracks list.
            // 5.3.6 Queue a task to fire a trusted event named removetrack, that does not bubble and is not
            // cancelable, and that uses the TrackEvent interface, at the SourceBuffer audioTracks list.
            audioTracks->remove(track);
        }

        // 5.4 If the removed enabled audio track flag equals true, then queue a task to fire a simple event
        // named change at the HTMLMediaElement audioTracks list.
        if (removedEnabledAudioTrack)
            mediaElement()->audioTracks()->scheduleChangeEvent();
    }

    // 6. Let SourceBuffer videoTracks list equal the VideoTrackList object returned by sourceBuffer.videoTracks.
    RefPtr<VideoTrackList> videoTracks = buffer.videoTracks();

    // 7. If the SourceBuffer videoTracks list is not empty, then run the following steps:
    if (videoTracks->length()) {
        // 7.1 Let HTMLMediaElement videoTracks list equal the VideoTrackList object returned by the videoTracks
        // attribute on the HTMLMediaElement.
        // 7.2 Let the removed selected video track flag equal false.
        bool removedSelectedVideoTrack = false;

        // 7.3 For each VideoTrack object in the SourceBuffer videoTracks list, run the following steps:
        while (videoTracks->length()) {
            VideoTrack* track = videoTracks->lastItem();

            // 7.3.1 Set the sourceBuffer attribute on the VideoTrack object to null.
            track->setSourceBuffer(nullptr);

            // 7.3.2 If the selected attribute on the VideoTrack object is true, then set the removed selected
            // video track flag to true.
            if (track->selected())
                removedSelectedVideoTrack = true;

            // 7.3.3 Remove the VideoTrack object from the HTMLMediaElement videoTracks list.
            // 7.3.4 Queue a task to fire a trusted event named removetrack, that does not bubble and is not
            // cancelable, and that uses the TrackEvent interface, at the HTMLMediaElement videoTracks list.
            if (mediaElement())
                mediaElement()->removeVideoTrack(track);

            // 7.3.5 Remove the VideoTrack object from the SourceBuffer videoTracks list.
            // 7.3.6 Queue a task to fire a trusted event named removetrack, that does not bubble and is not
            // cancelable, and that uses the TrackEvent interface, at the SourceBuffer videoTracks list.
            videoTracks->remove(track);
        }

        // 7.4 If the removed selected video track flag equals true, then queue a task to fire a simple event
        // named change at the HTMLMediaElement videoTracks list.
        if (removedSelectedVideoTrack)
            mediaElement()->videoTracks()->scheduleChangeEvent();
    }

    // 8. Let SourceBuffer textTracks list equal the TextTrackList object returned by sourceBuffer.textTracks.
    RefPtr<TextTrackList> textTracks = buffer.textTracks();

    // 9. If the SourceBuffer textTracks list is not empty, then run the following steps:
    if (textTracks->length()) {
//......... rest of the code omitted here .........
Developer ID: zosimos, Project: webkit, Lines of code: 101, Source: MediaSource.cpp

Example 12: name2route

Route name2route(const QString& rn, bool /*dst*/, int rtype)/*{{{*/
{
	// printf("name2route %s\n", rn.toLatin1().constData());
	int channel = -1;
	QString s(rn);
	// Support old route style in oom files. Obsolete.
	if (rn.size() >= 2 && rn[0].isNumber() && rn[1] == ':')
	{
		channel = rn[0].toAscii() - int('1');
		s = rn.mid(2);
	}

	if (rtype == -1)
	{
		if (checkAudioDevice())
		{
			void* p = audioDevice->findPort(s.toLatin1().constData());
			if (p)
				return Route(p, channel);
		}

		TrackList* tl = song->tracks();
		for (iTrack i = tl->begin(); i != tl->end(); ++i)
		{
			if ((*i)->isMidiTrack())
			{
				MidiTrack* track = (MidiTrack*) * i;
				if (track->name() == s)
					return Route(track, channel);
			}
			else
			{
				AudioTrack* track = (AudioTrack*) * i;
				if (track->name() == s)
					return Route(track, channel);
			}
		}

		for (iMidiDevice i = midiDevices.begin(); i != midiDevices.end(); ++i)
		{
			if ((*i)->name() == s)
				return Route(*i, channel);
		}

		// p3.3.49
		if (s.left(ROUTE_MIDIPORT_NAME_PREFIX.length()) == ROUTE_MIDIPORT_NAME_PREFIX)
		{
			bool ok = false;
			int port = s.mid(ROUTE_MIDIPORT_NAME_PREFIX.length()).toInt(&ok);
			if (ok)
				return Route(port, channel);
		}
	}
	else
	{
		if (rtype == Route::TRACK_ROUTE)
		{
			TrackList* tl = song->tracks();
			for (iTrack i = tl->begin(); i != tl->end(); ++i)
			{
				if ((*i)->isMidiTrack())
				{
					MidiTrack* track = (MidiTrack*) * i;
					if (track->name() == s)
						return Route(track, channel);
				}
				else
				{
					AudioTrack* track = (AudioTrack*) * i;
					if (track->name() == s)
						return Route(track, channel);
				}
			}
		}// TODO Distinguish the device types
		else if (rtype == Route::MIDI_DEVICE_ROUTE)
		{
			for (iMidiDevice i = midiDevices.begin(); i != midiDevices.end(); ++i)
			{
				if ((*i)->name() == s)
					return Route(*i, channel);
			}
		}
		else if (rtype == Route::JACK_ROUTE)
		{
			if (checkAudioDevice())
			{
				void* p = audioDevice->findPort(s.toLatin1().constData());
				if (p)
					return Route(p, channel);
			}
		}
		else if (rtype == Route::MIDI_PORT_ROUTE) // p3.3.49
		{
			if (s.left(ROUTE_MIDIPORT_NAME_PREFIX.length()) == ROUTE_MIDIPORT_NAME_PREFIX)
			{
				bool ok = false;
				int port = s.mid(ROUTE_MIDIPORT_NAME_PREFIX.length()).toInt(&ok);
				if (ok)
					return Route(port, channel);
			}
//......... rest of the code omitted here .........
Developer ID: 87maxi, Project: oom, Lines of code: 101, Source: route.cpp

Example 13: SNDDMA_Init

qboolean SNDDMA_Init(void)
{
  if ( ! enableSound() ) {
    return false;
  }

  gDMAByteIndex = 0;

  // Initialize the AudioTrack.

  status_t result = gAudioTrack.set(
    AudioSystem::DEFAULT, // stream type
    SAMPLE_RATE,   // sample rate
    BITS_PER_SAMPLE == 16 ? AudioSystem::PCM_16_BIT : AudioSystem::PCM_8_BIT,      // format (8 or 16)
    (CHANNEL_COUNT > 1) ? AudioSystem::CHANNEL_OUT_STEREO : AudioSystem::CHANNEL_OUT_MONO,       // channel mask
    0,       // default buffer size
    0, // flags
    AndroidQuakeSoundCallback, // callback
    0,  // user
    0); // default notification size

  LOGI("AudioTrack status  = %d (%s)\n", result, result == NO_ERROR ? "success" : "error");

  if ( result == NO_ERROR ) {
    LOGI("AudioTrack latency = %u ms\n", gAudioTrack.latency());
    LOGI("AudioTrack format = %u bits\n", gAudioTrack.format() == AudioSystem::PCM_16_BIT ? 16 : 8);
    LOGI("AudioTrack sample rate = %u Hz\n", gAudioTrack.getSampleRate());
    LOGI("AudioTrack frame count = %d\n", int(gAudioTrack.frameCount()));
    LOGI("AudioTrack channel count = %d\n", gAudioTrack.channelCount());

    // Initialize Quake's idea of a DMA buffer.

    shm = &sn;
    memset((void*)&sn, 0, sizeof(sn));

    shm->splitbuffer = false;	// Not used.
    shm->samplebits = gAudioTrack.format() == AudioSystem::PCM_16_BIT ? 16 : 8;
    shm->speed = gAudioTrack.getSampleRate();
    shm->channels = gAudioTrack.channelCount();
    shm->samples = TOTAL_BUFFER_SIZE / BYTES_PER_SAMPLE;
    shm->samplepos = 0; // Not used.
    shm->buffer = (unsigned char*) Hunk_AllocName(TOTAL_BUFFER_SIZE, (char*) "shmbuf");
    shm->submission_chunk = 1; // Not used.

    shm->soundalive = true;

    if ( (shm->samples & 0x1ff) != 0 ) {
      LOGE("SNDDDMA_Init: samples must be power of two.");
      return false;
    }

    if ( shm->buffer == 0 ) {
      LOGE("SNDDDMA_Init: Could not allocate sound buffer.");
      return false;
    }

    gAudioTrack.setVolume(1.0f, 1.0f);
    gAudioTrack.start();
  }

  return result == NO_ERROR;
}
Developer ID:, Project:, Lines of code: 62, Source:

Example 14: SNDDMA_Shutdown

/*
==============
SNDDMA_Shutdown

Reset the sound device for exiting
===============
*/
void SNDDMA_Shutdown(void)
{
  gAudioTrack.stop();
}
Developer ID:, Project:, Lines of code: 11, Source:

Example 15: switch

bool Song::event(QEvent* _e)
{
	if (_e->type() != QEvent::User)
		return false; //ignore all events except user events, which are events from Python bridge subsystem

	QPybridgeEvent* e = (QPybridgeEvent*) _e;
	switch (e->getType())
	{
		case QPybridgeEvent::SONG_UPDATE:
			this->update(e->getP1());
			break;
		case QPybridgeEvent::SONGLEN_CHANGE:
			this->setLen(e->getP1());
			break;
		case QPybridgeEvent::SONG_POSCHANGE:
			this->setPos(e->getP1(), e->getP2());
			break;
		case QPybridgeEvent::SONG_SETPLAY:
			this->setPlay(true);
			break;
		case QPybridgeEvent::SONG_SETSTOP:
			this->setStop(true);
			break;
		case QPybridgeEvent::SONG_REWIND:
			this->rewindStart();
			break;
		case QPybridgeEvent::SONG_SETMUTE:
		{
			Track* track = this->findTrack(e->getS1());
			if (track == NULL)
				return false;

			bool muted = e->getP1() == 1;
			track->setMute(muted);
			this->update(SC_MUTE | SC_TRACK_MODIFIED);
			break;
		}
		case QPybridgeEvent::SONG_SETCTRL:
		{
			Track* t = this->findTrack(e->getS1());
			if (t == NULL)
				return false;

			if (t->isMidiTrack() == false)
				return false;

			MidiTrack* track = (MidiTrack*) t;
			int chan = track->outChannel();

			int num = e->getP1();
			int val = e->getP2();
			int tick = song->cpos();
			MidiPlayEvent ev(tick, track->outPort(), chan, ME_CONTROLLER, num, val, t);
			audio->msgPlayMidiEvent(&ev);
			song->update(SC_MIDI_CONTROLLER);
			break;
		}
		case QPybridgeEvent::SONG_SETAUDIOVOL:
		{
			Track* t = this->findTrack(e->getS1());
			if (t == NULL)
				return false;

			if (t->type() == Track::DRUM || t->type() == Track::MIDI)
				return false;

			AudioTrack* track = (AudioTrack*) t;
			track->setVolume(e->getD1());
			break;
		}
		case QPybridgeEvent::SONG_IMPORT_PART:
		{
			Track* track = this->findTrack(e->getS1());
			QString filename = e->getS2();
			unsigned int tick = e->getP1();
			if (track == NULL)
				return false;

			oom->importPartToTrack(filename, tick, track);
			break;
		}
		case QPybridgeEvent::SONG_TOGGLE_EFFECT:
		{
			Track* t = this->findTrack(e->getS1());
			if (t == NULL)
				return false;

			if (t->type() != Track::WAVE)
				return false;

			int fxid = e->getP1();


			int onoff = (e->getP2() == 1);

			AudioTrack* track = (AudioTrack*) t;
			Pipeline* pipeline = track->efxPipe();
			if(pipeline)
			{
//......... rest of the code omitted here .........
Developer ID: OpenGanesh, Project: oom, Lines of code: 101, Source: pyapi.cpp


Note: The AudioTrack class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, and copyright of the source code remains with the original authors. For distribution and use, please refer to the license of the corresponding project; do not reproduce without permission.