

Java AudioTrack.STATE_INITIALIZED Field Code Examples

This article collects typical usage examples of the android.media.AudioTrack.STATE_INITIALIZED field in Java. If you are wondering what AudioTrack.STATE_INITIALIZED is for and how to use it in practice, the curated examples below should help. You can also explore further usage examples of android.media.AudioTrack, the class this field belongs to.


The sections below present 12 code examples of AudioTrack.STATE_INITIALIZED, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Java code examples.
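Before diving into the examples, here is a minimal sketch of the pattern they all share: after constructing an AudioTrack, getState() must be compared against AudioTrack.STATE_INITIALIZED before calling play(), because the constructor can hand back an object that failed to initialize (for example, when the buffer size is invalid or the platform has run out of audio tracks). The sample rate below is an illustrative value, not a requirement.

import android.media.AudioFormat;
import android.media.AudioManager;
import android.media.AudioTrack;

public final class AudioTrackSketch {

    /** Returns a playable AudioTrack, or null if initialization failed. */
    public static AudioTrack createOrNull() {
        final int sampleRateHz = 16000; // illustrative value
        final int minBufferSize = AudioTrack.getMinBufferSize(sampleRateHz,
                AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT);
        if (minBufferSize <= 0) {
            // getMinBufferSize reports errors as non-positive values.
            return null;
        }
        AudioTrack track = new AudioTrack(AudioManager.STREAM_MUSIC, sampleRateHz,
                AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT,
                minBufferSize, AudioTrack.MODE_STREAM);
        // The constructor does not throw when the underlying native track could
        // not be created; STATE_INITIALIZED is the only reliable success signal.
        if (track.getState() != AudioTrack.STATE_INITIALIZED) {
            track.release();
            return null;
        }
        return track;
    }
}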

Example 1: createAudioTrack

private void createAudioTrack() throws InitializationException {
    // The AudioTrack configuration parameters used here are guaranteed to
    // be supported on all devices.

    // AudioFormat.CHANNEL_OUT_MONO should be used in place of the deprecated
    // AudioFormat.CHANNEL_CONFIGURATION_MONO, but it is not available on
    // API level 3 (see the sketch after this example).

    // The output buffer for playback should be as short as possible, so that
    // AudioBufferPlayed events are not invoked long before the audio buffer is
    // actually played. Also, when the AudioTrack is stopped, it is filled with
    // silence of length audioTrackBufferSizeInBytes. If the silence is too
    // long, it causes a delay before the next recorded data starts playing.
    audioTrackBufferSizeInBytes = AudioTrack.getMinBufferSize(
            SpeechTrainerConfig.SAMPLE_RATE_HZ,
            AudioFormat.CHANNEL_CONFIGURATION_MONO,
            AudioFormat.ENCODING_PCM_16BIT);
    if (audioTrackBufferSizeInBytes <= 0) {
        throw new InitializationException("Failed to initialize playback.");
    }

    audioTrack = new AudioTrack(AudioManager.STREAM_MUSIC,
            SpeechTrainerConfig.SAMPLE_RATE_HZ,
            AudioFormat.CHANNEL_CONFIGURATION_MONO, AudioFormat.ENCODING_PCM_16BIT,
            audioTrackBufferSizeInBytes,
            AudioTrack.MODE_STREAM);
    if (audioTrack.getState() != AudioTrack.STATE_INITIALIZED) {
        audioTrack = null;
        throw new InitializationException("Failed to initialize playback.");
    }
}
 
Developer: sdrausty, Project: buildAPKsApps, Code lines: 31, Source: ControllerFactory.java
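As the comment in Example 1 notes, AudioFormat.CHANNEL_CONFIGURATION_MONO is deprecated. On platforms where the replacement constant exists, the same buffer-size query can be written as below; this is a sketch that reuses SpeechTrainerConfig.SAMPLE_RATE_HZ from the example above.

    int bufferSize = AudioTrack.getMinBufferSize(
            SpeechTrainerConfig.SAMPLE_RATE_HZ,
            AudioFormat.CHANNEL_OUT_MONO,
            AudioFormat.ENCODING_PCM_16BIT);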

Example 2: startPlayout

private boolean startPlayout() {
  Logging.d(TAG, "startPlayout");
  assertTrue(audioTrack != null);
  assertTrue(audioThread == null);
  if (audioTrack.getState() != AudioTrack.STATE_INITIALIZED) {
    reportWebRtcAudioTrackStartError("AudioTrack instance is not successfully initialized.");
    return false;
  }
  audioThread = new AudioTrackThread("AudioTrackJavaThread");
  audioThread.start();
  return true;
}
 
Developer: Piasy, Project: AppRTC-Android, Code lines: 12, Source: WebRtcAudioTrack.java

Example 3: startPlayout

private boolean startPlayout() {
  Logging.d(TAG, "startPlayout");
  assertTrue(audioTrack != null);
  assertTrue(audioThread == null);
  if (audioTrack.getState() != AudioTrack.STATE_INITIALIZED) {
    Logging.e(TAG, "AudioTrack instance is not successfully initialized.");
    return false;
  }
  audioThread = new AudioTrackThread("AudioTrackJavaThread");
  audioThread.start();
  return true;
}
 
Developer: lgyjg, Project: AndroidRTC, Code lines: 12, Source: WebRtcAudioTrack.java

Example 4: audioTrackRelease

private void audioTrackRelease() {
  if (mAudioTrack != null) {
    if (mAudioTrack.getState() == AudioTrack.STATE_INITIALIZED)
      mAudioTrack.stop();
    mAudioTrack.release();
  }
  mAudioTrack = null;
}
 
Developer: coding-dream, Project: TPlayer, Code lines: 8, Source: MediaPlayer.java
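The STATE_INITIALIZED guard before stop() matters here: calling stop() on an AudioTrack that was never successfully initialized throws an IllegalStateException, whereas release() is safe to call regardless of state.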

Example 5: getAudioTrack

/**
 * Helper method to double check the returned {@link SaiyAudioTrack} object hasn't been released
 * elsewhere.
 *
 * @return the {@link SaiyAudioTrack} object, or null if the creation process failed.
 */
private SaiyAudioTrack getAudioTrack() {
    if (audioTrack == null || audioTrack.getState() != AudioTrack.STATE_INITIALIZED) {
        audioTrack = SaiyAudioTrack.getSaiyAudioTrack();
        // getSaiyAudioTrack() may return null if creation failed; guard
        // before attaching the listener to avoid a NullPointerException.
        if (audioTrack != null) {
            audioTrack.setListener(listener);
        }
        return audioTrack;
    } else {
        return audioTrack;
    }
}
 
Developer: brandall76, Project: Saiy-PS, Code lines: 15, Source: SaiyTextToSpeech.java

Example 6: isSpeaking

@Override
public boolean isSpeaking() {

    if (audioTrack != null && audioTrack.getState() == AudioTrack.STATE_INITIALIZED) {
        if (DEBUG) {
            MyLog.i(CLS_NAME, "isSpeaking: audioTrack STATE_INITIALIZED");
        }

        if (audioTrack.getPlayState() == AudioTrack.PLAYSTATE_PLAYING
                || audioTrack.getPlayState() == AudioTrack.PLAYSTATE_PAUSED) {
            if (DEBUG) {
                MyLog.i(CLS_NAME, "isSpeaking: audioTrack PLAYSTATE_PLAYING/PLAYSTATE_PAUSED");
            }
            return true;
        } else {
            if (DEBUG) {
                MyLog.i(CLS_NAME, "isSpeaking: audioTrack not playing");
            }
        }
    }

    final boolean speakingSuper = super.isSpeaking();

    if (DEBUG) {
        MyLog.i(CLS_NAME, "isSpeaking: speakingSuper " + speakingSuper);
    }

    return speakingSuper;
}
 
Developer: brandall76, Project: Saiy-PS, Code lines: 29, Source: SaiyTextToSpeech.java
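Example 6 relies on the distinction between two different queries: getState() reports whether the track was successfully initialized (STATE_INITIALIZED versus STATE_UNINITIALIZED), while getPlayState() reports the playback status of an initialized track (PLAYSTATE_PLAYING, PLAYSTATE_PAUSED, or PLAYSTATE_STOPPED). A condensed sketch of the same check, where track stands in for an already-created AudioTrack:

    // Initialization state and playback state are independent queries:
    // an initialized track may still be stopped or paused.
    final boolean speaking = track != null
            && track.getState() == AudioTrack.STATE_INITIALIZED
            && (track.getPlayState() == AudioTrack.PLAYSTATE_PLAYING
                    || track.getPlayState() == AudioTrack.PLAYSTATE_PAUSED);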

Example 7: audioTrackRelease

private void audioTrackRelease() {
    if (mAudioTrack != null) {
        if (mAudioTrack.getState() == AudioTrack.STATE_INITIALIZED)
            mAudioTrack.stop();
        mAudioTrack.release();
    }
    mAudioTrack = null;
}
 
Developer: WangZhiYao, Project: VideoDemo, Code lines: 8, Source: MediaPlayer.java

Example 8: initPlayout

private boolean initPlayout(int sampleRate, int channels) {
  Logging.d(TAG, "initPlayout(sampleRate=" + sampleRate + ", channels=" + channels + ")");
  final int bytesPerFrame = channels * (BITS_PER_SAMPLE / 8);
  byteBuffer = ByteBuffer.allocateDirect(bytesPerFrame * (sampleRate / BUFFERS_PER_SECOND));
  Logging.d(TAG, "byteBuffer.capacity: " + byteBuffer.capacity());
  emptyBytes = new byte[byteBuffer.capacity()];
  // Rather than passing the ByteBuffer with every callback (requiring
  // the potentially expensive GetDirectBufferAddress) we simply have the
  // native class cache the address to the memory once.
  nativeCacheDirectBufferAddress(byteBuffer, nativeAudioTrack);

  // Get the minimum buffer size required for the successful creation of an
  // AudioTrack object to be created in the MODE_STREAM mode.
  // Note that this size doesn't guarantee a smooth playback under load.
  // TODO(henrika): should we extend the buffer size to avoid glitches?
  final int channelConfig = channelCountToConfiguration(channels);
  final int minBufferSizeInBytes =
      AudioTrack.getMinBufferSize(sampleRate, channelConfig, AudioFormat.ENCODING_PCM_16BIT);
  Logging.d(TAG, "AudioTrack.getMinBufferSize: " + minBufferSizeInBytes);
  // For the streaming mode, data must be written to the audio sink in
  // chunks of size (given by byteBuffer.capacity()) less than or equal
  // to the total buffer size |minBufferSizeInBytes|. But, we have seen
  // reports of "getMinBufferSize(): error querying hardware". Hence, it
  // can happen that |minBufferSizeInBytes| contains an invalid value.
  if (minBufferSizeInBytes < byteBuffer.capacity()) {
    reportWebRtcAudioTrackInitError("AudioTrack.getMinBufferSize returns an invalid value.");
    return false;
  }

  // Ensure that the previous audio session was stopped correctly before
  // trying to create a new AudioTrack.
  if (audioTrack != null) {
    reportWebRtcAudioTrackInitError("Conflict with existing AudioTrack.");
    return false;
  }
  try {
    // Create an AudioTrack object and initialize its associated audio buffer.
    // The size of this buffer determines how long an AudioTrack can play
    // before running out of data.
    if (WebRtcAudioUtils.runningOnLollipopOrHigher()) {
      // If we are on API level 21 or higher, it is possible to use a special AudioTrack
      // constructor that uses AudioAttributes and AudioFormat as input. It allows us to
      // supersede the notion of stream types for defining the behavior of audio playback,
      // and to allow certain platforms or routing policies to use this information for more
      // refined volume or routing decisions.
      audioTrack = createAudioTrackOnLollipopOrHigher(
          sampleRate, channelConfig, minBufferSizeInBytes);
    } else {
      // Use default constructor for API levels below 21.
      audioTrack =
          createAudioTrackOnLowerThanLollipop(sampleRate, channelConfig, minBufferSizeInBytes);
    }
  } catch (IllegalArgumentException e) {
    reportWebRtcAudioTrackInitError(e.getMessage());
    releaseAudioResources();
    return false;
  }

  // It can happen that an AudioTrack is created but it was not successfully
  // initialized upon creation. Seems to be the case e.g. when the maximum
  // number of globally available audio tracks is exceeded.
  if (audioTrack == null || audioTrack.getState() != AudioTrack.STATE_INITIALIZED) {
    reportWebRtcAudioTrackInitError("Initialization of audio track failed.");
    releaseAudioResources();
    return false;
  }
  logMainParameters();
  logMainParametersExtended();
  return true;
}
 
Developer: Piasy, Project: AppRTC-Android, Code lines: 70, Source: WebRtcAudioTrack.java
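Example 8 calls two factory helpers whose bodies are not shown here. For reference, here is a sketch of what the API 21+ path could look like, built on the public AudioAttributes and AudioFormat builder APIs; the usage and content-type values are assumptions, and the real helper in WebRtcAudioTrack may differ.

@TargetApi(21)
private static AudioTrack createAudioTrackOnLollipopOrHigher(
    int sampleRateInHz, int channelConfig, int bufferSizeInBytes) {
  AudioAttributes attributes = new AudioAttributes.Builder()
      .setUsage(AudioAttributes.USAGE_VOICE_COMMUNICATION) // assumption: call audio
      .setContentType(AudioAttributes.CONTENT_TYPE_SPEECH)  // assumption: speech
      .build();
  AudioFormat format = new AudioFormat.Builder()
      .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
      .setSampleRate(sampleRateInHz)
      .setChannelMask(channelConfig)
      .build();
  // The API 21 constructor replaces stream types with audio attributes,
  // letting the platform make finer-grained routing and volume decisions.
  return new AudioTrack(attributes, format, bufferSizeInBytes,
      AudioTrack.MODE_STREAM, AudioManager.AUDIO_SESSION_ID_GENERATE);
}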

Example 9: audioTrackStart

private void audioTrackStart() {
  if (mAudioTrack != null && mAudioTrack.getState() == AudioTrack.STATE_INITIALIZED && mAudioTrack.getPlayState() != AudioTrack.PLAYSTATE_PLAYING)
    mAudioTrack.play();
}
 
Developer: coding-dream, Project: TPlayer, Code lines: 4, Source: MediaPlayer.java

Example 10: audioTrackPause

private void audioTrackPause() {
  if (mAudioTrack != null && mAudioTrack.getState() == AudioTrack.STATE_INITIALIZED)
    mAudioTrack.pause();
}
 
Developer: coding-dream, Project: TPlayer, Code lines: 4, Source: MediaPlayer.java

Example 11: audioTrackPause

private void audioTrackPause() {
    if (mAudioTrack != null && mAudioTrack.getState() == AudioTrack.STATE_INITIALIZED)
        mAudioTrack.pause();
}
 
Developer: WangZhiYao, Project: VideoDemo, Code lines: 4, Source: MediaPlayer.java

Example 12: processMidi

private void processMidi() {
	int status = 0;
	int size = 0;

	// Init midi
	if ((size = this.init()) == 0) {
		return;
	}

	short[] buffer = new short[size];

	// Create audio track
	this.audioTrack = new AudioTrack(AudioManager.STREAM_MUSIC, SAMPLE_RATE, AudioFormat.CHANNEL_OUT_STEREO, AudioFormat.ENCODING_PCM_16BIT, BUFFER_SIZE, AudioTrack.MODE_STREAM);
	
	// Check audiotrack
	if( audioTrack == null ) {
		this.shutdown();
		return;
	}

	// Check state
	int state = this.audioTrack.getState();

	if (state != AudioTrack.STATE_INITIALIZED) {
		this.audioTrack.release();
		this.shutdown();
		return;
	}

	// Play track
	this.audioTrack.play();

	// Keep running until stopped
	while( this.thread != null ) {
		
		// Write the midi events
		synchronized (this.mutex) {
			for(byte[] queuedEvent : this.queuedEvents) {
				this.write(queuedEvent);
			}
			this.queuedEvents.clear();
		}
		
		// Render the audio
		if (this.render(buffer) == 0) {
			break;
		}
		// Write audio to audiotrack
		status = this.audioTrack.write(buffer, 0, buffer.length);

		if (status < 0) {
			break;
		}
	}

	// Render and write the last bit of audio
	if( status > 0 ) {
		if (this.render(buffer) > 0) {
			this.audioTrack.write(buffer, 0, buffer.length);
		}
	}
	// Shut down audio
	this.shutdown();
	this.audioTrack.release();
}
 
Developer: theokyr, Project: TuxGuitar-1.3.1-fork, Code lines: 65, Source: MidiDriver.java
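A note on the write loop above: in MODE_STREAM, AudioTrack.write() blocks until the data has been queued for playback and returns the number of shorts written, or a negative error code (for example ERROR_INVALID_OPERATION on an uninitialized track), which is why the loop exits as soon as status goes negative.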


Note: The android.media.AudioTrack.STATE_INITIALIZED examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their authors, and copyright remains with the original authors; please refer to the corresponding project's License before distributing or using the code. Do not reproduce without permission.