本文整理汇总了Java中org.webrtc.Logging.w方法的典型用法代码示例。如果您正苦于以下问题:Java Logging.w方法的具体用法?Java Logging.w怎么用?Java Logging.w使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.webrtc.Logging
的用法示例。
在下文中一共展示了Logging.w方法的14个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: createAudioTrackOnLollipopOrHigher
import org.webrtc.Logging; //导入方法依赖的package包/类
// Builds an AudioTrack for VoIP playout using the API 21+ attribute/format builders.
// Logs a warning when the requested rate differs from the device's native output
// rate (fast path unavailable) or when a non-default usage attribute is in effect.
@TargetApi(21)
private static AudioTrack createAudioTrackOnLollipopOrHigher(
    int sampleRateInHz, int channelConfig, int bufferSizeInBytes) {
  Logging.d(TAG, "createAudioTrackOnLollipopOrHigher");
  // TODO(henrika): use setPerformanceMode(int) with PERFORMANCE_MODE_LOW_LATENCY to control
  // performance when Android O is supported. Add some logging in the mean time.
  final int nativeOutputSampleRate =
      AudioTrack.getNativeOutputSampleRate(AudioManager.STREAM_VOICE_CALL);
  Logging.d(TAG, "nativeOutputSampleRate: " + nativeOutputSampleRate);
  if (sampleRateInHz != nativeOutputSampleRate) {
    Logging.w(TAG, "Unable to use fast mode since requested sample rate is not native");
  }
  if (usageAttribute != DEFAULT_USAGE) {
    Logging.w(TAG, "A non default usage attribute is used: " + usageAttribute);
  }
  // Audio usage is for VoIP and the content type is speech.
  final AudioAttributes audioAttributes = new AudioAttributes.Builder()
      .setUsage(usageAttribute)
      .setContentType(AudioAttributes.CONTENT_TYPE_SPEECH)
      .build();
  final AudioFormat audioFormat = new AudioFormat.Builder()
      .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
      .setSampleRate(sampleRateInHz)
      .setChannelMask(channelConfig)
      .build();
  return new AudioTrack(audioAttributes, audioFormat, bufferSizeInBytes,
      AudioTrack.MODE_STREAM, AudioManager.AUDIO_SESSION_ID_GENERATE);
}
示例2: setAEC
import org.webrtc.Logging; //导入方法依赖的package包/类
// Requests that the platform acoustic echo canceler be enabled (or disabled).
// Returns false when the platform AEC is unsupported, or when the effect has
// already been created and the requested state differs from the current one.
public boolean setAEC(boolean enable) {
  Logging.d(TAG, "setAEC(" + enable + ")");
  if (!canUseAcousticEchoCanceler()) {
    Logging.w(TAG, "Platform AEC is not supported");
    shouldEnableAec = false;
    return false;
  }
  final boolean effectCreated = (aec != null);
  if (effectCreated && enable != shouldEnableAec) {
    // The effect already exists; its state cannot change mid-session.
    Logging.e(TAG, "Platform AEC state can't be modified while recording");
    return false;
  }
  shouldEnableAec = enable;
  return true;
}
示例3: setNS
import org.webrtc.Logging; //导入方法依赖的package包/类
// Requests that the platform noise suppressor be enabled (or disabled).
// Returns false when the platform NS is unsupported, or when the effect has
// already been created and the requested state differs from the current one.
public boolean setNS(boolean enable) {
  Logging.d(TAG, "setNS(" + enable + ")");
  if (!canUseNoiseSuppressor()) {
    Logging.w(TAG, "Platform NS is not supported");
    shouldEnableNs = false;
    return false;
  }
  final boolean effectCreated = (ns != null);
  if (effectCreated && enable != shouldEnableNs) {
    // The effect already exists; its state cannot change mid-session.
    Logging.e(TAG, "Platform NS state can't be modified while recording");
    return false;
  }
  shouldEnableNs = enable;
  return true;
}
示例4: isAcousticEchoCancelerBlacklisted
import org.webrtc.Logging; //导入方法依赖的package包/类
// Returns true when the current device model appears on the list of models
// for which the hardware AEC must not be used; logs a warning in that case.
public static boolean isAcousticEchoCancelerBlacklisted() {
  final boolean blacklisted =
      WebRtcAudioUtils.getBlackListedModelsForAecUsage().contains(Build.MODEL);
  if (blacklisted) {
    Logging.w(TAG, Build.MODEL + " is blacklisted for HW AEC usage!");
  }
  return blacklisted;
}
示例5: isNoiseSuppressorBlacklisted
import org.webrtc.Logging; //导入方法依赖的package包/类
// Returns true when the current device model appears on the list of models
// for which the hardware NS must not be used; logs a warning in that case.
public static boolean isNoiseSuppressorBlacklisted() {
  final boolean blacklisted =
      WebRtcAudioUtils.getBlackListedModelsForNsUsage().contains(Build.MODEL);
  if (blacklisted) {
    Logging.w(TAG, Build.MODEL + " is blacklisted for HW NS usage!");
  }
  return blacklisted;
}
示例6: create
import org.webrtc.Logging; //导入方法依赖的package包/类
// Factory for WebRtcAudioEffects. Returns null if VoIP effects (AEC, AGC and NS)
// are not supported, i.e. below API level 16.
static WebRtcAudioEffects create() {
  if (WebRtcAudioUtils.runningOnJellyBeanOrHigher()) {
    return new WebRtcAudioEffects();
  }
  Logging.w(TAG, "API level 16 or higher is required!");
  return null;
}
示例7: setAudioTrackUsageAttribute
import org.webrtc.Logging; //导入方法依赖的package包/类
// Overrides the usage attribute applied to audio tracks created after this call.
// Logged at warning level since it deviates from the default behavior.
public static synchronized void setAudioTrackUsageAttribute(int usage) {
  Logging.w(TAG, "Default usage attribute is changed from: "
      + DEFAULT_USAGE + " to " + usage);
  usageAttribute = usage;
}
示例8: setSpeakerMute
import org.webrtc.Logging; //导入方法依赖的package包/类
// Sets the speaker-mute flag; logged at warning level so overrides are visible.
public static void setSpeakerMute(boolean mute) {
  Logging.w(TAG, "setSpeakerMute(" + mute + ")");
  speakerMute = mute;
}
示例9: setMicrophoneMute
import org.webrtc.Logging; //导入方法依赖的package包/类
// Sets the microphone-mute flag; logged at warning level so overrides are visible.
public static void setMicrophoneMute(boolean mute) {
  Logging.w(TAG, "setMicrophoneMute(" + mute + ")");
  microphoneMute = mute;
}
示例10: setStereoInput
import org.webrtc.Logging; //导入方法依赖的package包/类
// Overrides the default (mono) input channel configuration.
public static synchronized void setStereoInput(boolean enable) {
  Logging.w(TAG, "Overriding default input behavior: setStereoInput(" + enable + ')');
  useStereoInput = enable;
}
示例11: setStereoOutput
import org.webrtc.Logging; //导入方法依赖的package包/类
// Overrides the default (mono) output channel configuration.
public static synchronized void setStereoOutput(boolean enable) {
  Logging.w(TAG, "Overriding default output behavior: setStereoOutput(" + enable + ')');
  useStereoOutput = enable;
}
示例12: setWebRtcBasedAutomaticGainControl
import org.webrtc.Logging; //导入方法依赖的package包/类
/**
 * No-op kept only for backward compatibility with existing clients.
 *
 * @param enable ignored.
 * @deprecated This setting no longer has any effect; the call is logged and discarded.
 */
@Deprecated
public static synchronized void setWebRtcBasedAutomaticGainControl(boolean enable) {
  // TODO(henrika): deprecated; remove when no longer used by any client.
  Logging.w(TAG, "setWebRtcBasedAutomaticGainControl() is deprecated");
}
示例13: useWebRtcBasedAcousticEchoCanceler
import org.webrtc.Logging; //导入方法依赖的package包/类
// Returns whether the software (WebRTC) AEC should be used instead of the
// platform effect; a warning is logged whenever the override is active.
public static synchronized boolean useWebRtcBasedAcousticEchoCanceler() {
  final boolean useSoftwareAec = useWebRtcBasedAcousticEchoCanceler;
  if (useSoftwareAec) {
    Logging.w(TAG, "Overriding default behavior; now using WebRTC AEC!");
  }
  return useSoftwareAec;
}
示例14: useWebRtcBasedNoiseSuppressor
import org.webrtc.Logging; //导入方法依赖的package包/类
// Returns whether the software (WebRTC) NS should be used instead of the
// platform effect; a warning is logged whenever the override is active.
public static synchronized boolean useWebRtcBasedNoiseSuppressor() {
  final boolean useSoftwareNs = useWebRtcBasedNoiseSuppressor;
  if (useSoftwareNs) {
    Logging.w(TAG, "Overriding default behavior; now using WebRTC NS!");
  }
  return useSoftwareNs;
}