This article collects typical usage examples of the Java method org.webrtc.Logging.d. If you are wondering how Logging.d is used in practice, the curated examples below may help. You can also explore further usage examples of its containing class, org.webrtc.Logging.
The following 15 code examples of Logging.d are shown, sorted by popularity by default.
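Before the examples, here is a minimal sketch of the basic call pattern. Logging.d(tag, message) writes a debug-level line through WebRTC's own logging facility; the TAG value and the optional enableLogToDebugOutput() setup step below are illustrative assumptions, not part of the examples that follow.
import org.webrtc.Logging;

// Hypothetical tag; each class in the examples below defines its own TAG constant.
private static final String TAG = "LoggingDemo";

private void logSomething() {
  // Optionally route WebRTC's native logs to the debug output as well (assumed setup step).
  Logging.enableLogToDebugOutput(Logging.Severity.LS_INFO);
  Logging.d(TAG, "Debug message");    // debug level, as used throughout this page
  Logging.w(TAG, "Warning message");  // warning level
  Logging.e(TAG, "Error message");    // error level
}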
Example 1: getNativeOutputSampleRate
import org.webrtc.Logging; // import the package/class the method depends on
private int getNativeOutputSampleRate() {
  // Override this if we're running on an old emulator image which only
  // supports 8 kHz and doesn't support PROPERTY_OUTPUT_SAMPLE_RATE.
  if (WebRtcAudioUtils.runningOnEmulator()) {
    Logging.d(TAG, "Running emulator, overriding sample rate to 8 kHz.");
    return 8000;
  }
  // Default can be overridden by WebRtcAudioUtils.setDefaultSampleRateHz().
  // If so, use that value and return here.
  if (WebRtcAudioUtils.isDefaultSampleRateOverridden()) {
    Logging.d(TAG, "Default sample rate is overridden to "
        + WebRtcAudioUtils.getDefaultSampleRateHz() + " Hz");
    return WebRtcAudioUtils.getDefaultSampleRateHz();
  }
  // No overrides available. Deliver best possible estimate based on default
  // Android AudioManager APIs.
  final int sampleRateHz;
  if (WebRtcAudioUtils.runningOnJellyBeanMR1OrHigher()) {
    sampleRateHz = getSampleRateOnJellyBeanMR10OrHigher();
  } else {
    sampleRateHz = WebRtcAudioUtils.getDefaultSampleRateHz();
  }
  Logging.d(TAG, "Sample rate is set to " + sampleRateHz + " Hz");
  return sampleRateHz;
}
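The helper getSampleRateOnJellyBeanMR10OrHigher() is not shown above. A plausible sketch follows, assuming it simply queries AudioManager.PROPERTY_OUTPUT_SAMPLE_RATE (available since API 17) and falls back to the WebRTC default when the property is missing; the exact body in the WebRTC sources may differ.
// Requires android.media.AudioManager; audioManager is the field set up by the surrounding class.
@TargetApi(17)
private int getSampleRateOnJellyBeanMR10OrHigher() {
  // PROPERTY_OUTPUT_SAMPLE_RATE reports the native sample rate of the primary output stream.
  String sampleRateString = audioManager.getProperty(AudioManager.PROPERTY_OUTPUT_SAMPLE_RATE);
  return (sampleRateString == null)
      ? WebRtcAudioUtils.getDefaultSampleRateHz()
      : Integer.parseInt(sampleRateString);
}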
Example 2: updateFrameDimensionsAndReportEvents
import org.webrtc.Logging; // import the package/class the method depends on
private void updateFrameDimensionsAndReportEvents(VideoRenderer.I420Frame frame) {
  synchronized (layoutLock) {
    if (frameWidth != frame.width || frameHeight != frame.height
        || frameRotation != frame.rotationDegree) {
      Logging.d(TAG, getResourceName() + "Reporting frame resolution changed to "
          + frame.width + "x" + frame.height + " with rotation " + frame.rotationDegree);
      if (rendererEvents != null) {
        rendererEvents.onFrameResolutionChanged(frame.width, frame.height, frame.rotationDegree);
      }
      frameWidth = frame.width;
      frameHeight = frame.height;
      frameRotation = frame.rotationDegree;
      post(new Runnable() {
        @Override public void run() {
          requestLayout();
        }
      });
    }
  }
}
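The resolution change is reported through a RendererCommon.RendererEvents callback, while requestLayout() is posted so it runs on the UI thread rather than on the render thread. A minimal sketch of a consumer of that callback (listener field and log lines are assumptions for illustration):
// Hypothetical listener; method names follow RendererCommon.RendererEvents.
private final RendererCommon.RendererEvents rendererEvents = new RendererCommon.RendererEvents() {
  @Override
  public void onFirstFrameRendered() {
    Logging.d(TAG, "First frame rendered");
  }

  @Override
  public void onFrameResolutionChanged(int width, int height, int rotation) {
    Logging.d(TAG, "Resolution changed to " + width + "x" + height + " rotation " + rotation);
  }
};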
Example 3: WebRtcAudioManager
import org.webrtc.Logging; // import the package/class the method depends on
WebRtcAudioManager(Context context, long nativeAudioManager) {
  Logging.d(TAG, "ctor" + WebRtcAudioUtils.getThreadInfo());
  this.context = context;
  this.nativeAudioManager = nativeAudioManager;
  audioManager = (AudioManager) context.getSystemService(Context.AUDIO_SERVICE);
  if (DEBUG) {
    WebRtcAudioUtils.logDeviceInfo(TAG);
  }
  volumeLogger = new VolumeLogger(audioManager);
  storeAudioParameters();
  nativeCacheAudioParameters(sampleRate, outputChannels, inputChannels, hardwareAEC, hardwareAGC,
      hardwareNS, lowLatencyOutput, lowLatencyInput, proAudio, outputBufferSize, inputBufferSize,
      nativeAudioManager);
}
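WebRtcAudioUtils.getThreadInfo() appends the calling thread's name and id to the log line, which makes it easy to see which thread constructed the manager. A plausible sketch of such a helper (the real implementation may differ):
// Sketch of a thread-info helper similar to WebRtcAudioUtils.getThreadInfo().
public static String getThreadInfo() {
  return "@[name=" + Thread.currentThread().getName()
      + ", id=" + Thread.currentThread().getId() + "]";
}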
Example 4: stopRecording
import org.webrtc.Logging; // import the package/class the method depends on
private boolean stopRecording() {
  threadChecker.checkIsOnValidThread();
  Logging.d(TAG, "stopRecording");
  assertTrue(audioThread != null);
  audioThread.stopThread();
  if (!ThreadUtils.joinUninterruptibly(audioThread, AUDIO_RECORD_THREAD_JOIN_TIMEOUT_MS)) {
    Logging.e(TAG, "Join of AudioRecordJavaThread timed out");
  }
  audioThread = null;
  if (effects != null) {
    effects.release();
  }
  releaseAudioResources();
  return true;
}
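assertTrue() here is not JUnit's; the audio classes define their own small guard. A minimal sketch under that assumption:
// Sketch of the private guard used above; throws instead of silently continuing.
private static void assertTrue(boolean condition) {
  if (!condition) {
    throw new AssertionError("Expected condition to be true");
  }
}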
Example 5: setNS
import org.webrtc.Logging; // import the package/class the method depends on
public boolean setNS(boolean enable) {
  Logging.d(TAG, "setNS(" + enable + ")");
  if (!canUseNoiseSuppressor()) {
    Logging.w(TAG, "Platform NS is not supported");
    shouldEnableNs = false;
    return false;
  }
  if (ns != null && (enable != shouldEnableNs)) {
    Logging.e(TAG, "Platform NS state can't be modified while recording");
    return false;
  }
  shouldEnableNs = enable;
  return true;
}
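canUseNoiseSuppressor() decides whether the platform noise suppressor may be used at all. A simplified sketch of such a check based on android.media.audiofx.NoiseSuppressor; the real WebRtcAudioEffects version is stricter and also consults a device blacklist:
// Simplified sketch; assumes availability of the platform effect is the only criterion.
public static boolean canUseNoiseSuppressor() {
  return NoiseSuppressor.isAvailable();
}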
Example 6: logMainParametersExtended
import org.webrtc.Logging; // import the package/class the method depends on
@TargetApi(23)
private void logMainParametersExtended() {
  if (WebRtcAudioUtils.runningOnMarshmallowOrHigher()) {
    Logging.d(TAG, "AudioRecord: "
        // The frame count of the native AudioRecord buffer.
        + "buffer size in frames: " + audioRecord.getBufferSizeInFrames());
  }
}
Example 7: enableBuiltInAEC
import org.webrtc.Logging; // import the package/class the method depends on
private boolean enableBuiltInAEC(boolean enable) {
  threadChecker.checkIsOnValidThread();
  Logging.d(TAG, "enableBuiltInAEC(" + enable + ')');
  if (effects == null) {
    Logging.e(TAG, "Built-in AEC is not supported on this platform");
    return false;
  }
  return effects.setAEC(enable);
}
Example 8: logDeviceInfo
import org.webrtc.Logging; // import the package/class the method depends on
public static void logDeviceInfo(String tag) {
  Logging.d(tag, "Android SDK: " + Build.VERSION.SDK_INT + ", "
      + "Release: " + Build.VERSION.RELEASE + ", "
      + "Brand: " + Build.BRAND + ", "
      + "Device: " + Build.DEVICE + ", "
      + "Id: " + Build.ID + ", "
      + "Hardware: " + Build.HARDWARE + ", "
      + "Manufacturer: " + Build.MANUFACTURER + ", "
      + "Model: " + Build.MODEL + ", "
      + "Product: " + Build.PRODUCT);
}
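Since logDeviceInfo() is a public static utility that only depends on android.os.Build, it can be called from anywhere with any tag; the tag below is a hypothetical one:
// Dump device info once at startup under a tag of your choosing (hypothetical tag).
WebRtcAudioUtils.logDeviceInfo("MyAppAudio");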
Example 9: enableBuiltInNS
import org.webrtc.Logging; // import the package/class the method depends on
private boolean enableBuiltInNS(boolean enable) {
  threadChecker.checkIsOnValidThread();
  Logging.d(TAG, "enableBuiltInNS(" + enable + ')');
  if (effects == null) {
    Logging.e(TAG, "Built-in NS is not supported on this platform");
    return false;
  }
  return effects.setNS(enable);
}
Example 10: init
import org.webrtc.Logging; // import the package/class the method depends on
private boolean init() {
  Logging.d(TAG, "init" + WebRtcAudioUtils.getThreadInfo());
  if (initialized) {
    return true;
  }
  Logging.d(TAG, "audio mode is: " + AUDIO_MODES[audioManager.getMode()]);
  initialized = true;
  volumeLogger.start();
  return true;
}
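AUDIO_MODES maps the integer returned by AudioManager.getMode() to a readable name for the log line. A sketch of the lookup table, assuming it covers the four standard modes (their AudioManager constants are 0 through 3):
// MODE_NORMAL = 0, MODE_RINGTONE = 1, MODE_IN_CALL = 2, MODE_IN_COMMUNICATION = 3.
private static final String[] AUDIO_MODES = new String[] {
    "MODE_NORMAL", "MODE_RINGTONE", "MODE_IN_CALL", "MODE_IN_COMMUNICATION",
};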
Example 11: stopRecording
import org.webrtc.Logging; // import the package/class the method depends on
private boolean stopRecording() {
  Logging.d(TAG, "stopRecording");
  assertTrue(audioThread != null);
  audioThread.stopThread();
  if (!ThreadUtils.joinUninterruptibly(audioThread, AUDIO_RECORD_THREAD_JOIN_TIMEOUT_MS)) {
    Logging.e(TAG, "Join of AudioRecordJavaThread timed out");
  }
  audioThread = null;
  if (effects != null) {
    effects.release();
  }
  releaseAudioResources();
  return true;
}
Example 12: release
import org.webrtc.Logging; // import the package/class the method depends on
/**
 * Block until any pending frame is returned and all GL resources released, even if an interrupt
 * occurs. If an interrupt occurs during release(), the interrupt flag will be set. This function
 * should be called before the Activity is destroyed and the EGLContext is still valid. If you
 * don't call this function, the GL resources might leak.
 */
public void release() {
  final CountDownLatch eglCleanupBarrier = new CountDownLatch(1);
  synchronized (handlerLock) {
    if (renderThreadHandler == null) {
      Logging.d(TAG, getResourceName() + "Already released");
      return;
    }
    // Release EGL and GL resources on render thread.
    // TODO(magjed): This might not be necessary - all OpenGL resources are automatically deleted
    // when the EGL context is lost. It might be dangerous to delete them manually in
    // Activity.onDestroy().
    renderThreadHandler.postAtFrontOfQueue(new Runnable() {
      @Override public void run() {
        drawer.release();
        drawer = null;
        if (yuvTextures != null) {
          GLES20.glDeleteTextures(3, yuvTextures, 0);
          yuvTextures = null;
        }
        // Clear last rendered image to black.
        makeBlack();
        eglBase.release();
        eglBase = null;
        eglCleanupBarrier.countDown();
      }
    });
    // Don't accept any more frames or messages to the render thread.
    renderThreadHandler = null;
  }
  // Make sure the EGL/GL cleanup posted above is executed.
  ThreadUtils.awaitUninterruptibly(eglCleanupBarrier);
  renderThread.quit();
  synchronized (frameLock) {
    if (pendingFrame != null) {
      VideoRenderer.renderFrameDone(pendingFrame);
      pendingFrame = null;
    }
  }
  // The |renderThread| cleanup is not safe to cancel and we need to wait until it's done.
  ThreadUtils.joinUninterruptibly(renderThread);
  renderThread = null;
  // Reset statistics and event reporting.
  synchronized (layoutLock) {
    frameWidth = 0;
    frameHeight = 0;
    frameRotation = 0;
    rendererEvents = null;
  }
  resetStatistics();
}
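Because release() blocks until the render thread has torn down its EGL/GL state, it is typically invoked from the host Activity's teardown path while the EGL context is still valid. A minimal usage sketch, assuming a renderer field named localRenderer (hypothetical name):
@Override
protected void onDestroy() {
  // Release GL resources while the EGL context is still valid.
  if (localRenderer != null) {
    localRenderer.release();
    localRenderer = null;
  }
  super.onDestroy();
}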
Example 13: getStreamMaxVolume
import org.webrtc.Logging; // import the package/class the method depends on
private int getStreamMaxVolume() {
  Logging.d(TAG, "getStreamMaxVolume");
  assertTrue(audioManager != null);
  return audioManager.getStreamMaxVolume(AudioManager.STREAM_VOICE_CALL);
}
Example 14: createMediaConstraintsInternal
import org.webrtc.Logging; // import the package/class the method depends on
private void createMediaConstraintsInternal() {
  // Create peer connection constraints.
  pcConstraints = new MediaConstraints();
  // Enable DTLS for normal calls and disable for loopback calls.
  if (peerConnectionParameters.loopback) {
    pcConstraints.optional.add(
        new MediaConstraints.KeyValuePair(DTLS_SRTP_KEY_AGREEMENT_CONSTRAINT, "false"));
  } else {
    pcConstraints.optional.add(
        new MediaConstraints.KeyValuePair(DTLS_SRTP_KEY_AGREEMENT_CONSTRAINT, "true"));
  }
  // Check if there is a camera on device and disable video call if not.
  if (videoCapturer == null) {
    Log.w(TAG, "No camera on device. Switch to audio only call.");
    videoCallEnabled = false;
  }
  // Create video constraints if video call is enabled.
  if (videoCallEnabled) {
    videoWidth = peerConnectionParameters.videoWidth;
    videoHeight = peerConnectionParameters.videoHeight;
    videoFps = peerConnectionParameters.videoFps;
    // If video resolution is not specified, default to HD.
    if (videoWidth == 0 || videoHeight == 0) {
      videoWidth = HD_VIDEO_WIDTH;
      videoHeight = HD_VIDEO_HEIGHT;
    }
    // If fps is not specified, default to 30.
    if (videoFps == 0) {
      videoFps = 30;
    }
    Logging.d(TAG, "Capturing format: " + videoWidth + "x" + videoHeight + "@" + videoFps);
  }
  // Create audio constraints.
  audioConstraints = new MediaConstraints();
  // added for audio performance measurements
  if (peerConnectionParameters.noAudioProcessing) {
    Log.d(TAG, "Disabling audio processing");
    audioConstraints.mandatory.add(
        new MediaConstraints.KeyValuePair(AUDIO_ECHO_CANCELLATION_CONSTRAINT, "false"));
    audioConstraints.mandatory.add(
        new MediaConstraints.KeyValuePair(AUDIO_AUTO_GAIN_CONTROL_CONSTRAINT, "false"));
    audioConstraints.mandatory.add(
        new MediaConstraints.KeyValuePair(AUDIO_HIGH_PASS_FILTER_CONSTRAINT, "false"));
    audioConstraints.mandatory.add(
        new MediaConstraints.KeyValuePair(AUDIO_NOISE_SUPPRESSION_CONSTRAINT, "false"));
  }
  if (peerConnectionParameters.enableLevelControl) {
    Log.d(TAG, "Enabling level control.");
    audioConstraints.mandatory.add(
        new MediaConstraints.KeyValuePair(AUDIO_LEVEL_CONTROL_CONSTRAINT, "true"));
  }
  // Create SDP constraints.
  sdpMediaConstraints = new MediaConstraints();
  sdpMediaConstraints.mandatory.add(
      new MediaConstraints.KeyValuePair("OfferToReceiveAudio", "true"));
  if (videoCallEnabled || peerConnectionParameters.loopback) {
    sdpMediaConstraints.mandatory.add(
        new MediaConstraints.KeyValuePair("OfferToReceiveVideo", "true"));
  } else {
    sdpMediaConstraints.mandatory.add(
        new MediaConstraints.KeyValuePair("OfferToReceiveVideo", "false"));
  }
}
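These constraint objects are later handed to the PeerConnection APIs: audioConstraints when creating the local audio source and sdpMediaConstraints when creating the offer or answer. A hedged sketch of that downstream use; the factory, observer, variable names, and track id are assumptions:
// Sketch: how the constraints built above are typically consumed elsewhere in the client.
AudioSource audioSource = factory.createAudioSource(audioConstraints);
AudioTrack localAudioTrack = factory.createAudioTrack("ARDAMSa0", audioSource);

// The SDP constraints are passed when the offer (or answer) is created.
peerConnection.createOffer(sdpObserver, sdpMediaConstraints);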
Example 15: setErrorCallback
import org.webrtc.Logging; // import the package/class the method depends on
public static void setErrorCallback(WebRtcAudioRecordErrorCallback errorCallback) {
  Logging.d(TAG, "Set error callback");
  WebRtcAudioRecord.errorCallback = errorCallback;
}