This article collects typical usage examples of the Java class org.webrtc.Logging. If you are unsure what the Logging class does or how to use it, the curated examples below may help.
The Logging class belongs to the org.webrtc package. Fifteen code examples of the class are shown below, sorted by popularity by default.
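Before the examples, it may help to see how Logging itself is typically switched on. The sketch below is a minimal illustration, assuming the enableLogToDebugOutput API available in current org.webrtc Android builds; the tag, message, and severity chosen here are illustrative assumptions, not taken from the examples that follow.
import org.webrtc.Logging;

// Route native WebRTC logs to Android's debug output (logcat).
// LS_INFO is an assumed severity; other values include LS_VERBOSE, LS_WARNING, LS_ERROR.
Logging.enableLogToDebugOutput(Logging.Severity.LS_INFO);
// Log a message through the same facility (tag and message are hypothetical).
Logging.d("MyAppTag", "WebRTC logging enabled");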
Example 1: updateFrameDimensionsAndReportEvents
import org.webrtc.Logging; // import the required package/class
private void updateFrameDimensionsAndReportEvents(VideoRenderer.I420Frame frame) {
  synchronized (layoutLock) {
    if (frameWidth != frame.width || frameHeight != frame.height
        || frameRotation != frame.rotationDegree) {
      Logging.d(TAG, getResourceName() + "Reporting frame resolution changed to "
          + frame.width + "x" + frame.height + " with rotation " + frame.rotationDegree);
      if (rendererEvents != null) {
        rendererEvents.onFrameResolutionChanged(frame.width, frame.height, frame.rotationDegree);
      }
      frameWidth = frame.width;
      frameHeight = frame.height;
      frameRotation = frame.rotationDegree;
      post(new Runnable() {
        @Override public void run() {
          requestLayout();
        }
      });
    }
  }
}
Example 2: stopPlayout
import org.webrtc.Logging; // import the required package/class
private boolean stopPlayout() {
  Logging.d(TAG, "stopPlayout");
  assertTrue(audioThread != null);
  logUnderrunCount();
  audioThread.stopThread();
  final Thread aThread = audioThread;
  audioThread = null;
  if (aThread != null) {
    Logging.d(TAG, "Stopping the AudioTrackThread...");
    aThread.interrupt();
    if (!ThreadUtils.joinUninterruptibly(aThread, AUDIO_TRACK_THREAD_JOIN_TIMEOUT_MS)) {
      Logging.e(TAG, "Join of AudioTrackThread timed out.");
    }
    Logging.d(TAG, "AudioTrackThread has now been stopped.");
  }
  releaseAudioResources();
  return true;
}
Example 3: getNativeOutputSampleRate
import org.webrtc.Logging; // import the required package/class
private int getNativeOutputSampleRate() {
  // Override this if we're running on an old emulator image which only
  // supports 8 kHz and doesn't support PROPERTY_OUTPUT_SAMPLE_RATE.
  if (WebRtcAudioUtils.runningOnEmulator()) {
    Logging.d(TAG, "Running emulator, overriding sample rate to 8 kHz.");
    return 8000;
  }
  // Default can be overridden by WebRtcAudioUtils.setDefaultSampleRateHz().
  // If so, use that value and return here.
  if (WebRtcAudioUtils.isDefaultSampleRateOverridden()) {
    Logging.d(TAG, "Default sample rate is overridden to "
        + WebRtcAudioUtils.getDefaultSampleRateHz() + " Hz");
    return WebRtcAudioUtils.getDefaultSampleRateHz();
  }
  // No overrides available. Deliver best possible estimate based on default
  // Android AudioManager APIs.
  final int sampleRateHz;
  if (WebRtcAudioUtils.runningOnJellyBeanMR1OrHigher()) {
    sampleRateHz = getSampleRateOnJellyBeanMR10OrHigher();
  } else {
    sampleRateHz = WebRtcAudioUtils.getDefaultSampleRateHz();
  }
  Logging.d(TAG, "Sample rate is set to " + sampleRateHz + " Hz");
  return sampleRateHz;
}
Example 4: startRecording
import org.webrtc.Logging; // import the required package/class
private boolean startRecording() {
  Logging.d(TAG, "startRecording");
  assertTrue(audioRecord != null);
  assertTrue(audioThread == null);
  try {
    audioRecord.startRecording();
  } catch (IllegalStateException e) {
    reportWebRtcAudioRecordStartError("AudioRecord.startRecording failed: " + e.getMessage());
    return false;
  }
  if (audioRecord.getRecordingState() != AudioRecord.RECORDSTATE_RECORDING) {
    reportWebRtcAudioRecordStartError("AudioRecord.startRecording failed - incorrect state :"
        + audioRecord.getRecordingState());
    return false;
  }
  audioThread = new AudioRecordThread("AudioRecordJavaThread");
  audioThread.start();
  return true;
}
Example 5: surfaceCreated
import org.webrtc.Logging; // import the required package/class
@Override
public void surfaceCreated(final SurfaceHolder holder) {
  Logging.d(TAG, getResourceName() + "Surface created.");
  synchronized (layoutLock) {
    isSurfaceCreated = true;
  }
  tryCreateEglSurface();
}
Example 6: surfaceChanged
import org.webrtc.Logging; // import the required package/class
@Override
public void surfaceChanged(SurfaceHolder holder, int format, int width, int height) {
  Logging.d(TAG, getResourceName() + "Surface changed: " + width + "x" + height);
  synchronized (layoutLock) {
    surfaceSize.x = width;
    surfaceSize.y = height;
  }
  // Might have a pending frame waiting for a surface of correct size.
  runOnRenderThread(renderFrameRunnable);
}
Example 7: renderFrame
import org.webrtc.Logging; // import the required package/class
@Override
synchronized public void renderFrame(VideoRenderer.I420Frame frame) {
  if (target == null) {
    Logging.d(TAG, "Dropping frame in proxy because target is null.");
    VideoRenderer.renderFrameDone(frame);
    return;
  }
  target.renderFrame(frame);
}
Example 8: onFrame
import org.webrtc.Logging; // import the required package/class
@Override
synchronized public void onFrame(VideoFrame frame) {
  if (target == null) {
    Logging.d(TAG, "Dropping frame in proxy because target is null.");
    return;
  }
  target.onFrame(frame);
}
Example 9: setSwappedFeeds
import org.webrtc.Logging; // import the required package/class
private void setSwappedFeeds(boolean isSwappedFeeds) {
  Logging.d(TAG, "setSwappedFeeds: " + isSwappedFeeds);
  this.isSwappedFeeds = isSwappedFeeds;
  localProxyVideoSink.setTarget(isSwappedFeeds ? fullscreenRenderer : pipRenderer);
  remoteProxyRenderer.setTarget(isSwappedFeeds ? pipRenderer : fullscreenRenderer);
  fullscreenRenderer.setMirror(isSwappedFeeds);
  pipRenderer.setMirror(!isSwappedFeeds);
}
Example 10: WebRtcAudioTrack
import org.webrtc.Logging; // import the required package/class
WebRtcAudioTrack(long nativeAudioTrack) {
  Logging.d(TAG, "ctor" + WebRtcAudioUtils.getThreadInfo());
  this.nativeAudioTrack = nativeAudioTrack;
  audioManager =
      (AudioManager) ContextUtils.getApplicationContext().getSystemService(Context.AUDIO_SERVICE);
  if (DEBUG) {
    WebRtcAudioUtils.logDeviceInfo(TAG);
  }
}
Example 11: logMainParametersExtended
import org.webrtc.Logging; // import the required package/class
@TargetApi(23)
private void logMainParametersExtended() {
  if (WebRtcAudioUtils.runningOnMarshmallowOrHigher()) {
    Logging.d(TAG, "AudioRecord: "
        // The frame count of the native AudioRecord buffer.
        + "buffer size in frames: " + audioRecord.getBufferSizeInFrames());
  }
}
Example 12: stop
import org.webrtc.Logging; // import the required package/class
public void stop() {
  if (mVideoCapturer != null && !mVideoCapturerStopped) {
    Logging.d(TAG, "Stop video source.");
    try {
      mVideoCapturer.stopCapture();
    } catch (InterruptedException e) {
      Logging.e(TAG, "stop", e);
    }
    mVideoCapturerStopped = true;
  }
}
Example 13: logMainParameters
import org.webrtc.Logging; // import the required package/class
private void logMainParameters() {
  Logging.d(TAG, "AudioTrack: "
      + "session ID: " + audioTrack.getAudioSessionId() + ", "
      + "channels: " + audioTrack.getChannelCount() + ", "
      + "sample rate: " + audioTrack.getSampleRate() + ", "
      // Gain (>=1.0) expressed as linear multiplier on sample values.
      + "max gain: " + audioTrack.getMaxVolume());
}
Example 14: createAudioTrackOnLollipopOrHigher
import org.webrtc.Logging; // import the required package/class
@TargetApi(21)
private static AudioTrack createAudioTrackOnLollipopOrHigher(
    int sampleRateInHz, int channelConfig, int bufferSizeInBytes) {
  Logging.d(TAG, "createAudioTrackOnLollipopOrHigher");
  // TODO(henrika): use setPerformanceMode(int) with PERFORMANCE_MODE_LOW_LATENCY to control
  // performance when Android O is supported. Add some logging in the mean time.
  final int nativeOutputSampleRate =
      AudioTrack.getNativeOutputSampleRate(AudioManager.STREAM_VOICE_CALL);
  Logging.d(TAG, "nativeOutputSampleRate: " + nativeOutputSampleRate);
  if (sampleRateInHz != nativeOutputSampleRate) {
    Logging.w(TAG, "Unable to use fast mode since requested sample rate is not native");
  }
  if (usageAttribute != DEFAULT_USAGE) {
    Logging.w(TAG, "A non default usage attribute is used: " + usageAttribute);
  }
  // Create an audio track where the audio usage is for VoIP and the content type is speech.
  return new AudioTrack(
      new AudioAttributes.Builder()
          .setUsage(usageAttribute)
          .setContentType(AudioAttributes.CONTENT_TYPE_SPEECH)
          .build(),
      new AudioFormat.Builder()
          .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
          .setSampleRate(sampleRateInHz)
          .setChannelMask(channelConfig)
          .build(),
      bufferSizeInBytes,
      AudioTrack.MODE_STREAM,
      AudioManager.AUDIO_SESSION_ID_GENERATE);
}
Example 15: releaseAudioResources
import org.webrtc.Logging; // import the required package/class
private void releaseAudioResources() {
  Logging.d(TAG, "releaseAudioResources");
  if (audioTrack != null) {
    audioTrack.release();
    audioTrack = null;
  }
}