This article collects and summarizes typical usage examples of the Java method org.webrtc.EglBase.create. If you are struggling with questions such as what exactly the Java EglBase.create method does, how to use EglBase.create, or where to find examples of it, the curated code samples here may help. You can also explore further usage examples of its containing class, org.webrtc.EglBase.
The following shows 9 code examples of the EglBase.create method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
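Before the examples, a quick orientation: EglBase.create has three commonly used overloads. The sketch below is illustrative only and assumes nothing beyond the org.webrtc classes that appear in the examples that follow.
import org.webrtc.EglBase;
void eglBaseCreateOverloads() {
    // Stand-alone root context, typically created once per Activity or service.
    EglBase rootEglBase = EglBase.create();
    // A context sharing textures with the root context (e.g. for a codec or capture thread).
    EglBase sharedEglBase = EglBase.create(rootEglBase.getEglBaseContext());
    // A shared context with explicit EGL config attributes, as in Example 1 below.
    EglBase pbufferEglBase =
            EglBase.create(rootEglBase.getEglBaseContext(), EglBase.CONFIG_PIXEL_BUFFER);
    // EGL contexts wrap native resources and must be released explicitly.
    pbufferEglBase.release();
    sharedEglBase.release();
    rootEglBase.release();
}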
Example 1: YuvConverter
import org.webrtc.EglBase; // import the package/class the method depends on
public YuvConverter(EglBase.Context sharedContext) {
    eglBase = EglBase.create(sharedContext, EglBase.CONFIG_PIXEL_RGBA_BUFFER);
    eglBase.createDummyPbufferSurface();
    eglBase.makeCurrent();
    shader = new GlShader(VERTEX_SHADER, FRAGMENT_SHADER);
    shader.useProgram();
    texMatrixLoc = shader.getUniformLocation("texMatrix");
    xUnitLoc = shader.getUniformLocation("xUnit");
    coeffsLoc = shader.getUniformLocation("coeffs");
    GLES20.glUniform1i(shader.getUniformLocation("oesTex"), 0);
    GlUtil.checkNoGLES2Error("Initialize fragment shader uniform values.");
    // Initialize vertex shader attributes.
    shader.setVertexAttribArray("in_pos", 2, DEVICE_RECTANGLE);
    // If the width is not a multiple of 4 pixels, the texture
    // will be scaled up slightly and clipped at the right border.
    shader.setVertexAttribArray("in_tc", 2, TEXTURE_RECTANGLE);
    eglBase.detachCurrent();
}
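The constructor above acquires GL resources, but the snippet does not show their teardown. As a hedged sketch, not part of the original example, a companion release method would make the context current again, free the shader program, and destroy the EGL surface and context:
// Hypothetical companion to the constructor above; not in the original example.
public void release() {
    eglBase.makeCurrent();
    shader.release();  // delete the GLES shader program
    eglBase.release(); // destroy the dummy pbuffer surface and the EGL context
}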
Example 2: setUp
import org.webrtc.EglBase; // import the package/class the method depends on
@Before
public void setUp() {
    signalingExecutor = Executors.newSingleThreadExecutor();
    if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN_MR1) {
        eglBase = EglBase.create();
    }
}
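Since EglBase.create allocates a native EGL context, a fixture like this would normally release it after each test. The matching teardown is not shown in the original; a minimal sketch:
@After
public void tearDown() {
    signalingExecutor.shutdown();
    if (eglBase != null) { // guard: eglBase is only created on SDK >= JELLY_BEAN_MR1
        eglBase.release();
        eglBase = null;
    }
}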
Example 3: onStart
import org.webrtc.EglBase; // import the package/class the method depends on
@Override
protected void onStart() {
    super.onStart();
    Bundle extras = getIntent().getExtras();
    this.username = extras.getString(Constants.USER_NAME, "");
    Log.i(TAG, "username: " + username);
    EglBase rootEglBase = EglBase.create();
    masterView.init(rootEglBase.getEglBaseContext(), null);
    masterView.setScalingType(RendererCommon.ScalingType.SCALE_ASPECT_FILL);
    localView.init(rootEglBase.getEglBaseContext(), null);
    localView.setScalingType(RendererCommon.ScalingType.SCALE_ASPECT_FILL);
    NBMMediaConfiguration peerConnectionParameters = new NBMMediaConfiguration(
            NBMMediaConfiguration.NBMRendererType.OPENGLES,
            NBMMediaConfiguration.NBMAudioCodec.OPUS, 0,
            NBMMediaConfiguration.NBMVideoCodec.VP8, 0,
            new NBMMediaConfiguration.NBMVideoFormat(352, 288, PixelFormat.RGB_888, 20),
            NBMMediaConfiguration.NBMCameraPosition.FRONT);
    videoRequestUserMapping = new HashMap<>();
    nbmWebRTCPeer = new NBMWebRTCPeer(peerConnectionParameters, this, localView, this);
    nbmWebRTCPeer.registerMasterRenderer(masterView);
    Log.i(TAG, "Initializing nbmWebRTCPeer...");
    nbmWebRTCPeer.initialize();
    callState = CallState.PUBLISHING;
    mCallStatus.setText("Publishing...");
}
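In this example rootEglBase is a local variable, so the EGL context can no longer be released once onStart returns. A common variant, sketched below as an assumption rather than something shown in the original activity, promotes it to a field and tears it down together with the views (assuming masterView and localView are SurfaceViewRenderers):
private EglBase rootEglBase; // promoted from the local variable in onStart
@Override
protected void onDestroy() {
    masterView.release();
    localView.release();
    rootEglBase.release(); // free the native EGL context
    super.onDestroy();
}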
Example 4: initVideos
import org.webrtc.EglBase; // import the package/class the method depends on
private void initVideos() {
    EglBase rootEglBase = EglBase.create();
    localVideoView.init(rootEglBase.getEglBaseContext(), null);
    remoteVideoView.init(rootEglBase.getEglBaseContext(), null);
    localVideoView.setZOrderMediaOverlay(true);
    remoteVideoView.setZOrderMediaOverlay(true);
}
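Both renderers here are initialized from the same EglBase context, which is what lets local and remote video be drawn as textures in one GL sharing group. In WebRTC builds of this era, the same context was typically also passed to the factory so that hardware codecs could share textures with the renderers. A hedged sketch, assuming a peerConnectionFactory field exists elsewhere in the class and rootEglBase is kept beyond initVideos:
// Assumption: peerConnectionFactory and rootEglBase are fields of this class.
peerConnectionFactory.setVideoHwAccelerationOptions(
        rootEglBase.getEglBaseContext(),  // EGL context for the local/encoder side
        rootEglBase.getEglBaseContext()); // EGL context for the remote/decoder side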
Example 5: init
import org.webrtc.EglBase; // import the package/class the method depends on
private void init() {
    mLocalVideoView = (SurfaceViewRenderer) findViewById(R.id.local_video_view);
    // Init ExecutorService.
    mExecutorService = Executors.newSingleThreadExecutor();
    // Socket.IO initialization.
    initSocket();
    // Create the video renderer.
    rootEglBase = EglBase.create();
    Log.d(TAG, "Created video renderer.");
    mLocalVideoView.init(rootEglBase.getEglBaseContext(), null);
    mLocalVideoView.setScalingType(RendererCommon.ScalingType.SCALE_ASPECT_FILL);
    mLocalVideoView.setEnableHardwareScaler(true);
    // Point the ProxyRenderer target at the SurfaceViewRenderer.
    localProxyRenderer.setTarget(mLocalVideoView);
    mLocalVideoView.setMirror(true);
    // Check permissions.
    /*for (String permission : MANDATORY_PERMISSIONS) {
        if (checkCallingOrSelfPermission(permission) != PackageManager.PERMISSION_GRANTED) {
            Log.w(TAG, "Permission " + permission + " is not granted.");
            // finish();
            return;
        }
    }*/
    DisplayMetrics displayMetrics = getDisplayMetrics();
    videoWidth = displayMetrics.widthPixels;
    videoHeight = displayMetrics.heightPixels;
    videoFps = 30;
    initPeerConnectionFactory();
    // Set the STUN server.
    mIceServers.add(new PeerConnection.IceServer(googleStunServer));
    // Set default SessionDescription MediaConstraints.
    mSdpConstraints.mandatory.add(new MediaConstraints.KeyValuePair("OfferToReceiveAudio", "true"));
    mSdpConstraints.mandatory.add(new MediaConstraints.KeyValuePair("OfferToReceiveVideo", "true"));
    // Set default AudioConstraints.
    mAudioConstraints.mandatory.add(new MediaConstraints.KeyValuePair(AUDIO_ECHO_CANCELLATION_CONSTRAINT, "false"));
    mAudioConstraints.mandatory.add(new MediaConstraints.KeyValuePair(AUDIO_AUTO_GAIN_CONTROL_CONSTRAINT, "false"));
    mAudioConstraints.mandatory.add(new MediaConstraints.KeyValuePair(AUDIO_HIGH_PASS_FILTER_CONSTRAINT, "false"));
    mAudioConstraints.mandatory.add(new MediaConstraints.KeyValuePair(AUDIO_NOISE_SUPPRESSION_CONSTRAINT, "false"));
    // Enable RTP data channels.
    mPcConstraints.optional.add(new MediaConstraints.KeyValuePair("RtpDataChannels", "true"));
}
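The code above calls a getDisplayMetrics() helper that the snippet does not show. A plausible reconstruction, hypothetical since the original implementation is not included, simply queries the default display:
// Hypothetical helper, not shown in the original example (requires API 17+).
private DisplayMetrics getDisplayMetrics() {
    DisplayMetrics displayMetrics = new DisplayMetrics();
    getWindowManager().getDefaultDisplay().getRealMetrics(displayMetrics);
    return displayMetrics;
}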
Example 6: PeerConnectionClient
import org.webrtc.EglBase; // import the package/class the method depends on
public PeerConnectionClient() {
    rootEglBase = EglBase.create();
}
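A client that owns the root EglBase usually exposes its context so that UI renderers can be initialized against it. The accessor below is a hypothetical addition; the rest of this class is not shown in the example:
// Hypothetical accessor; not part of the snippet above.
public EglBase.Context getRenderContext() {
    return rootEglBase.getEglBaseContext();
}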
Example 7: init
import org.webrtc.EglBase; // import the package/class the method depends on
private void init() {
    rootEglBase = EglBase.create();
    svr_video.init(rootEglBase.getEglBaseContext(), null);
    svr_video.setZOrderMediaOverlay(false);
}
Example 8: onCreate
import org.webrtc.EglBase; // import the package/class the method depends on
@Override
protected void onCreate(Bundle savedInstanceState) {
    super.onCreate(savedInstanceState);
    setContentView(R.layout.activity_main);
    // Initialize PeerConnectionFactory globals.
    // Params are context, initAudio, initVideo and videoCodecHwAcceleration.
    PeerConnectionFactory.initializeAndroidGlobals(this, true, true, true);
    // Create a new PeerConnectionFactory instance.
    PeerConnectionFactory.Options options = new PeerConnectionFactory.Options();
    PeerConnectionFactory peerConnectionFactory = new PeerConnectionFactory(options);
    // Now create a VideoCapturer instance; its callback methods are available if needed.
    VideoCapturer videoCapturerAndroid = createVideoCapturer();
    // Create MediaConstraints, used for specifying video and audio constraints.
    MediaConstraints constraints = new MediaConstraints();
    // Create a VideoSource instance and a local video track.
    VideoSource videoSource = peerConnectionFactory.createVideoSource(videoCapturerAndroid);
    VideoTrack localVideoTrack = peerConnectionFactory.createVideoTrack("100", videoSource);
    // Create an AudioSource instance and a local audio track.
    AudioSource audioSource = peerConnectionFactory.createAudioSource(constraints);
    AudioTrack localAudioTrack = peerConnectionFactory.createAudioTrack("101", audioSource);
    // Start capturing video from the camera: width, height and fps.
    videoCapturerAndroid.startCapture(1000, 1000, 30);
    // Create the surface renderer, init it and add the renderer to the track.
    SurfaceViewRenderer videoView = (SurfaceViewRenderer) findViewById(R.id.surface_rendeer);
    videoView.setMirror(true);
    EglBase rootEglBase = EglBase.create();
    videoView.init(rootEglBase.getEglBaseContext(), null);
    localVideoTrack.addRenderer(new VideoRenderer(videoView));
}
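This example targets an older revision of the library: initializeAndroidGlobals, the PeerConnectionFactory constructor, VideoRenderer, and addRenderer were all removed in later releases. The sketch below shows the rough equivalent against newer org.webrtc versions; the exact names reflect the post-2017 API, and which release you are building against is an assumption:
// Rough modern equivalent of the setup above (assumes a newer org.webrtc release).
PeerConnectionFactory.initialize(
        PeerConnectionFactory.InitializationOptions.builder(this)
                .createInitializationOptions());
PeerConnectionFactory factory = PeerConnectionFactory.builder().createPeerConnectionFactory();
// Camera frames are delivered through a SurfaceTextureHelper tied to the shared EGL context.
SurfaceTextureHelper surfaceTextureHelper =
        SurfaceTextureHelper.create("CaptureThread", rootEglBase.getEglBaseContext());
VideoSource videoSource = factory.createVideoSource(videoCapturerAndroid.isScreencast());
videoCapturerAndroid.initialize(surfaceTextureHelper, this, videoSource.getCapturerObserver());
VideoTrack localVideoTrack = factory.createVideoTrack("100", videoSource);
localVideoTrack.addSink(videoView); // replaces addRenderer(new VideoRenderer(videoView))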
Example 9: onCreate
import org.webrtc.EglBase; // import the package/class the method depends on
@Override
protected void onCreate(Bundle savedInstanceState) {
    super.onCreate(savedInstanceState);
    setContentView(R.layout.activity_main);
    getWindow().addFlags(WindowManager.LayoutParams.FLAG_KEEP_SCREEN_ON);
    mLocalLayout = (PercentFrameLayout) findViewById(R.id.local_view_layout);
    mRemoteLayout = (PercentFrameLayout) findViewById(R.id.remote_video_layout);
    mLocalRender = (MySurfaceViewRenderer) findViewById(R.id.local_video_view);
    mLocalRender.setType(MySurfaceViewRenderer.TYPE_LOCAL);
    mRemoteRender = (MySurfaceViewRenderer) findViewById(R.id.remote_video_view);
    mRemoteRender.setType(MySurfaceViewRenderer.TYPE_REMOTE);
    EglBase eglBase = EglBase.create();
    mRemoteRender.init(eglBase.getEglBaseContext(), null);
    mLocalRender.init(eglBase.getEglBaseContext(), null);
    mLocalRender.setZOrderMediaOverlay(true);
    WebRtcAudioTrack.setAudioTrackModuleFactory(null);
    Intent intent = getIntent();
    if (intent != null) {
        long prevTimeStamp = ((WebRTCApplication) getApplication()).getCallTimeStamp();
        long callTimeStamp = intent.getLongExtra(EXTRA_CALL_TIMESTAMP, 0);
        if (prevTimeStamp == callTimeStamp) {
            createWebRTCErrorDialog();
            return;
        }
        ((WebRTCApplication) getApplication()).setCallTimeStamp(callTimeStamp);
        PeerConfig config = intent.getParcelableExtra(EXTRA_CONFIG);
        String videoUri = intent.getStringExtra(EXTRA_VIDEO_URI);
        String audioUri = intent.getStringExtra(EXTRA_AUDIO_URI);
        String addressId = intent.getStringExtra(EXTRA_ADDRESS_ID);
        boolean offer = intent.getBooleanExtra(EXTRA_OFFER, false);
        String audioSampleRate = intent.getStringExtra(EXTRA_AUDIOSAMPLERATE);
        int audioSampleRateValue;
        if (audioSampleRate == null) {
            audioSampleRateValue = WebRTCVideoChatProfile.PARAM_RATE_48000;
        } else {
            audioSampleRateValue = Integer.valueOf(audioSampleRate);
        }
        String audioBitDepth = intent.getStringExtra(EXTRA_AUDIOBITDEPTH);
        String audioChannel = intent.getStringExtra(EXTRA_AUDIOCHANNEL);
        WebRTCController.Builder builder = new WebRTCController.Builder();
        builder.setApplication((WebRTCApplication) getApplication());
        builder.setWebRTCEventListener(mListener);
        builder.setContext(this);
        builder.setEglBase(eglBase);
        builder.setConfig(config);
        builder.setRemoteRender(mRemoteRender);
        builder.setLocalRender(mLocalRender);
        builder.setVideoUri(videoUri);
        builder.setAudioUri(audioUri);
        builder.setAddressId(addressId);
        builder.setOffer(offer);
        builder.setAudioSampleRate(audioSampleRateValue);
        builder.setAudioBitDepth(audioBitDepth);
        builder.setAudioChannel(audioChannel);
        builder.setLandscape(isLandscape());
        mWebRTCController = builder.create();
        updateVideoView(videoUri);
    } else {
        openWebRTCErrorDialog();
    }
}
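One fragile detail above: Integer.valueOf(audioSampleRate) will throw a NumberFormatException if the extra contains a malformed value. A more defensive version of that branch, sketched here with an assumed fallback to the 48 kHz default:
int audioSampleRateValue;
try {
    audioSampleRateValue = (audioSampleRate != null)
            ? Integer.parseInt(audioSampleRate)
            : WebRTCVideoChatProfile.PARAM_RATE_48000;
} catch (NumberFormatException e) {
    // Assumed fallback: use the 48 kHz default on malformed input.
    audioSampleRateValue = WebRTCVideoChatProfile.PARAM_RATE_48000;
}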