This article collects typical usage examples of the Java method io.grpc.ManagedChannel.newCall. If you are unsure how to use ManagedChannel.newCall, or are looking for concrete examples of it, the curated samples below may help. You may also want to explore further usage examples of the containing class, io.grpc.ManagedChannel.
Four code examples of the ManagedChannel.newCall method are shown below, sorted by popularity by default.
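Before the examples, here is a minimal sketch of the basic pattern: newCall creates a ClientCall object for a single RPC but does not start it; a helper such as ClientCalls.blockingUnaryCall then drives start, sendMessage, and halfClose. The snippet assumes the stubs generated from the standard gRPC helloworld example (GreeterGrpc, HelloRequest, HelloReply) and a local server address; it is an illustration, not one of the collected examples.

import io.grpc.CallOptions;
import io.grpc.ClientCall;
import io.grpc.ManagedChannel;
import io.grpc.ManagedChannelBuilder;
import io.grpc.stub.ClientCalls;

ManagedChannel channel = ManagedChannelBuilder
    .forAddress("localhost", 50051) // assumed address
    .usePlaintext()
    .build();
// newCall only creates the ClientCall; nothing is sent over the wire until
// start() is invoked (here indirectly, by the blockingUnaryCall helper).
ClientCall<HelloRequest, HelloReply> call =
    channel.newCall(GreeterGrpc.getSayHelloMethod(), CallOptions.DEFAULT);
HelloReply reply = ClientCalls.blockingUnaryCall(
    call, HelloRequest.newBuilder().setName("world").build());
channel.shutdown();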
Example 1: startStreamingCalls
import io.grpc.ManagedChannel; // import required by this method
/**
* Start a continuously executing set of duplex streaming ping-pong calls that will terminate when
* {@code done.get()} is true. Each completed call will increment the counter by the specified
* delta which benchmarks can use to measure messages per second or bandwidth.
*/
protected CountDownLatch startStreamingCalls(int callsPerChannel, final AtomicLong counter,
    final AtomicBoolean record, final AtomicBoolean done, final long counterDelta) {
  final CountDownLatch latch = new CountDownLatch(callsPerChannel * channels.length);
  for (final ManagedChannel channel : channels) {
    for (int i = 0; i < callsPerChannel; i++) {
      final ClientCall<ByteBuf, ByteBuf> streamingCall =
          channel.newCall(pingPongMethod, CALL_OPTIONS);
      final AtomicReference<StreamObserver<ByteBuf>> requestObserverRef =
          new AtomicReference<StreamObserver<ByteBuf>>();
      final AtomicBoolean ignoreMessages = new AtomicBoolean();
      StreamObserver<ByteBuf> requestObserver = ClientCalls.asyncBidiStreamingCall(
          streamingCall,
          new StreamObserver<ByteBuf>() {
            @Override
            public void onNext(ByteBuf value) {
              if (done.get()) {
                if (!ignoreMessages.getAndSet(true)) {
                  requestObserverRef.get().onCompleted();
                }
                return;
              }
              requestObserverRef.get().onNext(request.slice());
              if (record.get()) {
                counter.addAndGet(counterDelta);
              }
              // request is called automatically because the observer implicitly has auto
              // inbound flow control
            }

            @Override
            public void onError(Throwable t) {
              logger.log(Level.WARNING, "call error", t);
              latch.countDown();
            }

            @Override
            public void onCompleted() {
              latch.countDown();
            }
          });
      requestObserverRef.set(requestObserver);
      requestObserver.onNext(request.slice());
      requestObserver.onNext(request.slice());
    }
  }
  return latch;
}
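A short usage sketch for the method above. The harness fields it relies on (channels, request, pingPongMethod, CALL_OPTIONS) belong to the surrounding benchmark class, and the call counts and timings below are assumptions, not part of the original:

AtomicLong counter = new AtomicLong();
AtomicBoolean record = new AtomicBoolean(true);
AtomicBoolean done = new AtomicBoolean();
CountDownLatch latch = startStreamingCalls(10, counter, record, done, 1);
Thread.sleep(TimeUnit.SECONDS.toMillis(30)); // let the ping-pong run for a while
done.set(true); // in-flight calls complete themselves and count down the latch
latch.await(5, TimeUnit.SECONDS);
System.out.println("messages recorded: " + counter.get());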
Example 2: startFlowControlledStreamingCalls
import io.grpc.ManagedChannel; // import required by this method
/**
* Start a continuously executing set of duplex streaming ping-pong calls that will terminate when
* {@code done.get()} is true. Each completed call will increment the counter by the specified
* delta which benchmarks can use to measure messages per second or bandwidth.
*/
protected CountDownLatch startFlowControlledStreamingCalls(int callsPerChannel,
    final AtomicLong counter, final AtomicBoolean record, final AtomicBoolean done,
    final long counterDelta) {
  final CountDownLatch latch = new CountDownLatch(callsPerChannel * channels.length);
  for (final ManagedChannel channel : channels) {
    for (int i = 0; i < callsPerChannel; i++) {
      final ClientCall<ByteBuf, ByteBuf> streamingCall =
          channel.newCall(flowControlledStreaming, CALL_OPTIONS);
      final AtomicReference<StreamObserver<ByteBuf>> requestObserverRef =
          new AtomicReference<StreamObserver<ByteBuf>>();
      final AtomicBoolean ignoreMessages = new AtomicBoolean();
      StreamObserver<ByteBuf> requestObserver = ClientCalls.asyncBidiStreamingCall(
          streamingCall,
          new StreamObserver<ByteBuf>() {
            @Override
            public void onNext(ByteBuf value) {
              StreamObserver<ByteBuf> obs = requestObserverRef.get();
              if (done.get()) {
                if (!ignoreMessages.getAndSet(true)) {
                  obs.onCompleted();
                }
                return;
              }
              if (record.get()) {
                counter.addAndGet(counterDelta);
              }
              // request is called automatically because the observer implicitly has auto
              // inbound flow control
            }

            @Override
            public void onError(Throwable t) {
              logger.log(Level.WARNING, "call error", t);
              latch.countDown();
            }

            @Override
            public void onCompleted() {
              latch.countDown();
            }
          });
      requestObserverRef.set(requestObserver);
      // Add some outstanding requests to ensure the server is filling the connection
      streamingCall.request(5);
      requestObserver.onNext(request.slice());
    }
  }
  return latch;
}
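The pingPongMethod and flowControlledStreaming fields used by examples 1 and 2 are MethodDescriptor instances defined elsewhere in the benchmark class. As a rough sketch of how such a ByteBuf-based bidirectional streaming descriptor could be built (the ByteBufMarshaller class here is hypothetical, standing in for whatever marshaller the benchmark uses):

MethodDescriptor<ByteBuf, ByteBuf> pingPongMethod =
    MethodDescriptor.<ByteBuf, ByteBuf>newBuilder()
        .setType(MethodDescriptor.MethodType.BIDI_STREAMING)
        .setFullMethodName(MethodDescriptor.generateFullMethodName("benchmark", "pingPong"))
        .setRequestMarshaller(new ByteBufMarshaller())  // hypothetical marshaller
        .setResponseMarshaller(new ByteBufMarshaller()) // hypothetical marshaller
        .build();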
Example 3: oobTransportDoesNotAffectIdleness
import io.grpc.ManagedChannel; // import required by this method
@Test
public void oobTransportDoesNotAffectIdleness() {
  // Start a call, which goes to delayed transport
  ClientCall<String, Integer> call = channel.newCall(method, CallOptions.DEFAULT);
  call.start(mockCallListener, new Metadata());
  // Verify that we have exited the idle mode
  ArgumentCaptor<Helper> helperCaptor = ArgumentCaptor.forClass(null);
  verify(mockLoadBalancerFactory).newLoadBalancer(helperCaptor.capture());
  Helper helper = helperCaptor.getValue();
  // Fail the RPC
  SubchannelPicker failingPicker = mock(SubchannelPicker.class);
  when(failingPicker.pickSubchannel(any(PickSubchannelArgs.class)))
      .thenReturn(PickResult.withError(Status.UNAVAILABLE));
  helper.updateBalancingState(TRANSIENT_FAILURE, failingPicker);
  executor.runDueTasks();
  verify(mockCallListener).onClose(same(Status.UNAVAILABLE), any(Metadata.class));
  // ... so that the channel resets its in-use state
  assertFalse(channel.inUseStateAggregator.isInUse());
  // Now make an RPC on an OOB channel
  ManagedChannel oob = helper.createOobChannel(servers.get(0), "oobauthority");
  verify(mockTransportFactory, never())
      .newClientTransport(any(SocketAddress.class), same("oobauthority"), same(USER_AGENT),
          same(NO_PROXY));
  ClientCall<String, Integer> oobCall = oob.newCall(method, CallOptions.DEFAULT);
  oobCall.start(mockCallListener2, new Metadata());
  verify(mockTransportFactory)
      .newClientTransport(any(SocketAddress.class), same("oobauthority"), same(USER_AGENT),
          same(NO_PROXY));
  MockClientTransportInfo oobTransportInfo = newTransports.poll();
  assertEquals(0, newTransports.size());
  // The OOB transport reports in-use state
  oobTransportInfo.listener.transportInUse(true);
  // But it won't stop the channel from going idle
  verify(mockLoadBalancer, never()).shutdown();
  timer.forwardTime(IDLE_TIMEOUT_SECONDS, TimeUnit.SECONDS);
  verify(mockLoadBalancer).shutdown();
}
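The idle timer this test drives with timer.forwardTime(IDLE_TIMEOUT_SECONDS, ...) is configured on the channel builder in application code. A minimal sketch (the address and timeout value are assumptions):

ManagedChannel channel = ManagedChannelBuilder
    .forAddress("localhost", 50051)
    // Release transports and the load balancer after 30s with no active RPCs;
    // the channel exits idle again on the next newCall.
    .idleTimeout(30, TimeUnit.SECONDS)
    .usePlaintext()
    .build();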
Example 4: inprocessTransportManualFlow
import io.grpc.ManagedChannel; // import required by this method
@Test
public void inprocessTransportManualFlow() throws Exception {
  final Semaphore semaphore = new Semaphore(1);
  ServerServiceDefinition service = ServerServiceDefinition.builder(
      new ServiceDescriptor("some", STREAMING_METHOD))
      .addMethod(STREAMING_METHOD, ServerCalls.asyncBidiStreamingCall(
          new ServerCalls.BidiStreamingMethod<Integer, Integer>() {
            int iteration;

            @Override
            public StreamObserver<Integer> invoke(StreamObserver<Integer> responseObserver) {
              final ServerCallStreamObserver<Integer> serverCallObserver =
                  (ServerCallStreamObserver<Integer>) responseObserver;
              serverCallObserver.setOnReadyHandler(new Runnable() {
                @Override
                public void run() {
                  while (serverCallObserver.isReady()) {
                    serverCallObserver.onNext(iteration);
                  }
                  iteration++;
                  semaphore.release();
                }
              });
              return new ServerCalls.NoopStreamObserver<Integer>() {
                @Override
                public void onCompleted() {
                  serverCallObserver.onCompleted();
                }
              };
            }
          }))
      .build();
  long tag = System.nanoTime();
  InProcessServerBuilder.forName("go-with-the-flow" + tag).addService(service).build().start();
  ManagedChannel channel = InProcessChannelBuilder.forName("go-with-the-flow" + tag).build();
  final ClientCall<Integer, Integer> clientCall = channel.newCall(STREAMING_METHOD,
      CallOptions.DEFAULT);
  final CountDownLatch latch = new CountDownLatch(1);
  final int[] receivedMessages = new int[6];
  clientCall.start(new ClientCall.Listener<Integer>() {
    int index;

    @Override
    public void onMessage(Integer message) {
      receivedMessages[index++] = message;
    }

    @Override
    public void onClose(Status status, Metadata trailers) {
      latch.countDown();
    }
  }, new Metadata());
  semaphore.acquire();
  clientCall.request(1);
  semaphore.acquire();
  clientCall.request(2);
  semaphore.acquire();
  clientCall.request(3);
  clientCall.halfClose();
  latch.await(5, TimeUnit.SECONDS);
  // Verify that the number of messages produced in each onReady handler call matches the
  // number requested by the client.
  assertArrayEquals(new int[]{0, 1, 1, 2, 2, 2}, receivedMessages);
}
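The same manual inbound flow control is available one level up, through the async stub helpers: a ClientResponseObserver can disable automatic flow control before the call starts and then pace delivery with request(int). A sketch under that assumption, reusing the channel and STREAMING_METHOD from the test above:

ClientCalls.asyncBidiStreamingCall(
    channel.newCall(STREAMING_METHOD, CallOptions.DEFAULT),
    new ClientResponseObserver<Integer, Integer>() {
      ClientCallStreamObserver<Integer> requestStream;

      @Override
      public void beforeStart(ClientCallStreamObserver<Integer> stream) {
        requestStream = stream;
        // Take over inbound pacing; messages arrive only as request() is called.
        requestStream.disableAutoInboundFlowControl();
      }

      @Override
      public void onNext(Integer value) {
        // Handle the message, then explicitly ask for exactly one more.
        requestStream.request(1);
      }

      @Override
      public void onError(Throwable t) { }

      @Override
      public void onCompleted() { }
    });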