This article collects typical usage examples of the Java method io.netty.channel.ChannelFuture.get. If you are unsure what ChannelFuture.get does, how to call it, or want to see it used in real code, the curated examples below should help. You can also read further about the class this method belongs to, io.netty.channel.ChannelFuture.
The following presents 7 code examples of ChannelFuture.get, sorted by popularity by default.
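For orientation before the examples: ChannelFuture extends java.util.concurrent.Future<Void>, so get() blocks until the underlying I/O operation completes and surfaces failures as ExecutionException. Here is a minimal, self-contained sketch of that behavior; the placeholder handler and the endpoint (localhost:9042) are illustrative assumptions, not taken from the examples below.

import io.netty.bootstrap.Bootstrap;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelInboundHandlerAdapter;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.nio.NioSocketChannel;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

public class ChannelFutureGetSketch {
  public static void main(String[] args) throws Exception {
    NioEventLoopGroup group = new NioEventLoopGroup();
    try {
      Bootstrap b = new Bootstrap()
          .group(group)
          .channel(NioSocketChannel.class)
          .handler(new ChannelInboundHandlerAdapter()); // placeholder handler
      // connect() returns immediately; get() blocks until the connect completes.
      ChannelFuture f = b.connect("localhost", 9042); // hypothetical endpoint
      try {
        f.get(5, TimeUnit.SECONDS); // throws ExecutionException if the connect failed
        System.out.println("Connected to " + f.channel().remoteAddress());
        f.channel().close().get(); // close() also returns a ChannelFuture
      } catch (ExecutionException e) {
        System.err.println("Connect failed: " + e.getCause());
      } catch (TimeoutException e) {
        System.err.println("Connect timed out");
      }
    } finally {
      group.shutdownGracefully();
    }
  }
}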
Example 1: handleOperationComplete
import io.netty.channel.ChannelFuture; // import the package/class the method depends on
protected synchronized void handleOperationComplete ( final SettableFuture<Void> result, final ChannelFuture future )
{
    if ( this.connectFuture != result )
    {
        // this should never happen
        return;
    }
    this.connectFuture = null;
    try
    {
        future.get ();
        this.channel = future.channel ();
        fireConnected ( this.channel );
        result.set ( null );
    }
    catch ( final InterruptedException | ExecutionException e )
    {
        fireDisconnected ( e );
        result.setException ( e );
    }
}
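Because get() blocks the calling thread until the operation completes, Example 1's bridge can also be written without blocking by using Netty's listener API (addListener, isSuccess, cause). The following is a hypothetical non-blocking rewrite of the same method in the same style, reusing the example's fireConnected/fireDisconnected callbacks and SettableFuture; it is a sketch under the assumption that fireDisconnected accepts a Throwable, not the project's actual code. Note that the listener body runs later on the event loop, outside the synchronized block.

import io.netty.channel.ChannelFuture; // same import as above
import io.netty.channel.ChannelFutureListener; // assumed extra import for the listener cast

protected synchronized void handleOperationComplete ( final SettableFuture<Void> result, final ChannelFuture future )
{
    if ( this.connectFuture != result )
    {
        // stale completion for a superseded connect attempt
        return;
    }
    this.connectFuture = null;
    // The listener fires once the connect attempt finishes; no thread blocks in get().
    future.addListener ( (ChannelFutureListener) f -> {
        if ( f.isSuccess () )
        {
            this.channel = f.channel ();
            fireConnected ( this.channel );
            result.set ( null );
        }
        else
        {
            fireDisconnected ( f.cause () ); // assumes a Throwable-accepting overload
            result.setException ( f.cause () );
        }
    } );
}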
Example 2: testCloseConnection
import io.netty.channel.ChannelFuture; // import the package/class the method depends on
@Test
public void testCloseConnection() throws Exception {
  // Validate that when a stub dictates to close a connection it does so and does not close the
  // NodeSpec's channel so it can remain accepting traffic.
  NodeSpec node = NodeSpec.builder().build();
  BoundNode boundNode = localServer.register(node);
  stubCloseOnStartup(Scope.CONNECTION);

  try (MockClient client = new MockClient(eventLoop)) {
    client.connect(boundNode.getAddress());
    // Sending a write should cause the connection to close.
    ChannelFuture f = client.write(new Startup());
    // Future should be successful since write was successful.
    f.get(5, TimeUnit.SECONDS);

    // Next write should fail because the channel was closed.
    f = client.write(Options.INSTANCE);
    try {
      f.get();
    } catch (ExecutionException e) {
      assertThat(e.getCause()).isInstanceOf(ClosedChannelException.class);
    } finally {
      assertThat(client.channel.isOpen()).isFalse();
      // node should still accept connections.
      assertThat(boundNode.channel.get().isOpen()).isTrue();
    }
  }
}
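A side note on the test pattern: Examples 2 through 5 all assert the failure by calling get() and catching ExecutionException by hand. Assuming the tests already use AssertJ (as the assertThat calls suggest), the same assertion can be written more compactly with assertThatThrownBy. This is a hypothetical condensed form, not the project's code:

import static org.assertj.core.api.Assertions.assertThatThrownBy;

// Next write should fail because the channel was closed.
ChannelFuture failed = client.write(Options.INSTANCE);
assertThatThrownBy(failed::get)
    .isInstanceOf(ExecutionException.class)
    .hasCauseInstanceOf(ClosedChannelException.class);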
Example 3: testCloseNode
import io.netty.channel.ChannelFuture; // import the package/class the method depends on
@Test
public void testCloseNode() throws Exception {
  // Validates that a stub that dictates to close a node's connections does so.
  ClusterSpec cluster = ClusterSpec.builder().withNodes(2, 2).build();
  BoundCluster boundCluster = localServer.register(cluster);
  BoundDataCenter dc0 = boundCluster.getDataCenters().iterator().next();
  Iterator<BoundNode> nodes = dc0.getNodes().iterator();
  BoundNode boundNode = nodes.next();
  stubCloseOnStartup(Scope.NODE);

  Map<BoundNode, MockClient> nodeToClients = new HashMap<>();
  MockClient client = null;
  try {
    // Create a connection to each node.
    for (BoundNode node : boundCluster.getNodes()) {
      MockClient client0 = new MockClient(eventLoop);
      client0.connect(node.getAddress());
      nodeToClients.put(node, client0);
    }

    client = new MockClient(eventLoop);
    client.connect(boundNode.getAddress());
    // Sending a write should cause the connection to close.
    ChannelFuture f = client.write(new Startup());
    // Future should be successful since write was successful.
    f.get(5, TimeUnit.SECONDS);

    // Next write should fail because the channel was closed.
    f = client.write(Options.INSTANCE);
    try {
      f.get();
    } catch (ExecutionException e) {
      assertThat(e.getCause()).isInstanceOf(ClosedChannelException.class);
    }
  } finally {
    if (client != null) {
      // client that sent request should close.
      assertThat(client.channel.isOpen()).isFalse();
    }
    // All clients should remain open except the ones to the node that received the request.
    nodeToClients
        .entrySet()
        .stream()
        .filter(e -> e.getKey() != boundNode)
        .forEach(e -> assertThat(e.getValue().channel.isOpen()).isTrue());
    nodeToClients
        .entrySet()
        .stream()
        .filter(e -> e.getKey() == boundNode)
        .forEach(e -> assertThat(e.getValue().channel.isOpen()).isFalse());
  }
}
Example 4: testCloseDataCenter
import io.netty.channel.ChannelFuture; // import the package/class the method depends on
@Test
public void testCloseDataCenter() throws Exception {
  // Validates that a stub that dictates to close a node's DC's connections does so.
  ClusterSpec cluster = ClusterSpec.builder().withNodes(2, 2).build();
  BoundCluster boundCluster = localServer.register(cluster);
  BoundDataCenter dc0 = boundCluster.getDataCenters().iterator().next();
  Iterator<BoundNode> nodes = dc0.getNodes().iterator();
  BoundNode boundNode = nodes.next();
  stubCloseOnStartup(Scope.DATA_CENTER);

  Map<BoundNode, MockClient> nodeToClients = new HashMap<>();
  MockClient client = null;
  try {
    // Create a connection to each node.
    for (BoundNode node : boundCluster.getNodes()) {
      MockClient client0 = new MockClient(eventLoop);
      client0.connect(node.getAddress());
      nodeToClients.put(node, client0);
    }

    client = new MockClient(eventLoop);
    client.connect(boundNode.getAddress());
    // Sending a write should cause the connection to close.
    ChannelFuture f = client.write(new Startup());
    // Future should be successful since write was successful.
    f.get(5, TimeUnit.SECONDS);

    // Next write should fail because the channel was closed.
    f = client.write(Options.INSTANCE);
    try {
      f.get();
    } catch (ExecutionException e) {
      assertThat(e.getCause()).isInstanceOf(ClosedChannelException.class);
    }
  } finally {
    if (client != null) {
      // client that sent request should close.
      assertThat(client.channel.isOpen()).isFalse();
    }
    // Clients connecting to a different DC should remain open.
    nodeToClients
        .entrySet()
        .stream()
        .filter(e -> e.getKey().getDataCenter() != boundNode.getDataCenter())
        .forEach(e -> assertThat(e.getValue().channel.isOpen()).isTrue());
    // Clients connecting to same DC should close.
    nodeToClients
        .entrySet()
        .stream()
        .filter(e -> e.getKey().getDataCenter() == boundNode.getDataCenter())
        .forEach(e -> assertThat(e.getValue().channel.isOpen()).isFalse());
  }
}
Example 5: testCloseCluster
import io.netty.channel.ChannelFuture; // import the package/class the method depends on
@Test
public void testCloseCluster() throws Exception {
  // Validates that a stub that dictates to close a node's ClusterSpec's connections does so.
  ClusterSpec cluster = ClusterSpec.builder().withNodes(2, 2).build();
  BoundCluster boundCluster = localServer.register(cluster);
  BoundDataCenter dc0 = boundCluster.getDataCenters().iterator().next();
  Iterator<BoundNode> nodes = dc0.getNodes().iterator();
  BoundNode boundNode = nodes.next();
  stubCloseOnStartup(Scope.CLUSTER);

  Map<BoundNode, MockClient> nodeToClients = new HashMap<>();
  MockClient client = null;
  try {
    // Create a connection to each node.
    for (BoundNode node : boundCluster.getNodes()) {
      MockClient client0 = new MockClient(eventLoop);
      client0.connect(node.getAddress());
      nodeToClients.put(node, client0);
    }

    client = new MockClient(eventLoop);
    client.connect(boundNode.getAddress());
    // Sending a write should cause the connection to close.
    ChannelFuture f = client.write(new Startup());
    // Future should be successful since write was successful.
    f.get(5, TimeUnit.SECONDS);

    // Next write should fail because the channel was closed.
    f = client.write(Options.INSTANCE);
    try {
      f.get();
    } catch (ExecutionException e) {
      assertThat(e.getCause()).isInstanceOf(ClosedChannelException.class);
    }
  } finally {
    if (client != null) {
      // client that sent request should close.
      assertThat(client.channel.isOpen()).isFalse();
    }
    // All clients should close.
    nodeToClients.entrySet().forEach(e -> assertThat(e.getValue().channel.isOpen()).isFalse());
  }
}
Example 6: operationComplete
import io.netty.channel.ChannelFuture; // import the package/class the method depends on
@Override
public void operationComplete(ChannelFuture future) throws Exception {
  boolean isInterrupted = false;
  // We want to wait for at least 120 secs when interrupts occur. Establishing a connection fails/succeeds quickly,
  // so there is no point propagating the interruption as failure immediately.
  long remainingWaitTimeMills = 120000;
  long startTime = System.currentTimeMillis();
  // logger.debug("Connection operation finished. Success: {}", future.isSuccess());
  while (true) {
    try {
      future.get(remainingWaitTimeMills, TimeUnit.MILLISECONDS);
      if (future.isSuccess()) {
        SocketAddress remote = future.channel().remoteAddress();
        SocketAddress local = future.channel().localAddress();
        setAddresses(remote, local);
        // Send a handshake on the current thread. This is the only time we will send from within the event thread.
        // We can do this because the connection will not be backed up.
        send(handshakeSendHandler, connection, handshakeType, handshakeValue, responseClass, true);
      } else {
        l.connectionFailed(FailureType.CONNECTION, new RpcException("General connection failure."));
      }
      // logger.debug("Handshake queued for send.");
      break;
    } catch (final InterruptedException interruptEx) {
      remainingWaitTimeMills -= (System.currentTimeMillis() - startTime);
      startTime = System.currentTimeMillis();
      isInterrupted = true;
      if (remainingWaitTimeMills < 1) {
        l.connectionFailed(FailureType.CONNECTION, interruptEx);
        break;
      }
      // Ignore the interrupt and continue to wait until we elapse remainingWaitTimeMills.
    } catch (final Exception ex) {
      logger.error("Failed to establish connection", ex);
      l.connectionFailed(FailureType.CONNECTION, ex);
      break;
    }
  }

  if (isInterrupted) {
    // Preserve evidence that the interruption occurred so that code higher up on the call stack can learn of the
    // interruption and respond to it if it wants to.
    Thread.currentThread().interrupt();
  }
}
Example 7: operationComplete
import io.netty.channel.ChannelFuture; // import the package/class the method depends on
@Override
public void operationComplete(ChannelFuture future) throws Exception {
  boolean isInterrupted = false;
  // We want to wait for at least 120 secs when interrupts occur. Establishing a connection fails/succeeds quickly,
  // so there is no point propagating the interruption as failure immediately.
  long remainingWaitTimeMills = 120000;
  long startTime = System.currentTimeMillis();
  // logger.debug("Connection operation finished. Success: {}", future.isSuccess());
  while (true) {
    try {
      future.get(remainingWaitTimeMills, TimeUnit.MILLISECONDS);
      if (future.isSuccess()) {
        // Send a handshake on the current thread. This is the only time we will send from within the event thread.
        // We can do this because the connection will not be backed up.
        send(handshakeSendHandler, connection, handshakeType, handshakeValue, responseClass, true);
      } else {
        l.connectionFailed(FailureType.CONNECTION, new RpcException("General connection failure."));
      }
      // logger.debug("Handshake queued for send.");
      break;
    } catch (final InterruptedException interruptEx) {
      remainingWaitTimeMills -= (System.currentTimeMillis() - startTime);
      startTime = System.currentTimeMillis();
      isInterrupted = true;
      if (remainingWaitTimeMills < 1) {
        l.connectionFailed(FailureType.CONNECTION, interruptEx);
        break;
      }
      // Ignore the interrupt and continue to wait until we elapse remainingWaitTimeMills.
    } catch (final Exception ex) {
      logger.error("Failed to establish connection", ex);
      l.connectionFailed(FailureType.CONNECTION, ex);
      break;
    }
  }

  if (isInterrupted) {
    // Preserve evidence that the interruption occurred so that code higher up on the call stack can learn of the
    // interruption and respond to it if it wants to.
    Thread.currentThread().interrupt();
  }
}
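The wait loop in Examples 6 and 7 implements a reusable pattern: absorb interrupts while waiting on a future, track the remaining deadline, and restore the thread's interrupt status before returning so callers can still observe it. Below is a minimal standalone sketch of that pattern against a plain java.util.concurrent.Future; the class and method names are hypothetical, not from the examples. Guava's Uninterruptibles.getUninterruptibly offers a library take on the same idea.

import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;

// Hypothetical helper: wait on a future across interrupts until a deadline
// elapses, then restore the thread's interrupt status.
public final class InterruptSafeWait {

  public static <V> V getWithDeadline(Future<V> future, long timeoutMillis)
      throws ExecutionException, TimeoutException {
    boolean interrupted = false;
    long deadline = System.currentTimeMillis() + timeoutMillis;
    try {
      while (true) {
        long remaining = deadline - System.currentTimeMillis();
        if (remaining <= 0) {
          throw new TimeoutException("Deadline elapsed while waiting");
        }
        try {
          return future.get(remaining, TimeUnit.MILLISECONDS);
        } catch (InterruptedException e) {
          interrupted = true; // absorb and retry with the remaining time
        }
      }
    } finally {
      if (interrupted) {
        Thread.currentThread().interrupt(); // preserve evidence of the interruption
      }
    }
  }

  private InterruptSafeWait() {}
}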