This article collects typical usage examples of the io.netty.channel.ChannelOption class in Java. If you are wondering what ChannelOption is for, how to use it, or simply want to see it in real code, the curated examples below should help.
The ChannelOption class belongs to the io.netty.channel package. Fifteen code examples are shown below, ordered roughly by popularity.
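Before the examples, here is a minimal, self-contained sketch of the two usual attachment points for ChannelOption on a server: option(...) configures the parent (acceptor) channel, while childOption(...) configures every accepted connection. The class name and port below are placeholders, not taken from any of the examples.

import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.ChannelOption;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioServerSocketChannel;

public final class ChannelOptionSketch {
    public static void main(String[] args) throws InterruptedException {
        EventLoopGroup boss = new NioEventLoopGroup(1);
        EventLoopGroup worker = new NioEventLoopGroup();
        try {
            ServerBootstrap b = new ServerBootstrap()
                    .group(boss, worker)
                    .channel(NioServerSocketChannel.class)
                    .option(ChannelOption.SO_BACKLOG, 128)          // applies to the acceptor channel
                    .childOption(ChannelOption.TCP_NODELAY, true)   // applies to every accepted channel
                    .childOption(ChannelOption.SO_KEEPALIVE, true)
                    .childHandler(new ChannelInitializer<SocketChannel>() {
                        @Override
                        protected void initChannel(SocketChannel ch) {
                            // add protocol handlers here
                        }
                    });
            b.bind(8080).sync().channel().closeFuture().sync();
        } finally {
            boss.shutdownGracefully();
            worker.shutdownGracefully();
        }
    }
}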
Example 1: connect
import io.netty.channel.ChannelOption; // import the required package/class
@Override
public void connect(final InetSocketAddress socketAddress) {
    workerGroup = new NioEventLoopGroup(workerGroupThreads);
    Bootstrap bootstrap = new Bootstrap();
    try {
        bootstrap
                .group(workerGroup)
                .channel(NioSocketChannel.class)
                .option(ChannelOption.SO_KEEPALIVE, true)
                .option(ChannelOption.TCP_NODELAY, true)
                .handler(clientChannelInitializer);
    } catch (final Exception ex) {
        throw new ClientException(ex);
    }
    channel = bootstrap.connect(socketAddress.getAddress().getHostAddress(), socketAddress.getPort())
            .syncUninterruptibly()
            .channel();
}
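Example 1 allocates a new NioEventLoopGroup on every connect() call but does not show the matching teardown. A hedged sketch of a companion close() method, assuming the same channel and workerGroup fields exist on the class, might look like this:

// Hypothetical companion to Example 1; field names are assumed, not shown in the source.
public void close() {
    if (channel != null) {
        channel.close().syncUninterruptibly();
    }
    if (workerGroup != null) {
        workerGroup.shutdownGracefully();
    }
}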
Example 2: initChannel
import io.netty.channel.ChannelOption; // import the required package/class
@Override
protected void initChannel(SocketChannel ch) throws Exception {
    try {
        ch.config().setOption(ChannelOption.TCP_NODELAY, true);
        ch.config().setOption(ChannelOption.IP_TOS, 0x18);
    } catch (ChannelException ex) {
        // IP_TOS not supported by platform, ignore
    }
    ch.config().setAllocator(PooledByteBufAllocator.DEFAULT);
    PacketRegistry r = new PacketRegistry();
    ch.pipeline().addLast("idleStateHandler", new IdleStateHandler(0, 0, 30));
    ch.pipeline().addLast(new HttpServerCodec());
    ch.pipeline().addLast(new HttpObjectAggregator(65536));
    ch.pipeline().addLast(new WebSocketHandler());
    ch.pipeline().addLast(new PacketDecoder(r));
    ch.pipeline().addLast(new PacketEncoder(r));
    ch.pipeline().addLast(mExecutorGroup, "serverHandler", new ClientHandler(mServer));
}
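The 0x18 passed to ChannelOption.IP_TOS in Example 2 is the bitwise OR of the classic low-delay and throughput TOS bits documented for java.net socket traffic classes. A tiny sketch that spells the intent out (the constant names are illustrative, not part of the original code):

private static final int IPTOS_LOWDELAY = 0x10;   // minimize delay
private static final int IPTOS_THROUGHPUT = 0x08; // maximize throughput

ch.config().setOption(ChannelOption.IP_TOS, IPTOS_LOWDELAY | IPTOS_THROUGHPUT); // == 0x18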
Example 3: newPool
import io.netty.channel.ChannelOption; // import the required package/class
@Override
protected FastdfsPool newPool(InetSocketAddress addr) {
    if (LOG.isDebugEnabled()) {
        LOG.debug("channel pool created : {}", addr);
    }
    Bootstrap bootstrap = new Bootstrap().channel(NioSocketChannel.class).group(loopGroup);
    bootstrap.remoteAddress(addr);
    bootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, (int) connectTimeout);
    bootstrap.option(ChannelOption.TCP_NODELAY, true);
    bootstrap.option(ChannelOption.SO_KEEPALIVE, true);
    return new FastdfsPool(
            bootstrap,
            readTimeout,
            idleTimeout,
            maxConnPerHost
    );
}
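For context on CONNECT_TIMEOUT_MILLIS in Example 3: when the timeout elapses, the connect attempt fails asynchronously on the returned future rather than throwing from connect() itself, so pool code typically inspects that future. A minimal sketch of the pattern, independent of the FastdfsPool internals (which are not shown here):

bootstrap.connect(addr).addListener((ChannelFutureListener) future -> {
    if (!future.isSuccess()) {
        // typically an io.netty.channel.ConnectTimeoutException after connectTimeout ms
        LOG.warn("connect to {} failed", addr, future.cause());
    }
});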
Example 4: start
import io.netty.channel.ChannelOption; // import the required package/class
public void start() {
    bootstrap.group(group).channel(NioSocketChannel.class);
    bootstrap.option(ChannelOption.TCP_NODELAY, true);
    bootstrap.handler(new ChannelInitializer<SocketChannel>() {
        @Override
        public void initChannel(SocketChannel ch) throws Exception {
            // ch.pipeline().addLast(new IdleStateHandler(1, 1, 5));
            ch.pipeline().addLast(new KyroMsgDecoder());
            ch.pipeline().addLast(new KyroMsgEncoder());
            ch.pipeline().addLast(new ClientHandler());
        }
    });
    new ScheduledThreadPoolExecutor(1).scheduleAtFixedRate(new Runnable() {
        @Override
        public void run() {
            scanResponseTable(3000);
        }
    }, 1000, 1000, TimeUnit.MILLISECONDS);
}
Example 5: main
import io.netty.channel.ChannelOption; // import the required package/class
public static void main(String[] args) throws Exception {
    EventLoopGroup bossGroup = new NioEventLoopGroup(1);
    EventLoopGroup workerGroup = new NioEventLoopGroup();
    ServerBootstrap b = new ServerBootstrap();
    b.group(bossGroup, workerGroup)
            .channel(NioServerSocketChannel.class)
            .option(ChannelOption.SO_REUSEADDR, true)
            .childHandler(new ChannelInitializer<NioSocketChannel>() {
                @Override
                protected void initChannel(NioSocketChannel ch) throws Exception {
                    ch.pipeline().addLast(new SimpleServerHandler());
                }
            });
    b.bind(8090).sync().channel().closeFuture().sync();
}
Example 6: AbstractNettyServer
import io.netty.channel.ChannelOption; // import the required package/class
protected AbstractNettyServer(String serverName) {
    this.serverName = Objects.requireNonNull(serverName, "server name");
    bootstrap = new ServerBootstrap();
    if (Epoll.isAvailable()) {
        bootstrap.option(ChannelOption.SO_BACKLOG, 1024).channel(EpollServerSocketChannel.class)
                .childOption(ChannelOption.SO_LINGER, 0).childOption(ChannelOption.SO_REUSEADDR, true)
                .childOption(ChannelOption.SO_KEEPALIVE, true);
        log.info(serverName + " epoll init");
    } else {
        bootstrap.channel(NioServerSocketChannel.class);
        log.info(serverName + " nio init");
    }
    bootstrap.group(bossGroup, workerGroup).option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
            .childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
            .childOption(ChannelOption.TCP_NODELAY, true).childHandler(new ChannelInitializer<SocketChannel>() {
                @Override
                protected void initChannel(SocketChannel ch) throws Exception {
                    initPipeline(ch.pipeline());
                }
            });
}
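One caveat worth noting for Example 6: EpollServerSocketChannel can only be driven by epoll event loops, so the bossGroup and workerGroup fields (whose construction is not shown) must match the chosen transport. A hedged sketch of that selection, assuming the same field names:

// Hypothetical group setup matching the channel type chosen in the constructor above.
if (Epoll.isAvailable()) {
    bossGroup = new EpollEventLoopGroup(1);
    workerGroup = new EpollEventLoopGroup();
} else {
    bossGroup = new NioEventLoopGroup(1);
    workerGroup = new NioEventLoopGroup();
}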
Example 7: startClients
import io.netty.channel.ChannelOption; // import the required package/class
/**
 * Connect to remote servers. We initiate the connection only to
 * nodes with a lower ID, so that there is a single connection
 * between each pair of nodes, which is then used symmetrically.
 */
protected void startClients(RPCChannelInitializer channelInitializer) {
    final Bootstrap bootstrap = new Bootstrap();
    bootstrap.group(workerGroup)
            .channel(NioSocketChannel.class)
            .option(ChannelOption.SO_REUSEADDR, true)
            .option(ChannelOption.SO_KEEPALIVE, true)
            .option(ChannelOption.TCP_NODELAY, true)
            .option(ChannelOption.SO_SNDBUF, SEND_BUFFER_SIZE)
            .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, CONNECT_TIMEOUT)
            .handler(channelInitializer);
    clientBootstrap = bootstrap;
    ScheduledExecutorService ses = syncManager.getThreadPool().getScheduledExecutor();
    reconnectTask = new SingletonTask(ses, new ConnectTask());
    reconnectTask.reschedule(0, TimeUnit.SECONDS);
}
Example 8: connectAsync
import io.netty.channel.ChannelOption; // import the required package/class
public ChannelFuture connectAsync(String host, int port, String remoteId, boolean discoveryMode) {
    ethereumListener.trace("Connecting to: " + host + ":" + port);
    EthereumChannelInitializer ethereumChannelInitializer = ctx.getBean(EthereumChannelInitializer.class, remoteId);
    ethereumChannelInitializer.setPeerDiscoveryMode(discoveryMode);
    Bootstrap b = new Bootstrap();
    b.group(workerGroup);
    b.channel(NioSocketChannel.class);
    b.option(ChannelOption.SO_KEEPALIVE, true);
    b.option(ChannelOption.MESSAGE_SIZE_ESTIMATOR, DefaultMessageSizeEstimator.DEFAULT);
    b.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, config.peerConnectionTimeout());
    b.remoteAddress(host, port);
    b.handler(ethereumChannelInitializer);
    // Start the client.
    return b.connect();
}
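Example 8 hands back the ChannelFuture from b.connect() without blocking, so a caller would normally attach a listener instead of calling sync(). A small usage sketch (the host and port values are illustrative):

connectAsync("127.0.0.1", 30303, remoteId, false).addListener(future -> {
    if (!future.isSuccess()) {
        // handle the failure; future.cause() carries the reason (e.g. refused connection or timeout)
    }
});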
Example 9: bootstrap
import io.netty.channel.ChannelOption; // import the required package/class
private final Future<Void> bootstrap(final NettyChannel channel) {
    final Promise<Void> p = Promise.apply();
    new Bootstrap().group(eventLoopGroup).channel(NioSocketChannel.class)
            .option(ChannelOption.SO_KEEPALIVE, true)
            .option(ChannelOption.AUTO_READ, false)
            .handler(new ChannelInitializer<io.netty.channel.Channel>() {
                @Override
                protected void initChannel(final io.netty.channel.Channel ch) throws Exception {
                    ch.pipeline().addLast(new MessageDecoder(), new MessageEncoder(),
                            new FlowControlHandler(), channel);
                }
            })
            .connect(new InetSocketAddress(host, port))
            .addListener(future -> p.become(Future.VOID));
    return p;
}
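Example 9 disables AUTO_READ, which means Netty will not read from the socket until read() is requested explicitly; combined with FlowControlHandler this gives the caller backpressure over inbound messages. A minimal, hedged sketch of a downstream handler that pulls one message at a time under this setting (the handler name is illustrative, not from the source):

public class PullingHandler extends SimpleChannelInboundHandler<Object> {
    @Override
    public void channelActive(ChannelHandlerContext ctx) {
        ctx.read();                 // request the first read explicitly
        ctx.fireChannelActive();
    }

    @Override
    protected void channelRead0(ChannelHandlerContext ctx, Object msg) {
        // ... process msg ...
        ctx.read();                 // ask for the next message only when ready
    }
}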
Example 10: start
import io.netty.channel.ChannelOption; // import the required package/class
/**
 * Start the server.
 *
 * @throws Exception if startup fails
 */
public void start() throws Exception {
    try {
        ServerBootstrap bootstrap = new ServerBootstrap();
        bootstrap.group(bossGroup, workerGroup)
                .channel(NioServerSocketChannel.class)
                .childHandler(channelInitializer)
                .option(ChannelOption.SO_BACKLOG, aceServerConfig.getBackSize())
                .childOption(ChannelOption.SO_KEEPALIVE, true)
                .childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);
        ChannelFuture future = bootstrap.bind(aceServerConfig.getPort()).sync();
        System.out.println("ace server started on port : " + aceServerConfig.getPort());
        future.channel().closeFuture().sync();
    } finally {
        close();
    }
}
Example 11: run
import io.netty.channel.ChannelOption; // import the required package/class
@Async("myTaskAsyncPool")
public void run(int udpReceivePort) {
    EventLoopGroup group = new NioEventLoopGroup();
    logger.info("Server start! Udp Receive msg Port:" + udpReceivePort);
    try {
        Bootstrap b = new Bootstrap();
        b.group(group)
                .channel(NioDatagramChannel.class)
                .option(ChannelOption.SO_BROADCAST, true)
                .handler(new UdpServerHandler()); // handler for messages received by the server (persists them to MySQL and Redis)
        b.bind(udpReceivePort).sync().channel().closeFuture().await();
    } catch (InterruptedException e) {
        e.printStackTrace();
    } finally {
        group.shutdownGracefully();
    }
}
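SO_BROADCAST in Example 11 is what permits the datagram channel to send to a broadcast address as well as receive on one. A short, hedged sketch of sending a single broadcast packet from a channel bootstrapped the same way (the address and payload are placeholders):

Channel ch = b.bind(0).sync().channel();
ByteBuf payload = Unpooled.copiedBuffer("ping", CharsetUtil.UTF_8);
ch.writeAndFlush(new DatagramPacket(payload, new InetSocketAddress("255.255.255.255", udpReceivePort))).sync();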
Example 12: Receiver
import io.netty.channel.ChannelOption; // import the required package/class
public Receiver ( final ReceiverHandlerFactory factory, final SocketAddress addr )
{
    this.factory = factory;
    this.bossGroup = new NioEventLoopGroup ();
    this.workerGroup = new NioEventLoopGroup ();
    this.bootstrap = new ServerBootstrap ();
    this.bootstrap.group ( this.bossGroup, this.workerGroup );
    this.bootstrap.channel ( NioServerSocketChannel.class );
    this.bootstrap.option ( ChannelOption.SO_BACKLOG, 5 );
    this.bootstrap.option ( ChannelOption.SO_REUSEADDR, true );
    this.bootstrap.childHandler ( new ChannelInitializer<SocketChannel> () {
        @Override
        protected void initChannel ( final SocketChannel ch ) throws Exception
        {
            handleInitChannel ( ch );
        }
    } );
    this.channel = this.bootstrap.bind ( addr ).channel ();
    logger.info ( "Receiver running ..." );
}
Example 13: bindToPlainSocket
import io.netty.channel.ChannelOption; // import the required package/class
private ChannelFuture bindToPlainSocket() throws InterruptedException {
    String hostname = configuration.getHostName();
    int port = Integer.parseInt(configuration.getPlain().getPort());
    ServerBootstrap b = new ServerBootstrap();
    b.group(bossGroup, workerGroup)
            .channel(NioServerSocketChannel.class)
            .childHandler(new SocketChannelInitializer(ioExecutors))
            .option(ChannelOption.SO_BACKLOG, 128)
            .childOption(ChannelOption.SO_KEEPALIVE, true);
    // Bind and start to accept incoming connections.
    ChannelFuture future = b.bind(hostname, port).sync();
    LOGGER.info("Listening AMQP on " + hostname + ":" + port);
    return future;
}
Example 14: bindToSslSocket
import io.netty.channel.ChannelOption; // import the required package/class
private ChannelFuture bindToSslSocket()
        throws InterruptedException, CertificateException, UnrecoverableKeyException, NoSuchAlgorithmException,
        KeyStoreException, KeyManagementException, IOException {
    String hostname = configuration.getHostName();
    int port = Integer.parseInt(configuration.getSsl().getPort());
    ServerBootstrap b = new ServerBootstrap();
    b.group(bossGroup, workerGroup)
            .channel(NioServerSocketChannel.class)
            .childHandler(new SslSocketChannelInitializer(ioExecutors, new SslHandlerFactory(configuration)))
            .option(ChannelOption.SO_BACKLOG, 128)
            .childOption(ChannelOption.SO_KEEPALIVE, true);
    // Bind and start to accept incoming connections.
    ChannelFuture future = b.bind(hostname, port).sync();
    LOGGER.info("Listening AMQP/" + configuration.getSsl().getProtocol() + " on " + hostname + ":" + port);
    return future;
}
Example 15: run
import io.netty.channel.ChannelOption; // import the required package/class
public void run() {
    try {
        // Configure the server.
        EventLoopGroup group = new NioEventLoopGroup();
        try {
            ServerBootstrap b = new ServerBootstrap();
            b.option(ChannelOption.SO_BACKLOG, 1024);
            b.group(group)
                    .channel(NioServerSocketChannel.class)
                    .handler(new LoggingHandler(LogLevel.INFO))
                    .childHandler(new Http2ServerInitializer(mSslCtx));
            sServerChannel = b.bind(PORT).sync().channel();
            Log.i(TAG, "Netty HTTP/2 server started on " + getServerUrl());
            sBlock.open();
            sServerChannel.closeFuture().sync();
        } finally {
            group.shutdownGracefully();
        }
        Log.i(TAG, "Stopped Http2TestServerRunnable!");
    } catch (Exception e) {
        Log.e(TAG, e.toString());
    }
}