This article collects typical usage examples of the Java method io.netty.bootstrap.ServerBootstrap.bind. If you are unsure what ServerBootstrap.bind does or how to use it, the curated examples below should help; for more context, see the documentation of the enclosing class, io.netty.bootstrap.ServerBootstrap.
The following 11 code examples of ServerBootstrap.bind are shown, sorted by popularity by default.
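Before the project-specific examples, here is a minimal, self-contained sketch of the method (the port number and class name are illustrative, not taken from any project below). ServerBootstrap.bind is asynchronous and returns a ChannelFuture, so a standalone server typically syncs on that future and then waits on the server channel's close future before releasing its event loop groups:

import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.ChannelFuture;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.SocketChannel;
import io.netty.channel.socket.nio.NioServerSocketChannel;
import io.netty.handler.logging.LoggingHandler;

public final class MinimalBindExample {
    public static void main(String[] args) throws InterruptedException {
        EventLoopGroup bossGroup = new NioEventLoopGroup(1);  // accepts incoming connections
        EventLoopGroup workerGroup = new NioEventLoopGroup(); // handles channel I/O
        try {
            ServerBootstrap b = new ServerBootstrap();
            b.group(bossGroup, workerGroup)
             .channel(NioServerSocketChannel.class)
             .childHandler(new ChannelInitializer<SocketChannel>() {
                 @Override
                 protected void initChannel(SocketChannel ch) {
                     ch.pipeline().addLast(new LoggingHandler());
                 }
             });
            // bind() is asynchronous; sync() blocks until the bind completes
            // and rethrows the failure cause if it did not succeed.
            ChannelFuture f = b.bind(8080).sync();
            // Block until the server channel is closed.
            f.channel().closeFuture().sync();
        } finally {
            bossGroup.shutdownGracefully();
            workerGroup.shutdownGracefully();
        }
    }
}

Several of the examples below omit the sync and shutdown steps because their surrounding frameworks manage the server's lifecycle.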
Example 1: bind
import io.netty.bootstrap.ServerBootstrap; // import the package/class this method depends on
@Override
public void bind(int port) {
    EventLoopGroup bossGroup = new NioEventLoopGroup(1);  // accepts incoming connections
    EventLoopGroup workerGroup = new NioEventLoopGroup(); // handles channel I/O
    ServerBootstrap bootstrap = new ServerBootstrap()
            .group(bossGroup, workerGroup)
            .channel(NioServerSocketChannel.class)
            .childHandler(new ChannelInitializer<SocketChannel>() {
                @Override
                protected void initChannel(SocketChannel ch) throws Exception {
                    ch.pipeline().addLast(new Encoder(serializer), new Decoder(serializer), new ProviderHandler());
                }
            });
    // bind(port) binds directly to the given port; a localAddress(...) call
    // would be redundant here, since bind(int) builds its own address.
    bootstrap.bind(port);
}
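Note that this example discards the ChannelFuture returned by bind(port), so the method may return before the port is actually bound. If the caller needs that guarantee, one option (a sketch, not part of the original snippet) is to block on the future:

bootstrap.bind(port).syncUninterruptibly(); // block until bound; rethrows the failure cause on error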
Example 2: run
import io.netty.bootstrap.ServerBootstrap; // import the package/class this method depends on
@Override
public void run(String... args) throws Exception {
    NioEventLoopGroup group = new NioEventLoopGroup();
    ServerBootstrap bootstrap = new ServerBootstrap();
    bootstrap.group(group)
             .channel(NioServerSocketChannel.class)
             .childHandler(serverChannelInitializer);
    ChannelFuture channelFuture = bootstrap.bind(new InetSocketAddress(30232));
    channelFuture.addListener(future -> {
        if (future.isSuccess()) {
            logger.info("[Netty] server started successfully");
        } else {
            logger.error("[Netty] server failed to start", future.cause());
        }
    });
}
Example 3: start
import io.netty.bootstrap.ServerBootstrap; // import the package/class this method depends on
public void start() {
    ServerBootstrap b = new ServerBootstrap();
    b.group(workerGroup)
     .channel(NioServerSocketChannel.class)
     .childOption(ChannelOption.SO_KEEPALIVE, true) // keep-alive applies to the accepted sockets
     .childHandler(new ChannelInitializer<SocketChannel>() {
         @Override
         protected void initChannel(SocketChannel socketChannel) throws Exception {
             System.out.println("New client connected! (" + socketChannel.remoteAddress() + ")");
             // decode inbound bytes to String, encode outbound Strings back to bytes
             socketChannel.pipeline().addLast(new StringDecoder()).addLast(new StringEncoder()).addLast(new EchoServerHandler());
         }
     });
    f = b.bind(port);
}
Example 4: start
import io.netty.bootstrap.ServerBootstrap; // import the package/class this method depends on
@Override
public synchronized void start() {
    bossGroup = new NioEventLoopGroup(); // (1)
    workerGroup = new NioEventLoopGroup();
    try {
        b = new ServerBootstrap(); // (2)
        b.group(bossGroup, workerGroup)
         .channel(NioServerSocketChannel.class)
         .option(ChannelOption.SO_BACKLOG, 100)
         .handler(new LoggingHandler(LogLevel.INFO))
         .childHandler(new SocketServerChannelInitializer(heartTime, socketService, applicationContext));
        // Bind and start to accept incoming connections.
        b.bind(port);
        logger.info("socket: " + port + " starting....");
        // To shut down gracefully, wait for the server channel's close future
        // and then shut down both event loop groups.
    } catch (Exception e) {
        e.printStackTrace();
    }
}
Example 5: start
import io.netty.bootstrap.ServerBootstrap; // import the package/class this method depends on
/**
 * Start the Netty server.
 */
@Override
public void start() {
    SocketManager.getInstance().setMaxConnection(nettyConfig.getMaxConnection());
    // read the configured thread count before sizing the executor group
    if (nettyConfig.getMaxThreads() != 0) {
        MAX_THREADS = nettyConfig.getMaxThreads();
    }
    servletExecutor = new DefaultEventExecutorGroup(MAX_THREADS);
    try {
        final SerializeProtocolEnum serializeProtocolEnum =
                SerializeProtocolEnum.acquireSerializeProtocol(nettyConfig.getSerialize());
        nettyServerHandlerInitializer.setSerializeProtocolEnum(serializeProtocolEnum);
        nettyServerHandlerInitializer.setServletExecutor(servletExecutor);
        ServerBootstrap b = new ServerBootstrap();
        groups(b, MAX_THREADS << 1);
        /* groups(...) encapsulates roughly the following configuration:
        bossGroup = new NioEventLoopGroup();
        workerGroup = new NioEventLoopGroup(MAX_THREADS * 2);
        b.group(bossGroup, workerGroup)
         .channel(NioServerSocketChannel.class)
         .option(ChannelOption.SO_BACKLOG, 100)
         .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 1000)
         .option(ChannelOption.SO_KEEPALIVE, true)
         .option(ChannelOption.TCP_NODELAY, true)
         .option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
         .childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
         .childHandler(nettyServerHandlerInitializer); */
        b.bind(nettyConfig.getPort());
        LOGGER.info("netty service started on port: " + nettyConfig.getPort());
    } catch (Exception e) {
        e.printStackTrace();
    }
}
Example 6: start
import io.netty.bootstrap.ServerBootstrap; // import the package/class this method depends on
public void start() {
    Configuration conf = context.getConf();
    int workerNum = conf.getInt(
            AngelConf.ANGEL_NETTY_MATRIXTRANSFER_SERVER_EVENTGROUP_THREADNUM,
            AngelConf.DEFAULT_ANGEL_NETTY_MATRIXTRANSFER_SERVER_EVENTGROUP_THREADNUM);
    int sendBuffSize = conf.getInt(
            AngelConf.ANGEL_NETTY_MATRIXTRANSFER_SERVER_SNDBUF,
            AngelConf.DEFAULT_ANGEL_NETTY_MATRIXTRANSFER_SERVER_SNDBUF);
    int recvBuffSize = conf.getInt(
            AngelConf.ANGEL_NETTY_MATRIXTRANSFER_SERVER_RCVBUF,
            AngelConf.DEFAULT_ANGEL_NETTY_MATRIXTRANSFER_SERVER_RCVBUF);
    final int maxMessageSize = conf.getInt(
            AngelConf.ANGEL_NETTY_MATRIXTRANSFER_MAX_MESSAGE_SIZE,
            AngelConf.DEFAULT_ANGEL_NETTY_MATRIXTRANSFER_MAX_MESSAGE_SIZE);
    bossGroup = new NioEventLoopGroup(1);
    workerGroup = new NioEventLoopGroup(workerNum);
    ((NioEventLoopGroup) workerGroup).setIoRatio(70);
    ServerBootstrap b = new ServerBootstrap();
    b.group(bossGroup, workerGroup)
     .channel(NioServerSocketChannel.class)
     .option(ChannelOption.SO_SNDBUF, sendBuffSize)
     .option(ChannelOption.SO_RCVBUF, recvBuffSize)
     .childHandler(new ChannelInitializer<SocketChannel>() {
         @Override
         public void initChannel(SocketChannel ch) throws Exception {
             ChannelPipeline p = ch.pipeline();
             p.addLast(new LengthFieldBasedFrameDecoder(maxMessageSize, 0, 4, 0, 4));
             p.addLast(new LengthFieldPrepender(4));
             p.addLast(new MatrixTransportServerHandler(context));
         }
     });
    channelFuture = b.bind(port);
}
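For reference, the arguments to LengthFieldBasedFrameDecoder(maxMessageSize, 0, 4, 0, 4) are maxFrameLength, lengthFieldOffset, lengthFieldLength, lengthAdjustment and initialBytesToStrip: each inbound frame carries a 4-byte length prefix at offset 0, and those 4 bytes are stripped before the payload reaches MatrixTransportServerHandler. LengthFieldPrepender(4) writes the matching prefix on the outbound side, so the wire format in both directions is [length: 4 bytes][payload: length bytes]. A client talking to this server would mirror the same codec pair (a sketch):

ch.pipeline().addLast(new LengthFieldBasedFrameDecoder(maxMessageSize, 0, 4, 0, 4)); // strip the 4-byte prefix
ch.pipeline().addLast(new LengthFieldPrepender(4));                                  // prepend it on writes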
Example 7: run
import io.netty.bootstrap.ServerBootstrap; // import the package/class this method depends on
@Override
public void run() {
    EventLoopGroup bossGroup = new NioEventLoopGroup(socketServerProperty.getBossThreadCount());
    EventLoopGroup workerGroup = new NioEventLoopGroup(socketServerProperty.getWorkerThreadCount());
    try {
        ServerBootstrap b = new ServerBootstrap();
        b.group(bossGroup, workerGroup)
         .channel(NioServerSocketChannel.class)
         .childHandler(channelInitializer)
         .option(ChannelOption.SO_BACKLOG, SO_BACKLOG)
         // per-connection socket options belong on the child channels
         .childOption(ChannelOption.TCP_NODELAY, true)
         .childOption(ChannelOption.SO_KEEPALIVE, true);
        // sync() returns the same future once the bind has completed
        ChannelFuture bindFuture = b.bind(socketServerProperty.getBindPort()).sync();
        bindFuture.addListener(channelServerStartListener);
        Channel ch = bindFuture.channel();
        // block until the server channel is closed
        ch.closeFuture().sync();
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt(); // restore the interrupt flag
    } finally {
        bossGroup.shutdownGracefully();
        workerGroup.shutdownGracefully();
    }
}
Example 8: start
import io.netty.bootstrap.ServerBootstrap; // import the package/class this method depends on
@Override
public void start() {
    int heartTime = transaction_netty_heart_time + 10;
    txCoreServerHandler = new TxCoreServerHandler(mqTxManagerService);
    bossGroup = new NioEventLoopGroup(50); // (1) a single boss thread would suffice for one bound port
    workerGroup = new NioEventLoopGroup();
    try {
        ServerBootstrap b = new ServerBootstrap();
        b.group(bossGroup, workerGroup)
         .channel(NioServerSocketChannel.class)
         .option(ChannelOption.SO_BACKLOG, 100)
         .handler(new LoggingHandler(LogLevel.INFO))
         .childHandler(new ChannelInitializer<SocketChannel>() {
             @Override
             public void initChannel(SocketChannel ch) throws Exception {
                 ch.pipeline().addLast("timeout", new IdleStateHandler(heartTime, heartTime, heartTime, TimeUnit.SECONDS));
                 ch.pipeline().addLast(new LengthFieldPrepender(4, false));
                 ch.pipeline().addLast(new LengthFieldBasedFrameDecoder(Integer.MAX_VALUE, 0, 4, 0, 4));
                 ch.pipeline().addLast(txCoreServerHandler);
             }
         });
        // Start the server.
        b.bind(Constants.socketPort);
        logger.info("Socket started on port(s): " + Constants.socketPort + " (socket)");
    } catch (Exception e) {
        e.printStackTrace();
    }
}
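The IdleStateHandler configured above fires an IdleStateEvent after heartTime seconds without a read, a write, or either; such events reach downstream handlers via userEventTriggered, so txCoreServerHandler is presumably the one reacting to them. A minimal sketch of such a reaction in a ChannelInboundHandlerAdapter (the real handler's behavior is not shown in this example):

@Override
public void userEventTriggered(ChannelHandlerContext ctx, Object evt) throws Exception {
    if (evt instanceof IdleStateEvent) {
        ctx.close(); // e.g. drop a connection that has been idle too long
    } else {
        super.userEventTriggered(ctx, evt);
    }
}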
Example 9: start
import io.netty.bootstrap.ServerBootstrap; // import the package/class this method depends on
@Override
public synchronized void start() {
    bossGroup = new NioEventLoopGroup(); // (1)
    workerGroup = new NioEventLoopGroup();
    try {
        b = new ServerBootstrap(); // (2)
        b.group(bossGroup, workerGroup)
         .channel(NioServerSocketChannel.class) // (3)
         .childHandler(new ChannelInitializer<SocketChannel>() { // (4)
             @Override
             public void initChannel(SocketChannel ch) throws Exception {
                 ch.pipeline().addLast(new IdleStateHandler(heartTime, heartTime, heartTime, TimeUnit.SECONDS));
                 // the frame decoder must run before ByteArrayDecoder so that
                 // complete frames are sliced from the stream before being
                 // converted to byte[]
                 ch.pipeline().addLast(new LengthFieldBasedFrameDecoder(Integer.MAX_VALUE, 0, 4, 0, 4));
                 ch.pipeline().addLast(new ByteArrayDecoder());
                 ch.pipeline().addLast(new ByteArrayEncoder());
                 ch.pipeline().addLast(new DeliveryHandler(deliveryService));
             }
         })
         .option(ChannelOption.SO_BACKLOG, 128) // (5)
         .childOption(ChannelOption.SO_KEEPALIVE, true); // (6)
        // Bind and start to accept incoming connections.
        b.bind(settingService.getDeliveryPort());
        logger.info("socket: " + settingService.getDeliveryPort() + " starting....");
        // To shut down gracefully, wait for the server channel's close future
        // and then shut down both event loop groups.
    } catch (Exception e) {
        e.printStackTrace();
    }
}
Example 10: bindAndStart
import io.netty.bootstrap.ServerBootstrap; // import the package/class this method depends on
/**
 * Bind and start the server to accept connections.
 *
 * @param host               address to bind to
 * @param port               port to listen on
 * @param socketEventHandler handler for socket events
 * @throws IOException if the bind fails
 */
public void bindAndStart(String host, int port, final SocketEventHandler socketEventHandler) throws IOException {
    // acceptor (boss) threads
    final EventLoopGroup bossGroup = new NioEventLoopGroup(1);
    // worker threads; defaults to twice the number of CPU cores
    final EventLoopGroup workerGroup = new NioEventLoopGroup();
    // server bootstrap configuration
    final ServerBootstrap boot = new ServerBootstrap();
    // idle-event trigger
    // final ChannelAcceptorIdleStateTrigger trigger = new ChannelAcceptorIdleStateTrigger();
    boot.group(bossGroup, workerGroup)
        .channel(NioServerSocketChannel.class)
        // SO_BACKLOG is the accept queue length, not a cap on concurrent clients
        .option(ChannelOption.SO_BACKLOG, Integer.MAX_VALUE)
        .childOption(ChannelOption.SO_KEEPALIVE, true)
        .handler(new LoggingHandler(LogLevel.INFO))
        .childHandler(new ChannelInitializer<SocketChannel>() {
            @Override
            protected void initChannel(SocketChannel ch) throws Exception {
                // http://blog.csdn.net/linuu/article/details/51360609
                // protobuf protocol codecs
                ch.pipeline().addLast("frameDecoder", new ProtobufVarint32FrameDecoder());
                ch.pipeline().addLast("decoder", new ProtobufDecoder(SocketASK.getDefaultInstance()));
                ch.pipeline().addLast("fieldPrepender", new ProtobufVarint32LengthFieldPrepender());
                ch.pipeline().addLast("encoder", new ProtobufEncoder());
                // http://blog.csdn.net/z69183787/article/details/52625095
                // heartbeat check: fires when the channel is all-idle for 15 seconds
                ch.pipeline().addLast("heartbeatHandler", new IdleStateHandler(0, 0, 15, TimeUnit.SECONDS));
                // idle-event trigger
                // ch.pipeline().addLast("heartbeatTrigger", trigger);
                final ChannelInboundHandlerRouterAdapter routerAdapter = new ChannelInboundHandlerRouterAdapter();
                routerAdapter.setHandlers(socketEventHandler);
                ch.pipeline().addLast("routerAdapter", routerAdapter);
            }
        });
    // Bind and start to accept incoming connections.
    ChannelFuture future = boot.bind(host, port);
    try {
        // await() waits for the bind but does not rethrow its failure cause
        future.await();
    } catch (InterruptedException ex) {
        Thread.currentThread().interrupt();
        throw new RuntimeException("Interrupted waiting for bindAndStart");
    }
    if (!future.isSuccess()) {
        throw new IOException("Failed to bindAndStart", future.cause());
    }
    LOGGER.info("listen port:{} started.", port);
    serverChannel = future.channel();
}
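Note the await()/isSuccess() pattern: ChannelFuture.await() waits for the bind to complete but does not rethrow its failure cause, which is why the explicit isSuccess()/cause() check follows. An equivalent, more compact form (a sketch) lets sync() rethrow the failure instead:

serverChannel = boot.bind(host, port).sync().channel(); // sync() rethrows the bind failure, so no manual check is needed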
Example 11: run
import io.netty.bootstrap.ServerBootstrap; // import the package/class this method depends on
private void run() throws Exception {
    ExecutorService boss = Executors.newCachedThreadPool(new NamedThreadFactory("NettyServerBoss", true));
    ExecutorService worker = Executors.newCachedThreadPool(new NamedThreadFactory("NettyServerWorker", true));
    bossGroup = new NioEventLoopGroup(1, boss); // (1)
    workerGroup = new NioEventLoopGroup(numOfIOWorkerThreads, worker);
    ServerDesc serverDesc = new ServerDesc();
    serverDesc.setRegistry(this.registry);
    serverDesc.setServerApp(this.serverApp);
    serverDesc.setServerGroup(this.serverApp);
    NettoServiceChannelHandler handler = new AsynchronousChannelHandler(serverDesc, serviceBeans, filters,
            this.maxWaitingQueueSize, this.numOfHandlerWorker);
    ServerBootstrap b = new ServerBootstrap(); // (2)
    b.group(bossGroup, workerGroup)
     .channel(NioServerSocketChannel.class) // (3)
     .option(ChannelOption.SO_BACKLOG, backlog)
     .childHandler(new ChannelInitializer<SocketChannel>() { // (4)
         @Override
         public void initChannel(SocketChannel ch) throws Exception {
             ChannelPipeline p = ch.pipeline();
             // alternative framers kept for reference:
             // p.addLast("framer", new DelimiterBasedFrameDecoder(maxRequestSize, Constants.delimiterAsByteBufArray()));
             // p.addLast("framer", new JsonObjectDecoder(maxRequestSize));
             // p.addLast("decoder", new ByteArrayDecoder());
             p.addLast("framer", new NettoFrameDecoder(maxRequestSize));
             p.addLast("decoder", new NettoMessageDecoder());
             p.addLast("encoder", new StringEncoder());
             p.addLast("handler", new NettyNettoMessageHandler(handler));
             // p.addLast("handler", new NettyServerJsonHandler(serviceBeans, filters));
         }
     });
    // Bind and start to accept incoming connections.
    channel = b.bind(this.port); // (7) returns a ChannelFuture; .sync() would block until bound
    logger.info("server bind port:" + this.port);
    // To block until the server socket is closed:
    // channel.channel().closeFuture().sync();
}
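This example never waits on the close future and never shuts its groups down; a companion stop() method might look like the following sketch (not part of the original class; note that the channel field holds the ChannelFuture returned by bind):

public void stop() {
    if (channel != null) {
        channel.channel().close(); // close the listening server channel
    }
    bossGroup.shutdownGracefully();   // release the boss event loop threads
    workerGroup.shutdownGracefully(); // release the worker event loop threads
}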