本文整理汇总了Java中io.netty.channel.FixedRecvByteBufAllocator类的典型用法代码示例。如果您正苦于以下问题:Java FixedRecvByteBufAllocator类的具体用法?Java FixedRecvByteBufAllocator怎么用?Java FixedRecvByteBufAllocator使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
FixedRecvByteBufAllocator类属于io.netty.channel包,在下文中一共展示了FixedRecvByteBufAllocator类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: NomadServer
import io.netty.channel.FixedRecvByteBufAllocator; //导入依赖的package包/类
/**
 * Configures the server bootstrap used to accept lobby client connections.
 * The bootstrap is only configured here; binding happens elsewhere.
 */
public NomadServer(NomadLobby nLobby, EventLoopGroup bossGroup, EventLoopGroup workerGroup,
        EventExecutorGroup executorGroup) {
    // Accept-queue depth and per-connection receive buffer sizing.
    final int maxClients = 2000;
    final int bufferPerClient = Packet.MAX_PACKET_LENGTH * 4;

    sb = new ServerBootstrap();
    sb.group(bossGroup, workerGroup)
            .channel(NioServerSocketChannel.class)
            .option(ChannelOption.SO_BACKLOG, maxClients)
            .option(ChannelOption.SO_REUSEADDR, true)
            .childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
            // Fixed-size read allocations: room for four max-size packets per read.
            .childOption(ChannelOption.RCVBUF_ALLOCATOR, new FixedRecvByteBufAllocator(bufferPerClient))
            .childOption(ChannelOption.SO_SNDBUF, 65535)
            .childOption(ChannelOption.SO_RCVBUF, 65535)
            .childHandler(new ServerHandler(nLobby, executorGroup));

    // Bind either on all interfaces or on the lobby's configured address.
    String bindAddress = Nomad.BIND_ON_ALL ? "0.0.0.0" : nLobby.getLobby().getIp();
    sb.localAddress(bindAddress, nLobby.getLobby().getPort());
}
示例2: applyConnectionOptions
import io.netty.channel.FixedRecvByteBufAllocator; //导入依赖的package包/类
/**
 * Transfers the configured socket options onto the given server bootstrap.
 * Buffer sizes and traffic class treat -1 as "not configured, keep default".
 */
public void applyConnectionOptions(ServerBootstrap bootstrap) {
    // Options for each accepted (child) connection.
    bootstrap
            .childOption(ChannelOption.TCP_NODELAY, tcpNoDelay)
            .childOption(ChannelOption.SO_KEEPALIVE, tcpKeepAlive)
            .childOption(ChannelOption.ALLOCATOR, PartialPooledByteBufAllocator.INSTANCE);
    if (tcpSendBufferSize != -1) {
        bootstrap.childOption(ChannelOption.SO_SNDBUF, tcpSendBufferSize);
    }
    if (tcpReceiveBufferSize != -1) {
        // Match per-read allocation size to the socket receive buffer.
        bootstrap
                .childOption(ChannelOption.SO_RCVBUF, tcpReceiveBufferSize)
                .childOption(ChannelOption.RCVBUF_ALLOCATOR,
                        new FixedRecvByteBufAllocator(tcpReceiveBufferSize));
    }
    if (trafficClass != -1) {
        bootstrap.childOption(ChannelOption.IP_TOS, trafficClass);
    }
    // Options for the acceptor socket itself.
    bootstrap
            .option(ChannelOption.SO_LINGER, soLinger)
            .option(ChannelOption.SO_REUSEADDR, reuseAddress)
            .option(ChannelOption.SO_BACKLOG, acceptBackLog);
}
示例3: open
import io.netty.channel.FixedRecvByteBufAllocator; //导入依赖的package包/类
/** Receive-buffer size for both connections (previously inlined as 16384). */
private static final int RECV_BUFFER_SIZE = 16384;

/**
 * Opens connection to server. This method must be called explicitly.
 * Establishes two asynchronous connections to {@code this.host:this.port}:
 * one for notifications and one for data, each on its own event loop group.
 */
public void open() {
    // The two bootstraps were identical except for group and initializer;
    // the shared setup lives in openConnection().
    openConnection(notificationGroup, new NotificationInitializer());
    openConnection(dataGroup, new DataInitializer());
}

/** Bootstraps and asynchronously connects one NIO socket channel. */
private void openConnection(EventLoopGroup group, ChannelHandler handler) {
    new Bootstrap()
            .group(group)
            .channel(NioSocketChannel.class)
            .handler(handler)
            // Fixed-size read allocations keep per-read buffer usage predictable.
            .option(ChannelOption.RCVBUF_ALLOCATOR, new FixedRecvByteBufAllocator(RECV_BUFFER_SIZE))
            .option(ChannelOption.TCP_NODELAY, true)
            .connect(this.host, this.port);
}
示例4: configureNetty
import io.netty.channel.FixedRecvByteBufAllocator; //导入依赖的package包/类
/**
 * Applies the transport options to the client bootstrap. Buffer sizes and
 * traffic class use -1 as a "not configured" sentinel and are skipped then.
 */
private void configureNetty(Bootstrap bootstrap, NettyTransportOptions options) {
    bootstrap
            .option(ChannelOption.TCP_NODELAY, options.isTcpNoDelay())
            .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, options.getConnectTimeout())
            .option(ChannelOption.SO_KEEPALIVE, options.isTcpKeepAlive())
            .option(ChannelOption.SO_LINGER, options.getSoLinger());

    final int sendBuffer = options.getSendBufferSize();
    if (sendBuffer != -1) {
        bootstrap.option(ChannelOption.SO_SNDBUF, sendBuffer);
    }
    final int receiveBuffer = options.getReceiveBufferSize();
    if (receiveBuffer != -1) {
        // Size each read allocation to match the socket receive buffer.
        bootstrap
                .option(ChannelOption.SO_RCVBUF, receiveBuffer)
                .option(ChannelOption.RCVBUF_ALLOCATOR, new FixedRecvByteBufAllocator(receiveBuffer));
    }
    final int trafficClass = options.getTrafficClass();
    if (trafficClass != -1) {
        bootstrap.option(ChannelOption.IP_TOS, trafficClass);
    }
}
示例5: createServerBootstrap
import io.netty.channel.FixedRecvByteBufAllocator; //导入依赖的package包/类
/**
 * Builds the BGP server bootstrap, preferring the native epoll transport
 * (in level-triggered mode) when available and falling back to NIO otherwise.
 */
private synchronized ServerBootstrap createServerBootstrap(final ChannelPipelineInitializer initializer) {
    final ServerBootstrap bootstrap = new ServerBootstrap();
    if (Epoll.isAvailable()) {
        bootstrap.channel(EpollServerSocketChannel.class);
        bootstrap.childOption(EpollChannelOption.EPOLL_MODE, EpollMode.LEVEL_TRIGGERED);
    } else {
        bootstrap.channel(NioServerSocketChannel.class);
    }
    bootstrap.childHandler(BGPChannel.createServerChannelHandler(initializer));
    bootstrap.option(ChannelOption.SO_BACKLOG, SOCKET_BACKLOG_SIZE);
    bootstrap.childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);
    bootstrap.childOption(ChannelOption.WRITE_BUFFER_WATER_MARK, WATER_MARK);
    // Fixed-size receive buffers bound each channel's read, so channels on the
    // same event loop are serviced round-robin.
    bootstrap.option(ChannelOption.RCVBUF_ALLOCATOR, new FixedRecvByteBufAllocator(FIX_BUFFER_SIZE));
    // Only attach the default groups if the caller has not set one already.
    if (bootstrap.config().group() == null) {
        bootstrap.group(this.bossGroup, this.workerGroup);
    }
    return bootstrap;
}
示例6: configureNetty
import io.netty.channel.FixedRecvByteBufAllocator; //导入依赖的package包/类
/**
 * Copies the transport options onto the client bootstrap. A value of -1
 * means "not configured": that option is left at its default.
 */
private void configureNetty(Bootstrap bootstrap, TransportOptions options) {
    bootstrap.option(ChannelOption.TCP_NODELAY, options.isTcpNoDelay());
    bootstrap.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, options.getConnectTimeout());
    bootstrap.option(ChannelOption.SO_KEEPALIVE, options.isTcpKeepAlive());
    bootstrap.option(ChannelOption.SO_LINGER, options.getSoLinger());

    final int sndBuf = options.getSendBufferSize();
    final int rcvBuf = options.getReceiveBufferSize();
    final int tos = options.getTrafficClass();
    if (sndBuf != -1) {
        bootstrap.option(ChannelOption.SO_SNDBUF, sndBuf);
    }
    if (rcvBuf != -1) {
        bootstrap.option(ChannelOption.SO_RCVBUF, rcvBuf);
        // Per-read allocation matches the configured receive buffer size.
        bootstrap.option(ChannelOption.RCVBUF_ALLOCATOR, new FixedRecvByteBufAllocator(rcvBuf));
    }
    if (tos != -1) {
        bootstrap.option(ChannelOption.IP_TOS, tos);
    }
}
示例7: startupUDP
import io.netty.channel.FixedRecvByteBufAllocator; //导入依赖的package包/类
/**
 * Start to listen on a UDP port.
 *
 * @param listenAddresses
 *            The address to listen to
 * @param config
 *            Can create handlers to be attached to this port
 * @return True if startup was successful
 */
boolean startupUDP(final InetSocketAddress listenAddresses, final ChannelServerConficuration config) {
    final Bootstrap bootstrap = new Bootstrap()
            .group(workerGroup)
            .channel(NioDatagramChannel.class)
            .option(ChannelOption.SO_BROADCAST, true)
            // One fixed-size buffer per datagram read, capped at the UDP limit.
            .option(ChannelOption.RCVBUF_ALLOCATOR, new FixedRecvByteBufAllocator(ConnectionBean.UDP_LIMIT))
            .handler(new ChannelInitializer<Channel>() {
                @Override
                protected void initChannel(final Channel ch) throws Exception {
                    // Attach every configured handler, preserving map order.
                    for (Map.Entry<String, ChannelHandler> entry : handlers(false).entrySet()) {
                        ch.pipeline().addLast(entry.getKey(), entry.getValue());
                    }
                }
            });
    final ChannelFuture bindFuture = bootstrap.bind(listenAddresses);
    channelUDP = bindFuture.channel();
    return handleFuture(bindFuture);
}
示例8: createTransportService
import io.netty.channel.FixedRecvByteBufAllocator; //导入依赖的package包/类
/**
 * Creates the pooled NIO/UDP transport used to issue Source server queries.
 */
@Override
protected Transport<SourceServerRequest> createTransportService() {
    final NettyPooledUdpTransport<SourceServerRequest> udpTransport =
            new NettyPooledUdpTransport<>(ChannelType.NIO_UDP);
    udpTransport.setChannelInitializer(new SourceQueryChannelInitializer(this));
    // 1 MiB socket buffers in both directions.
    udpTransport.addChannelOption(ChannelOption.SO_SNDBUF, 1048576);
    udpTransport.addChannelOption(ChannelOption.SO_RCVBUF, 1048576);
    // Fixed 1400-byte read allocations — presumably the expected maximum
    // query-response datagram size; confirm against the protocol spec.
    udpTransport.addChannelOption(ChannelOption.RCVBUF_ALLOCATOR, new FixedRecvByteBufAllocator(1400));
    return udpTransport;
}
示例9: HttpClient
import io.netty.channel.FixedRecvByteBufAllocator; //导入依赖的package包/类
/**
 * Configures the Netty client bootstrap used for outgoing HTTP connections.
 */
private HttpClient() {
    group = new NioEventLoopGroup();
    bootstrap = new Bootstrap()
            .group(group)
            .channel(NioSocketChannel.class)
            .handler(new HttpClientInitializer(mHttpClientListener))
            .option(ChannelOption.CONNECT_TIMEOUT_MILLIS, CONNECTION_TIMEOUT_MILLIS)
            .option(ChannelOption.TCP_NODELAY, true)
            // 192 KiB socket receive buffer with matching fixed per-read
            // allocations (added in 2017-07-14).
            .option(ChannelOption.SO_RCVBUF, 65536 * 3)
            .option(ChannelOption.RCVBUF_ALLOCATOR, new FixedRecvByteBufAllocator(65536 * 3));
}
示例10: createRegistry
import io.netty.channel.FixedRecvByteBufAllocator; //导入依赖的package包/类
/**
 * Extends the base JNDI registry with a 4 KiB fixed receive-buffer
 * allocator, bound under the RCVBUF_ALLOCATOR option name.
 */
@Override
protected JndiRegistry createRegistry() throws Exception {
    final JndiRegistry registry = super.createRegistry();
    registry.bind(ChannelOption.RCVBUF_ALLOCATOR.name(), new FixedRecvByteBufAllocator(4096));
    return registry;
}
示例11: runLineBasedFrameDecoder
import io.netty.channel.FixedRecvByteBufAllocator; //导入依赖的package包/类
/**
 * Starts a TCP server whose pipeline splits incoming bytes into lines and
 * replies "Hello &lt;line&gt;" for each one. Port 0 presumably lets the OS
 * choose a free port — confirm against Netty4TcpServer.create. Receive
 * buffers are deliberately 1 byte so that every line arrives fragmented,
 * exercising LineBasedFrameDecoder's reassembly.
 */
private static void runLineBasedFrameDecoder() {
TcpServer<String, String> transport = Netty4TcpServer.<String, String>create(
0,
new ChannelInitializer<Channel>() {
@Override
protected void initChannel(Channel channel) throws Exception {
// 1-byte socket buffer and 1-byte read allocations: each read
// delivers at most one byte, forcing frames to span many reads.
int bufferSize = 1;
ChannelConfig config = channel.config();
config.setOption(ChannelOption.SO_RCVBUF, bufferSize);
config.setOption(ChannelOption.RCVBUF_ALLOCATOR, new FixedRecvByteBufAllocator(bufferSize));
// Decode at most 256 bytes per line, then convert bytes <-> UTF-8 strings.
channel.pipeline().addFirst(
new LineBasedFrameDecoder(256),
new StringDecoder(CharsetUtil.UTF_8),
new StringEncoder(CharsetUtil.UTF_8));
}
});
ReactorTcpServer.create(transport).start(connection -> {
// capacity(1) limits demand to one line at a time; each line is echoed
// back prefixed with "Hello ".
connection.log("input")
.observeComplete(v -> LOG.info("Connection input complete"))
.capacity(1)
.consume(line -> {
String response = "Hello " + line + "\n";
Streams.wrap(connection.writeWith(Streams.just(response))).consume();
});
// Keep the connection open indefinitely; completion is driven by the peer.
return Streams.never();
});
}
示例12: startListener
import io.netty.channel.FixedRecvByteBufAllocator; //导入依赖的package包/类
/**
 * Binds the UDP listener and blocks until the bind attempt has completed.
 * On success the bound channel is stored and {@code running} is set; on
 * failure the cause is logged and the server is shut down.
 *
 * @throws InterruptedException if interrupted while waiting for the bind result
 */
private void startListener() throws InterruptedException {
    CountDownLatch latch = new CountDownLatch(1);
    // TODO: Add configuration stuff
    ChannelFutureListener listener = future -> {
        try {
            if (future.isSuccess()) {
                getLogger().info(PocketLogging.Server.STARTUP, "Listening on port {}", PORT);
                channel = future.channel();
                // Set running BEFORE releasing the latch; the original set it
                // afterwards, so a caller returning from latch.await() could
                // still observe running == false.
                running = true;
            } else {
                getLogger().error(PocketLogging.Server.STARTUP, "Could not bind to {}", PORT, future
                    .cause());
                shutdown();
            }
        } finally {
            // Always release the latch: the original only counted down on
            // success, leaving startListener() blocked forever on bind failure.
            latch.countDown();
        }
    };
    new Bootstrap()
        .group(eventLoopGroup)
        .handler(PipelineUtils.INITIALIZER)
        .channel(PipelineUtils.getChannelClass())
        .option(ChannelOption.SO_BROADCAST, true)
        .option(ChannelOption.RCVBUF_ALLOCATOR, new FixedRecvByteBufAllocator(Short.MAX_VALUE)) // BLAME CONNOR
        .bind(PORT)
        .addListener(listener);
    latch.await();
}
示例13: createServerBootstrap
import io.netty.channel.FixedRecvByteBufAllocator; //导入依赖的package包/类
/**
 * Builds the PCEP server bootstrap: native epoll transport (level-triggered,
 * matching NIO semantics) when available, otherwise NIO. TCP MD5 signature
 * keys are only supported on the epoll transport; configuring them without
 * epoll fails fast with UnsupportedOperationException.
 */
synchronized ServerBootstrap createServerBootstrap(final ChannelPipelineInitializer initializer) {
final ServerBootstrap b = new ServerBootstrap();
b.childHandler(new ChannelInitializer<SocketChannel>() {
@Override
protected void initChannel(final SocketChannel ch) {
// Each accepted channel gets its pipeline from the shared initializer,
// with a fresh promise bound to this dispatcher's executor.
initializer.initializeChannel(ch, new DefaultPromise<>(PCEPDispatcherImpl.this.executor));
}
});
b.option(ChannelOption.SO_BACKLOG, SOCKET_BACKLOG_SIZE);
b.childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);
if (Epoll.isAvailable()) {
b.channel(EpollServerSocketChannel.class);
b.childOption(EpollChannelOption.EPOLL_MODE, EpollMode.LEVEL_TRIGGERED);
} else {
b.channel(NioServerSocketChannel.class);
}
if (!this.keys.isEmpty()) {
if (Epoll.isAvailable()) {
b.option(EpollChannelOption.TCP_MD5SIG, this.keys);
} else {
// MD5 signatures require the native epoll transport.
throw new UnsupportedOperationException(Epoll.unavailabilityCause().getCause());
}
}
// A 1-byte fixed receive buffer bounds each channel to one byte per read,
// forcing round-robin processing among channels on the same event loop.
b.childOption(ChannelOption.RCVBUF_ALLOCATOR, new FixedRecvByteBufAllocator(1));
// Only attach the default groups if the caller has not set one already.
if (b.config().group() == null) {
b.group(this.bossGroup, this.workerGroup);
}
return b;
}
示例14: createClientBootStrap
import io.netty.channel.FixedRecvByteBufAllocator; //导入依赖的package包/类
/**
 * Creates a client bootstrap for outgoing sessions, preferring the native
 * epoll transport (level-triggered) and optionally enabling TCP MD5
 * signatures for the given keys. MD5 signatures require epoll; requesting
 * them without it fails fast.
 */
private synchronized Bootstrap createClientBootStrap(final KeyMapping keys, final boolean reuseAddress) {
    final Bootstrap clientBootstrap = new Bootstrap();
    final boolean epollAvailable = Epoll.isAvailable();
    if (epollAvailable) {
        clientBootstrap.channel(EpollSocketChannel.class);
        clientBootstrap.option(EpollChannelOption.EPOLL_MODE, EpollMode.LEVEL_TRIGGERED);
    } else {
        clientBootstrap.channel(NioSocketChannel.class);
    }
    if (keys != null && !keys.isEmpty()) {
        if (!epollAvailable) {
            // MD5 signatures are only supported by the native transport.
            throw new UnsupportedOperationException(Epoll.unavailabilityCause().getCause());
        }
        clientBootstrap.option(EpollChannelOption.TCP_MD5SIG, keys);
    }
    // A fixed receive buffer bounds each read, so channels sharing an event
    // loop are serviced round-robin.
    clientBootstrap.option(ChannelOption.RCVBUF_ALLOCATOR, new FixedRecvByteBufAllocator(FIX_BUFFER_SIZE));
    clientBootstrap.option(ChannelOption.SO_KEEPALIVE, Boolean.TRUE);
    clientBootstrap.option(ChannelOption.WRITE_BUFFER_WATER_MARK, WATER_MARK);
    clientBootstrap.option(ChannelOption.SO_REUSEADDR, reuseAddress);
    // Only attach the default group if the caller has not set one already.
    if (clientBootstrap.config().group() == null) {
        clientBootstrap.group(this.workerGroup);
    }
    return clientBootstrap;
}
示例15: createUDP
import io.netty.channel.FixedRecvByteBufAllocator; //导入依赖的package包/类
/**
 * Creates a "channel" to the given address. Unlike TCP, this does not send
 * any message on creation.
 *
 * @param recipient
 *            The recipient of a message
 * @param broadcast
 *            Sets this channel to be able to broadcast
 * @param channelHandlers
 *            The handlers to set
 * @return The channel future object, or null if we are shut down
 */
public ChannelFuture createUDP(final SocketAddress recipient, final boolean broadcast,
final Map<String, ChannelHandler> channelHandlers) {
// Read lock: channel creation may run concurrently; shutdown takes the write side.
readUDP.lock();
try {
if (shutdownUDP) {
return null;
}
// Each UDP channel consumes one announced resource permit; exceeding the
// announced count is a programming error, hence the hard failure.
if (!semaphoreUPD.tryAcquire()) {
LOG.error("Tried to acquire more resources (UDP) than announced!");
throw new RuntimeException("Tried to acquire more resources (UDP) than announced!");
}
final Bootstrap b = new Bootstrap();
b.group(workerGroup);
b.channel(NioDatagramChannel.class);
// One fixed-size buffer per datagram read, capped at the UDP limit.
b.option(ChannelOption.RCVBUF_ALLOCATOR, new FixedRecvByteBufAllocator(ConnectionBean.UDP_LIMIT));
if (broadcast) {
b.option(ChannelOption.SO_BROADCAST, true);
}
// Let the client configuration filter/adjust the handler map before wiring it in.
channelClientConfiguration.pipelineFilter().filter(channelHandlers, false, true);
addHandlers(b, channelHandlers);
// Unlike TCP we bind rather than connect when broadcasting: a connected
// UDP channel cannot receive broadcast messages.
final ChannelFuture channelFuture;
if (broadcast) {
channelFuture = b.bind(new InetSocketAddress(0));
} else {
channelFuture = b.connect(recipient);
}
// Release the semaphore permit when the channel closes.
setupCloseListener(channelFuture, semaphoreUPD);
CREATED_UDP_CONNECTIONS.incrementAndGet();
return channelFuture;
} finally {
readUDP.unlock();
}
}