This article collects typical usage examples of the Java class io.netty.util.concurrent.EventExecutorGroup. If you are unsure what EventExecutorGroup is for or how to use it, the curated examples below should help.
The EventExecutorGroup class belongs to the io.netty.util.concurrent package. The code examples that follow are taken from open-source projects and are ordered roughly by popularity.
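As a quick orientation before the examples: the most common use of EventExecutorGroup is to move a potentially blocking ChannelHandler off the I/O event loop. The following is a minimal illustrative sketch, not taken from any example on this page; the OffloadingInitializer class name, the thread count, and the sleeping handler are placeholders.

import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.socket.SocketChannel;
import io.netty.util.concurrent.DefaultEventExecutorGroup;
import io.netty.util.concurrent.EventExecutorGroup;

public class OffloadingInitializer extends ChannelInitializer<SocketChannel> {
  // One shared pool of 16 executor threads, separate from the I/O event loop.
  private static final EventExecutorGroup BLOCKING_GROUP = new DefaultEventExecutorGroup(16);

  @Override
  protected void initChannel(SocketChannel ch) {
    // Handlers registered with an EventExecutorGroup have their callbacks run on
    // one of the group's threads; Netty pins a single executor per handler, so
    // events for a given channel are still processed in order.
    ch.pipeline().addLast(BLOCKING_GROUP, "blocking", new ChannelInboundHandlerAdapter() {
      @Override
      public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
        Thread.sleep(100); // placeholder for blocking work (database call, file I/O, ...)
        ctx.fireChannelRead(msg);
      }
    });
  }
}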
Example 1: ImapClient
import io.netty.util.concurrent.EventExecutorGroup; // import the required package/class
public ImapClient(ImapClientConfiguration configuration,
                  Channel channel,
                  SslContext sslContext,
                  EventExecutorGroup promiseExecutor,
                  String clientName) {
  this.logger = LogUtils.loggerWithName(ImapClient.class, clientName);
  this.configuration = configuration;
  this.channel = channel;
  this.sslContext = sslContext;
  this.promiseExecutor = promiseExecutor;
  this.clientState = new ImapClientState(clientName, promiseExecutor);
  this.codec = new ImapCodec(clientState);
  this.pendingWriteQueue = new ConcurrentLinkedQueue<>();
  this.connectionShutdown = new AtomicBoolean(false);
  this.connectionClosed = new AtomicBoolean(false);
  this.capabilities = new AtomicReference<>(null);
  configureChannel();
}
Example 2: ReadWriteExchangeChannelGroup
import io.netty.util.concurrent.EventExecutorGroup; // import the required package/class
public ReadWriteExchangeChannelGroup(MsgHandler<Protocol> msgHandler, Address address, int connectTimeout,
    int reconnectInterval, byte idleTimeout, byte maxIdleTimeOut, boolean lazy, short connections,
    short writeConnections, boolean reverseIndex, EventLoopGroup loopGroup, EventExecutorGroup executorGroup)
    throws SailfishException {
  super(UUID.randomUUID());
  this.msgHandler = msgHandler;
  this.tracer = new Tracer();
  NegotiateConfig readConfig = new NegotiateConfig(idleTimeout, maxIdleTimeOut, id(), ChannelType.read.code(),
      connections, writeConnections, (short) 0, reverseIndex);
  this.readGroup = new DefaultExchangeChannelGroup(tracer, msgHandler, address,
      (short) (connections - writeConnections), connectTimeout, reconnectInterval, idleTimeout,
      maxIdleTimeOut, lazy, reverseIndex, readConfig, this, loopGroup, executorGroup);
  NegotiateConfig writeConfig = new NegotiateConfig(idleTimeout, maxIdleTimeOut, id(), ChannelType.write.code(),
      connections, writeConnections, (short) 0, reverseIndex);
  this.writeGroup = new DefaultExchangeChannelGroup(tracer, msgHandler, address, writeConnections, connectTimeout,
      reconnectInterval, idleTimeout, maxIdleTimeOut, lazy, reverseIndex, writeConfig, this, loopGroup,
      executorGroup);
}
Example 3: newChannelInitializer
import io.netty.util.concurrent.EventExecutorGroup; // import the required package/class
private ChannelInitializer<SocketChannel> newChannelInitializer(final NegotiateConfig config,
    final ExchangeChannelGroup channelGroup, final EventExecutorGroup executorGroup) {
  return new ChannelInitializer<SocketChannel>() {
    @Override
    protected void initChannel(SocketChannel ch) throws Exception {
      ChannelPipeline pipeline = ch.pipeline();
      ch.attr(ChannelAttrKeys.maxIdleTimeout).set(config.maxIdleTimeout());
      ch.attr(ChannelAttrKeys.channelGroup).set(channelGroup);
      ch.attr(ChannelAttrKeys.clientSide).set(true);
      ch.attr(OneTime.awaitNegotiate).set(new CountDownLatch(1));
      ch.attr(OneTime.channelConfig).set(config);
      // TODO: should ioRatio be increased when every ChannelHandler is bound to executorGroup?
      pipeline.addLast(executorGroup,
          RemotingEncoder.INSTANCE,
          new RemotingDecoder(),
          new IdleStateHandler(config.idleTimeout(), 0, 0),
          HeartbeatChannelHandler.INSTANCE,
          NegotiateChannelHandler.INSTANCE,
          ConcreteRequestHandler.INSTANCE);
    }
  };
}
Example 4: shutdown
import io.netty.util.concurrent.EventExecutorGroup; // import the required package/class
public void shutdown() throws InterruptedException {
  try {
    logger.info("Shutting down Riposte...");
    List<ChannelFuture> channelCloseFutures = new ArrayList<>();
    for (Channel ch : channels) {
      // execute shutdown hooks
      if (serverConfig.serverShutdownHooks() != null) {
        for (ServerShutdownHook hook : serverConfig.serverShutdownHooks()) {
          hook.executeServerShutdownHook(serverConfig, ch);
        }
      }
      channelCloseFutures.add(ch.close());
    }
    for (ChannelFuture chf : channelCloseFutures) {
      chf.sync();
    }
  }
  finally {
    eventLoopGroups.forEach(EventExecutorGroup::shutdownGracefully);
    logger.info("...Riposte shutdown complete");
  }
}
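The shutdown in the finally block above is fire-and-forget. As a variant worth knowing, shutdownGracefully also has an overload taking a quiet period and timeout, and it returns a Future that a caller can block on until the threads are actually gone. A minimal sketch, not part of the project above:

import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.util.concurrent.EventExecutorGroup;
import java.util.concurrent.TimeUnit;

public class BlockingShutdown {
  public static void main(String[] args) {
    EventExecutorGroup group = new NioEventLoopGroup();
    // quietPeriod = 0s: do not wait for a task-free window before shutting down;
    // timeout = 5s: upper bound on how long the shutdown may take overall.
    group.shutdownGracefully(0, 5, TimeUnit.SECONDS).syncUninterruptibly();
  }
}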
Example 5: NomadServer
import io.netty.util.concurrent.EventExecutorGroup; // import the required package/class
public NomadServer(NomadLobby nLobby, EventLoopGroup bossGroup, EventLoopGroup workerGroup,
    EventExecutorGroup executorGroup) {
  sb = new ServerBootstrap();
  sb.group(bossGroup, workerGroup);
  sb.channel(NioServerSocketChannel.class);
  final int BUF_PER_CLIENT = Packet.MAX_PACKET_LENGTH * 4;
  final int MAX_CLIENTS = 2000;
  sb.option(ChannelOption.SO_BACKLOG, MAX_CLIENTS);
  sb.option(ChannelOption.SO_REUSEADDR, true);
  sb.childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT);
  sb.childOption(ChannelOption.RCVBUF_ALLOCATOR, new FixedRecvByteBufAllocator(BUF_PER_CLIENT));
  sb.childOption(ChannelOption.SO_SNDBUF, 65535);
  sb.childOption(ChannelOption.SO_RCVBUF, 65535);
  sb.childHandler(new ServerHandler(nLobby, executorGroup));
  String ip = Nomad.BIND_ON_ALL ? "0.0.0.0" : nLobby.getLobby().getIp();
  sb.localAddress(ip, nLobby.getLobby().getPort());
}
Example 6: testIdentifyCloseChannelOnFailure
import io.netty.util.concurrent.EventExecutorGroup; // import the required package/class
@Test(expected = PrematureChannelClosureException.class)
public void testIdentifyCloseChannelOnFailure() throws Exception {
  Channel channel = mock(Channel.class, Answers.RETURNS_SMART_NULLS.get());
  mockWriteHandler = mock(ChannelHandler.class);
  DefaultChannelPromise completedFuture = new DefaultChannelPromise(channel);
  completedFuture.setSuccess();
  DefaultChannelPromise failedFuture = new DefaultChannelPromise(channel);
  failedFuture.setFailure(new PrematureChannelClosureException("test"));
  ChannelPipeline channelPipeline = mock(ChannelPipeline.class);
  when(channelPipeline.addLast(anyString(), any(ChannelHandler.class))).thenReturn(channelPipeline);
  when(channelPipeline.addLast(any(EventExecutorGroup.class), anyString(), any(ChannelHandler.class))).thenReturn(channelPipeline);
  when(channel.pipeline()).thenReturn(channelPipeline);
  when(channel.isActive()).thenReturn(true);
  when(channel.writeAndFlush(any())).thenReturn(failedFuture);
  when(channel.close()).thenReturn(completedFuture);
  when(bootstrap.connect(anyString(), anyInt())).thenReturn(completedFuture);
  ClientSessionConfiguration configuration = new ClientSessionConfiguration();
  jannelClient.identify(configuration, null);
}
Example 7: main
import io.netty.util.concurrent.EventExecutorGroup; // import the required package/class
public static void main(String[] args) throws Exception {
  init();
  EventExecutorGroup eventExecutorGroup = new NioEventLoopGroup(16);
  /**
   * POST /login
   * GET /foods
   * POST /carts
   * PATCH /carts/cart_id
   * POST /orders
   * GET /orders
   * GET /admin/orders
   */
  HttpServerUrlHandler httpServerUrlHandler = new HttpServerUrlHandler(new DefaultHandler(HttpResponseStatus.BAD_GATEWAY))
      .register(HttpMethod.POST, "/login", new LoginHandler())
      .register(HttpMethod.GET, "/foods", new GetStockHandler())
      .register(HttpMethod.POST, "/carts", new AddCartHandler())
      .register(HttpMethod.PATCH, "/carts", new AddFoodHandler())
      .register(HttpMethod.POST, "/orders", new MakeOrderHandler())
      .register(HttpMethod.GET, "/orders", new GetOrderHandler())
      .register(HttpMethod.GET, "/admin/orders", new AdminGetOrderHandler());
  new HttpServer(128, ch -> ch.pipeline().addLast(httpServerUrlHandler)).start(Integer.parseInt(Config.APP_PORT));
}
Example 8: addFirst
import io.netty.util.concurrent.EventExecutorGroup; // import the required package/class
@Override
public ChannelPipeline addFirst(EventExecutorGroup executor, ChannelHandler... handlers) {
  if (handlers == null) {
    throw new NullPointerException("handlers");
  }
  if (handlers.length == 0 || handlers[0] == null) {
    return this;
  }
  int size;
  for (size = 1; size < handlers.length; size++) {
    if (handlers[size] == null) {
      break;
    }
  }
  // Add in reverse so the handlers end up at the head of the pipeline in their original order.
  for (int i = size - 1; i >= 0; i--) {
    ChannelHandler h = handlers[i];
    addFirst(executor, generateName(h), h);
  }
  return this;
}
Example 9: testBuilder
import io.netty.util.concurrent.EventExecutorGroup; // import the required package/class
@Test
public void testBuilder() throws Exception {
  DefaultClientResources sut = new DefaultClientResources.Builder().ioThreadPoolSize(4).computationThreadPoolSize(4)
      .commandLatencyCollectorOptions(DefaultCommandLatencyCollectorOptions.disabled()).build();
  EventExecutorGroup eventExecutors = sut.eventExecutorGroup();
  NioEventLoopGroup eventLoopGroup = sut.eventLoopGroupProvider().allocate(NioEventLoopGroup.class);
  assertThat(eventExecutors.iterator()).hasSize(4);
  assertThat(eventLoopGroup.executorCount()).isEqualTo(4);
  assertThat(sut.ioThreadPoolSize()).isEqualTo(4);
  assertThat(sut.commandLatencyCollector()).isNotNull();
  assertThat(sut.commandLatencyCollector().isEnabled()).isFalse();
  assertThat(sut.shutdown(0, 0, TimeUnit.MILLISECONDS).get()).isTrue();
}
Example 10: reuseClientConnections
import io.netty.util.concurrent.EventExecutorGroup; // import the required package/class
@Test
public void reuseClientConnections() throws Exception {
  // given
  DefaultClientResources clientResources = DefaultClientResources.create();
  Map<Class<? extends EventExecutorGroup>, EventExecutorGroup> eventLoopGroups = getExecutors(clientResources);
  RedisClient redisClient1 = newClient(clientResources);
  RedisClient redisClient2 = newClient(clientResources);
  connectAndClose(redisClient1);
  connectAndClose(redisClient2);
  // when
  EventExecutorGroup executor = eventLoopGroups.values().iterator().next();
  redisClient1.shutdown(0, 0, TimeUnit.MILLISECONDS);
  // then
  connectAndClose(redisClient2);
  clientResources.shutdown(0, 0, TimeUnit.MILLISECONDS).get();
  assertThat(eventLoopGroups).isEmpty();
  assertThat(executor.isShuttingDown()).isTrue();
  assertThat(clientResources.eventExecutorGroup().isShuttingDown()).isTrue();
}
Example 11: managedClientResources
import io.netty.util.concurrent.EventExecutorGroup; // import the required package/class
@Test
public void managedClientResources() throws Exception {
  // given
  RedisClient redisClient1 = RedisClient.create(RedisURI.create(TestSettings.host(), TestSettings.port()));
  ClientResources clientResources = redisClient1.getResources();
  Map<Class<? extends EventExecutorGroup>, EventExecutorGroup> eventLoopGroups = getExecutors(clientResources);
  connectAndClose(redisClient1);
  // when
  EventExecutorGroup executor = eventLoopGroups.values().iterator().next();
  redisClient1.shutdown(0, 0, TimeUnit.MILLISECONDS);
  // then
  assertThat(eventLoopGroups).isEmpty();
  assertThat(executor.isShuttingDown()).isTrue();
  assertThat(clientResources.eventExecutorGroup().isShuttingDown()).isTrue();
}
Example 12: findInvoker
import io.netty.util.concurrent.EventExecutorGroup; // import the required package/class
private ChannelHandlerInvoker findInvoker(EventExecutorGroup group) {
  if (group == null) {
    return null;
  }
  // Pin one of the child executors once and remember it so that the same child executor
  // is used to fire events for the same channel.
  ChannelHandlerInvoker invoker = childInvokers.get(group);
  if (invoker == null) {
    EventExecutor executor = group.next();
    if (executor instanceof EventLoop) {
      invoker = ((EventLoop) executor).asInvoker();
    } else {
      invoker = new DefaultChannelHandlerInvoker(executor);
    }
    childInvokers.put(group, invoker);
  }
  return invoker;
}
Example 13: NettyPipeline
import io.netty.util.concurrent.EventExecutorGroup; // import the required package/class
public NettyPipeline(final EventExecutorGroup executor, final HttpHandler handler,
    final Config conf, final SslContext sslCtx) {
  this.executor = executor;
  this.handler = handler;
  this.config = conf;
  maxInitialLineLength = conf.getBytes("netty.http.MaxInitialLineLength").intValue();
  maxHeaderSize = conf.getBytes("netty.http.MaxHeaderSize").intValue();
  maxChunkSize = conf.getBytes("netty.http.MaxChunkSize").intValue();
  maxContentLength = conf.getBytes("netty.http.MaxContentLength").intValue();
  idleTimeOut = conf.getDuration("netty.http.IdleTimeout", TimeUnit.MILLISECONDS);
  supportH2 = conf.getBoolean("server.http2.enabled");
  this.tmpdir = config.getString("application.tmpdir");
  this.bufferSize = config.getBytes("server.http.ResponseBufferSize").intValue();
  this.wsMaxMessageSize = Math.max(
      config.getBytes("server.ws.MaxTextMessageSize").intValue(),
      config.getBytes("server.ws.MaxBinaryMessageSize").intValue());
  this.sslCtx = sslCtx;
}
Example 14: https1_1
import io.netty.util.concurrent.EventExecutorGroup; // import the required package/class
@Test
public void https1_1() throws Exception {
  Config conf = conf(false, 123, 234, 345, 456, 567L);
  new MockUnit(EventExecutorGroup.class, HttpHandler.class, SocketChannel.class,
      ChannelPipeline.class, ChannelHandlerContext.class)
      .expect(sslContext)
      .expect(pipeline)
      .expect(ssl)
      .expect(http2OrHttp)
      .expect(ctxpipeline)
      .expect(http1Codec())
      .expect(idle(567))
      .expect(aggregator(456))
      .expect(jooby(conf))
      .run(unit -> {
        new NettyPipeline(unit.get(EventExecutorGroup.class), unit.get(HttpHandler.class),
            conf, unit.get(SslContext.class))
            .initChannel(unit.get(SocketChannel.class));
      }, unit -> {
        Http2OrHttpHandler handler = unit.captured(Http2OrHttpHandler.class).iterator().next();
        handler.configurePipeline(unit.get(ChannelHandlerContext.class), "http/1.1");
      });
}