This page collects typical usage examples of the Java class io.netty.util.concurrent.DefaultEventExecutorGroup. If you are unsure what DefaultEventExecutorGroup is for or how to use it, the examples selected here may help.
DefaultEventExecutorGroup belongs to the io.netty.util.concurrent package. 15 code examples of the class are shown below, sorted by popularity.
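Before the examples, here is the core pattern they all share: create one DefaultEventExecutorGroup and pass it to ChannelPipeline#addLast, so that handler's callbacks run on the group's threads instead of the channel's I/O event loop. A minimal sketch (the class and handler names are illustrative, not taken from the examples below):

import io.netty.channel.ChannelHandlerContext;
import io.netty.channel.ChannelInboundHandlerAdapter;
import io.netty.channel.ChannelPipeline;
import io.netty.util.concurrent.DefaultEventExecutorGroup;

public class BlockingHandlerSetup {
    // One shared group for the whole server; 16 threads is an arbitrary example size.
    private static final DefaultEventExecutorGroup BLOCKING_GROUP = new DefaultEventExecutorGroup(16);

    static void configure(ChannelPipeline pipeline) {
        // Handlers registered with an executor group are invoked on its threads,
        // so blocking work here does not stall the event loop.
        pipeline.addLast(BLOCKING_GROUP, "blocking", new ChannelInboundHandlerAdapter() {
            @Override
            public void channelRead(ChannelHandlerContext ctx, Object msg) {
                // blocking work (database call, file I/O, ...) would go here
                ctx.fireChannelRead(msg);
            }
        });
    }
}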
Example 1: start
import io.netty.util.concurrent.DefaultEventExecutorGroup; // import the required package/class
@Override
public void start() throws EmbeddedServletContainerException {
ServerBootstrap b = new ServerBootstrap();
groups(b);
servletExecutor = new DefaultEventExecutorGroup(50);
b.childHandler(new NettyEmbeddedServletInitializer(servletExecutor, context));
// Don't yet need the complexity of lifecycle state, listeners etc, so tell the context it's initialised here
context.setInitialised(true);
ChannelFuture future = b.bind(address).awaitUninterruptibly();
//noinspection ThrowableResultOfMethodCallIgnored
Throwable cause = future.cause();
if (null != cause) {
throw new EmbeddedServletContainerException("Could not start Netty server", cause);
}
logger.info(context.getServerInfo() + " started on port: " + getPort());
}
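Example 1 never shows teardown; a matching stop() would shut the executor group down gracefully so in-flight servlet tasks can drain. A sketch using the same field names (an assumption; the boss/worker groups created in groups(b) would need the same treatment):

@Override
public void stop() throws EmbeddedServletContainerException {
    try {
        // shutdownGracefully() returns a Future; block until the group's threads have exited
        servletExecutor.shutdownGracefully().syncUninterruptibly();
    } catch (Exception e) {
        throw new EmbeddedServletContainerException("Could not stop Netty server", e);
    }
}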
Example 2: start
import io.netty.util.concurrent.DefaultEventExecutorGroup; // import the required package/class
/**
 * Start the Netty client.
 */
@Override
public void start(TxConfig txConfig) {
this.txConfig = txConfig;
SerializeProtocolEnum serializeProtocol =
SerializeProtocolEnum.acquireSerializeProtocol(txConfig.getNettySerializer());
nettyClientHandlerInitializer.setSerializeProtocolEnum(serializeProtocol);
servletExecutor = new DefaultEventExecutorGroup(txConfig.getNettyThreadMax());
nettyClientHandlerInitializer.setServletExecutor(servletExecutor);
nettyClientHandlerInitializer.setTxConfig(txConfig);
TxManagerLocator.getInstance().setTxConfig(txConfig);
TxManagerLocator.getInstance().schedulePeriodicRefresh();
try {
bootstrap = new Bootstrap();
groups(bootstrap, txConfig.getNettyThreadMax());
doConnect();
} catch (Exception e) {
e.printStackTrace();
}
}
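A note on the catch block: printing the stack trace and continuing hides a failed doConnect() from the caller; rethrowing, or logging through the project's logger, would surface client startup failures instead.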
Example 3: Server
import io.netty.util.concurrent.DefaultEventExecutorGroup; // import the required package/class
public Server(StartupContext startupContext) throws Exception {
BrokerConfigProvider configProvider = startupContext.getService(BrokerConfigProvider.class);
this.configuration = configProvider
.getConfigurationObject(AmqpServerConfiguration.NAMESPACE, AmqpServerConfiguration.class);
this.broker = startupContext.getService(Broker.class);
if (broker == null) {
throw new RuntimeException("Could not find the broker class to initialize AMQP server");
}
bossGroup = new NioEventLoopGroup();
workerGroup = new NioEventLoopGroup();
ioExecutors = new DefaultEventExecutorGroup(BLOCKING_TASK_EXECUTOR_THREADS);
haStrategy = startupContext.getService(HaStrategy.class);
if (haStrategy == null) {
serverHelper = new ServerHelper();
} else {
LOGGER.info("AMQP Transport is in PASSIVE mode"); //starts up in passive mode
serverHelper = new HaEnabledServerHelper();
}
}
Example 4: start
import io.netty.util.concurrent.DefaultEventExecutorGroup; // import the required package/class
public void start(int listenPort, final ExecutorService ignore) throws Exception {
if (!startFlag.compareAndSet(false, true)) {
return;
}
bossGroup = new NioEventLoopGroup();
ioGroup = new NioEventLoopGroup();
businessGroup = new DefaultEventExecutorGroup(businessThreads);
ServerBootstrap b = new ServerBootstrap();
b.group(bossGroup, ioGroup).channel(NioServerSocketChannel.class)
.childOption(ChannelOption.TCP_NODELAY, Boolean.parseBoolean(System.getProperty("nfs.rpc.tcp.nodelay", "true")))
.childOption(ChannelOption.SO_REUSEADDR, Boolean.parseBoolean(System.getProperty("nfs.rpc.tcp.reuseaddress", "true")))
.childHandler(new ChannelInitializer<SocketChannel>() {
@Override
public void initChannel(SocketChannel ch) throws Exception {
ch.pipeline().addLast("decoder", new Netty4ProtocolDecoder());
ch.pipeline().addLast("encoder", new Netty4ProtocolEncoder());
ch.pipeline().addLast(businessGroup, "handler", new Netty4ServerHandler());
}
});
b.bind(new InetSocketAddress("127.0.0.1", listenPort)).sync();
LOGGER.warn("Server started,listen at: " + listenPort + ", businessThreads is " + businessThreads);
}
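The key line in Example 4 is the three-argument addLast: the decoder and encoder still run on the I/O threads, while the handler's callbacks are dispatched to businessGroup, so slow business logic cannot stall reads and writes on the socket.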
Example 5: initChannel
import io.netty.util.concurrent.DefaultEventExecutorGroup; // import the required package/class
public void initChannel(SocketChannel channel) {
ChannelPipeline pipeline = channel.pipeline();
// pipeline.addLast("tracker", connectionTracker);
pipeline.addLast("decoder", new HttpRequestDecoder());
pipeline.addLast("aggregator", new HttpObjectAggregator(Integer.MAX_VALUE)); //TODO: fix
pipeline.addLast("encoder", new HttpResponseEncoder());
pipeline.addLast("compressor", new HttpContentCompressor());
HttpResourceHandler resourceHandler = new HttpResourceHandler(dataHolder.getHttpServices(),
new ArrayList<HandlerHook>(), null, null);
pipeline.addLast(new DefaultEventExecutorGroup(200),
"router", new RequestRouter(resourceHandler, 0)); //TODO: remove limit
//TODO: see what can be done
/*if (pipelineModifier != null) {
pipelineModifier.apply(pipeline);
}*/
}
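Note that the 200-thread group here is created inside initChannel, i.e. once per accepted connection, and is never shut down. Example 15 at the end of this page has the same issue; the fix sketched there (hoist the group to a field shared across channels) applies here as well.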
Example 6: ApiServerChannelInitializer
import io.netty.util.concurrent.DefaultEventExecutorGroup; // import the required package/class
@Inject
public ApiServerChannelInitializer(ObjectMapper objectMapper,
ApiProtocolSwitcher apiProtocolSwitcher,
// ObservableEncoder rxjavaHandler,
GeneratedJaxRsModuleHandler jaxRsModuleHandler) {
this.apiProtocolSwitcher = apiProtocolSwitcher;
// this.rxjavaHandler = rxjavaHandler;
this.jaxRsHandlers = jaxRsModuleHandler;
SimpleModule nettyModule = new SimpleModule("Netty", PackageVersion.VERSION);
nettyModule.addSerializer(new ByteBufSerializer());
objectMapper.registerModule(nettyModule);
// TODO: allow customization of the thread pool!
// rxJavaGroup = new DefaultEventExecutorGroup(4, new DefaultThreadFactory("rxjava"));
jaxRsGroup = new DefaultEventExecutorGroup(Runtime.getRuntime().availableProcessors(),
new DefaultThreadFactory("jax-rs"));
}
Example 7: NettyStarter
import io.netty.util.concurrent.DefaultEventExecutorGroup; // import the required package/class
public NettyStarter() throws InterruptedException {
LOG.info("NettyHttpServer Initializing...");
bossGroup = new NioEventLoopGroup();
LOG.trace("NettyHttpServer bossGroup created.");
workerGroup = new NioEventLoopGroup();
LOG.trace("NettyHttpServer workGroup created.");
bServer = new ServerBootstrap();
LOG.trace("NettyHttpServer ServerBootstrap created.");
eventExecutor = new DefaultEventExecutorGroup(1);
LOG.trace("NettyHttpServer Task Executor created.");
DefaultServerInitializer sInit = new DefaultServerInitializer(eventExecutor);
LOG.trace("NettyHttpServer InitClass instance created.");
LOG.trace("NettyHttpServer InitClass instance Init().");
bServer.group(bossGroup, workerGroup)
.channel(NioServerSocketChannel.class).childHandler(sInit)
.option(ChannelOption.SO_REUSEADDR, true);
LOG.trace("NettyHttpServer ServerBootstrap group initialized.");
bindChannel = bServer.bind(HTTP_BIND_PORT).sync().channel();
}
Example 8: init
import io.netty.util.concurrent.DefaultEventExecutorGroup; // import the required package/class
@Override
public void init(String path, final int heartBeatInterval, final NetHandler netHandler, String threadName) throws Throwable {
final Conf conf = new Conf(path);
this.netHandler = netHandler;
NioEventLoopGroup group = new NioEventLoopGroup(conf.getIothreadnum());
bootstrap.group(group).channel(NioSocketChannel.class).option(ChannelOption.TCP_NODELAY, true).option(ChannelOption.SO_REUSEADDR, true)
.option(ChannelOption.SO_SNDBUF, conf.getSendBuf()).option(ChannelOption.SO_RCVBUF, conf.getRecvBuf())
.handler(new ChannelInitializer<SocketChannel>() {
@Override
public void initChannel(SocketChannel ch) throws Exception {
ch.pipeline().addLast(new IdleStateHandler((int) (2 * heartBeatInterval / 1000), 0, 0));
if (conf.getWorkthreadNum() == 0) {
ch.pipeline().addLast(new BufDecoder(), new NettyHandler(netHandler));
} else {
ch.pipeline().addLast(new DefaultEventExecutorGroup(conf.getWorkthreadNum()), new BufDecoder(),
new NettyHandler(netHandler));
}
}
});
}
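The (int) (2 * heartBeatInterval / 1000) expression converts a millisecond heartbeat interval into a reader-idle timeout in seconds, tolerating one missed heartbeat before an idle event fires. The workthreadNum switch then decides whether decoding and handling run inline on the I/O loop (0) or on a dedicated DefaultEventExecutorGroup; the same pattern is repeated in Example 9 below.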
Example 9: init
import io.netty.util.concurrent.DefaultEventExecutorGroup; // import the required package/class
@Override
public void init(String confPath, final int heartBeatInterval, final NetHandler handler,String ip,int port, String threadName) throws FileNotFoundException, IOException {
final Conf conf = new Conf(confPath);
this.ip = ip;
this.port = port;
NioEventLoopGroup group = new NioEventLoopGroup(conf.getIothreadnum());
bootstrap.group(group).channel(NioServerSocketChannel.class).option(ChannelOption.TCP_NODELAY, true).option(ChannelOption.SO_REUSEADDR, true)
.option(ChannelOption.SO_SNDBUF, conf.getSendBuf()).option(ChannelOption.SO_RCVBUF, conf.getRecvBuf())
.option(ChannelOption.SO_BACKLOG, conf.getBacklog()).childHandler(new ChannelInitializer<SocketChannel>() {
@Override
public void initChannel(SocketChannel ch) throws Exception {
ch.pipeline().addLast(new IdleStateHandler((int) (2 * heartBeatInterval / 1000), 0, 0));
if (conf.getWorkthreadNum() == 0) {
ch.pipeline().addLast(new BufDecoder(), new NettyHandler(handler));
} else {
ch.pipeline().addLast(new DefaultEventExecutorGroup(conf.getWorkthreadNum()), new BufDecoder(),
new NettyHandler(handler));
}
}
});
}
Example 10: start
import io.netty.util.concurrent.DefaultEventExecutorGroup; // import the required package/class
@Override
public void start() throws Exception {
int bossThreads = conf.getInt("netty.threads.Boss");
bossLoop = eventLoop(bossThreads, "boss");
int workerThreads = conf.getInt("netty.threads.Worker");
if (workerThreads > 0) {
workerLoop = eventLoop(Math.max(4, workerThreads), "worker");
} else {
workerLoop = bossLoop;
}
ThreadFactory threadFactory = new DefaultThreadFactory(conf.getString("netty.threads.Name"));
this.executor = new DefaultEventExecutorGroup(conf.getInt("netty.threads.Max"), threadFactory);
this.ch = bootstrap(executor, null, conf.getInt("application.port"));
boolean securePort = conf.hasPath("application.securePort");
if (securePort) {
bootstrap(executor, NettySslContext.build(conf), conf.getInt("application.securePort"));
}
}
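When netty.threads.Worker is not positive, the boss loop doubles as the worker loop, a common single-group setup for small deployments; otherwise the worker loop gets at least four threads. The separate DefaultEventExecutorGroup sized by netty.threads.Max is the pool the application handlers actually run on.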
Example 11: start
import io.netty.util.concurrent.DefaultEventExecutorGroup; // import the required package/class
/**
 * Start the Netty server.
 */
@Override
public void start() {
SocketManager.getInstance().setMaxConnection(nettyConfig.getMaxConnection());
// apply the configured thread count before sizing the executor group
if (nettyConfig.getMaxThreads() != 0) {
MAX_THREADS = nettyConfig.getMaxThreads();
}
servletExecutor = new DefaultEventExecutorGroup(MAX_THREADS);
try {
final SerializeProtocolEnum serializeProtocolEnum =
SerializeProtocolEnum.acquireSerializeProtocol(nettyConfig.getSerialize());
nettyServerHandlerInitializer.setSerializeProtocolEnum(serializeProtocolEnum);
nettyServerHandlerInitializer.setServletExecutor(servletExecutor);
ServerBootstrap b = new ServerBootstrap();
groups(b, MAX_THREADS << 1);
/* bossGroup = new NioEventLoopGroup();
workerGroup = new NioEventLoopGroup(MAX_THREADS * 2);
b.group(bossGroup, workerGroup)
.channel(NioServerSocketChannel.class)
.option(ChannelOption.SO_BACKLOG, 100)
.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, 1000)
.option(ChannelOption.SO_KEEPALIVE, true)
.option(ChannelOption.TCP_NODELAY, true)
.option(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
.childOption(ChannelOption.ALLOCATOR, PooledByteBufAllocator.DEFAULT)
.childHandler(nettyServerHandlerInitializer);*/
b.bind(nettyConfig.getPort());
LOGGER.info("netty service started on port: " + nettyConfig.getPort());
} catch (Exception e) {
e.printStackTrace();
}
}
Example 12: start
import io.netty.util.concurrent.DefaultEventExecutorGroup; // import the required package/class
@Override
public void start() {
this.defaultEventExecutorGroup = new DefaultEventExecutorGroup(nettyClientConfig.getClientWorkerThreads(), new ThreadFactory() {
private AtomicInteger threadIndex = new AtomicInteger(0);
@Override
public Thread newThread(Runnable r) {
return new Thread(r, "NettyClientWorkerThread_" + this.threadIndex.incrementAndGet());
}
});
this.bootstrap
.group(this.eventLoopGroupWorker)
.channel(NioSocketChannel.class)
.option(ChannelOption.TCP_NODELAY, true)
.option(ChannelOption.SO_KEEPALIVE, false)
.option(ChannelOption.CONNECT_TIMEOUT_MILLIS, nettyClientConfig.getConnectTimeoutMillis())
.option(ChannelOption.SO_SNDBUF, nettyClientConfig.getClientSocketSndBufSize())
.option(ChannelOption.SO_RCVBUF, nettyClientConfig.getClientSocketRcvBufSize())
.handler(new ChannelInitializer<SocketChannel>() {
@Override
public void initChannel(SocketChannel ch) throws Exception {
// encoder
ch.pipeline().addLast(new NettyEncoder(serializeTypeCurrentRPC));
// decoder
ch.pipeline().addLast(new NettyDecoder(RemotingCommand.class, serializeTypeCurrentRPC));
// heartbeat / idle detection
ch.pipeline().addLast(new IdleStateHandler(0, 0, nettyClientConfig.getClientChannelMaxIdleTimeSeconds()));
// business handlers run on the dedicated executor group
ch.pipeline().addLast(defaultEventExecutorGroup, new NettyConnectManageHandler(), new NettyClientHandler());
}
});
}
Example 13: executor
import io.netty.util.concurrent.DefaultEventExecutorGroup; // import the required package/class
@Default
default EventExecutorGroup executor() {
Logger logger = LoggerFactory.getLogger("imap-executor");
ThreadFactory threadFactory = new ThreadFactoryBuilder()
.setDaemon(true)
.setUncaughtExceptionHandler((t, e) -> logger.error("Uncaught exception on thread {}", t.getName(), e))
.setNameFormat("imap-executor-%d")
.build();
int nThreads = Runtime.getRuntime().availableProcessors() * 2;
return new DefaultEventExecutorGroup(nThreads, threadFactory);
}
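Sizing the group at twice availableProcessors() is a common heuristic for handlers that mix CPU work with short blocking calls; the daemon threads and the uncaught-exception handler from Guava's ThreadFactoryBuilder mean the executor will not block JVM shutdown, and failures on its threads get logged instead of vanishing.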
Example 14: start
import io.netty.util.concurrent.DefaultEventExecutorGroup; // import the required package/class
public void start() throws IOException, InterruptedException, URISyntaxException {
// Server part
mBossGroup = new NioEventLoopGroup();
mWorkerGroup = new NioEventLoopGroup();
mExecutorGroup = new DefaultEventExecutorGroup(Integer.parseInt(mServer.getConfig().getProperty("netty.executor_threads", "512")));
ServerBootstrap b = new ServerBootstrap();
b.group(mBossGroup, mWorkerGroup).channel(NioServerSocketChannel.class).handler(new LoggingHandler(LogLevel.INFO)).childHandler(new ClientInitializer(mServer, mExecutorGroup));
int port = Integer.parseInt(mServer.getConfig().getProperty("server.port"));
mChannel = b.bind(port).sync().channel();
Server.LOGGER.info("Server started on port " + port + ".");
}
Example 15: initChannel
import io.netty.util.concurrent.DefaultEventExecutorGroup; // import the required package/class
@Override
protected void initChannel(SocketChannel socketChannel) throws Exception {
final EventExecutorGroup group = new DefaultEventExecutorGroup(Runtime.getRuntime().availableProcessors() + 1);
socketChannel.pipeline().addLast(new CommonEncoder());
socketChannel.pipeline().addLast(new CommonDecoder());
socketChannel.pipeline().addLast(group, new CommonServiceHandler(accessService));
}
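As in Example 5, the group here is created per connection inside initChannel and never shut down, so every accepted channel leaks a thread pool. A safer variant (a sketch: the initializer class name and constructor are assumptions, while the handler and service types come from the example) hoists the group to a field shared across channels:

import io.netty.channel.ChannelInitializer;
import io.netty.channel.socket.SocketChannel;
import io.netty.util.concurrent.DefaultEventExecutorGroup;
import io.netty.util.concurrent.EventExecutorGroup;

public class AccessServiceInitializer extends ChannelInitializer<SocketChannel> {
    // Created once for the server and reused by every channel.
    private final EventExecutorGroup group =
            new DefaultEventExecutorGroup(Runtime.getRuntime().availableProcessors() + 1);
    private final AccessService accessService;

    public AccessServiceInitializer(AccessService accessService) {
        this.accessService = accessService;
    }

    @Override
    protected void initChannel(SocketChannel socketChannel) {
        socketChannel.pipeline().addLast(new CommonEncoder());
        socketChannel.pipeline().addLast(new CommonDecoder());
        // the service handler runs on the shared group, off the I/O event loop
        socketChannel.pipeline().addLast(group, new CommonServiceHandler(accessService));
    }
}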