本文整理汇总了Java中io.netty.channel.EventLoopGroup类的典型用法代码示例。如果您正苦于以下问题:Java EventLoopGroup类的具体用法?Java EventLoopGroup怎么用?Java EventLoopGroup使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
EventLoopGroup类属于io.netty.channel包,在下文中一共展示了EventLoopGroup类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: Server
import io.netty.channel.EventLoopGroup; //导入依赖的package包/类
/**
 * Private delegating constructor: assembles a {@link ServerBootstrap} from the
 * supplied event loop group and channel class, then forwards everything to the
 * main constructor.
 *
 * @param addressResolver     resolver for bind addresses — semantics defined by the main constructor
 * @param eventLoopGroup      event loop group used both for accepting and serving connections
 * @param channelClass        concrete {@link ServerChannel} type matching the event loop group
 * @param customEventLoop     true if the caller owns {@code eventLoopGroup} (so it must not be shut down here — TODO confirm against main constructor)
 * @param timer               timer passed through to the main constructor
 * @param customTimer         true if the caller owns {@code timer} — presumably controls shutdown responsibility; verify
 * @param bindTimeoutInNanos  timeout for bind operations, in nanoseconds
 * @param stubStore           store of stubs, passed through unchanged
 * @param activityLogging     whether activity logging is enabled
 */
private Server(
AddressResolver addressResolver,
EventLoopGroup eventLoopGroup,
Class<? extends ServerChannel> channelClass,
boolean customEventLoop,
Timer timer,
boolean customTimer,
long bindTimeoutInNanos,
StubStore stubStore,
boolean activityLogging) {
this(
addressResolver,
eventLoopGroup,
customEventLoop,
timer,
customTimer,
bindTimeoutInNanos,
stubStore,
activityLogging,
// Build the bootstrap here so the main constructor only deals with a
// fully-configured ServerBootstrap.
new ServerBootstrap()
.group(eventLoopGroup)
.channel(channelClass)
.childHandler(new Initializer()));
}
示例2: addLocalEndpoint
import io.netty.channel.EventLoopGroup; //导入依赖的package包/类
/**
 * Opens an in-JVM (local transport) listening channel and registers it with
 * the tracked endpoints.
 *
 * @return the local address the new channel is bound to
 */
public SocketAddress addLocalEndpoint()
{
    ChannelFuture boundFuture;

    synchronized (this.endpoints)
    {
        ServerBootstrap bootstrap = new ServerBootstrap();
        bootstrap.channel(LocalServerChannel.class);
        bootstrap.childHandler(new ChannelInitializer<Channel>()
        {
            protected void initChannel(Channel newChannel) throws Exception
            {
                // Every in-memory connection gets its own NetworkManager wired
                // for the handshake phase.
                NetworkManager manager = new NetworkManager(EnumPacketDirection.SERVERBOUND);
                manager.setNetHandler(new NetHandlerHandshakeMemory(NetworkSystem.this.mcServer, manager));
                NetworkSystem.this.networkManagers.add(manager);
                newChannel.pipeline().addLast("packet_handler", manager);
            }
        });
        bootstrap.group((EventLoopGroup)eventLoops.getValue());
        bootstrap.localAddress(LocalAddress.ANY);

        boundFuture = bootstrap.bind().syncUninterruptibly();
        this.endpoints.add(boundFuture);
    }

    return boundFuture.channel().localAddress();
}
示例3: createEventLoopGroup
import io.netty.channel.EventLoopGroup; //导入依赖的package包/类
/**
 * Creates the event loop group and matching socket channel class for the
 * async RPC client.
 *
 * <p>Uses the native epoll transport only when it is explicitly enabled via
 * configuration AND the platform is linux/amd64; otherwise falls back to NIO.
 *
 * @param conf configuration supplying thread count and transport selection
 * @return the event loop group paired with the channel class it requires
 */
private static Pair<EventLoopGroup, Class<? extends Channel>> createEventLoopGroup(
Configuration conf) {
    // 0 lets Netty decide the thread count based on available cores.
    int threadCount = conf.getInt(CLIENT_MAX_THREADS, 0);

    // Native transport is opt-in: it was not considered stable / extensively
    // tested at the time this was written.
    boolean useEpoll =
        conf.getBoolean(USE_NATIVE_TRANSPORT, false) && JVM.isLinux() && JVM.isAmd64();

    if (useEpoll) {
        if (LOG.isDebugEnabled()) {
            LOG.debug("Create EpollEventLoopGroup with maxThreads = " + threadCount);
        }
        return new Pair<EventLoopGroup, Class<? extends Channel>>(
            new EpollEventLoopGroup(threadCount, Threads.newDaemonThreadFactory("AsyncRpcChannel")),
            EpollSocketChannel.class);
    }

    if (LOG.isDebugEnabled()) {
        LOG.debug("Create NioEventLoopGroup with maxThreads = " + threadCount);
    }
    return new Pair<EventLoopGroup, Class<? extends Channel>>(
        new NioEventLoopGroup(threadCount, Threads.newDaemonThreadFactory("AsyncRpcChannel")),
        NioSocketChannel.class);
}
示例4: start
import io.netty.channel.EventLoopGroup; //导入依赖的package包/类
/**
 * Connects to the given endpoint and publishes the resulting channel under
 * {@code SOCKET_CHANNEL} for later use.
 *
 * @param ip   target host
 * @param port target port
 * @throws Exception if SSL setup or the connection attempt fails
 */
public void start(String ip, int port) throws Exception {
    // Configure SSL: a trust-all client context when SSL is enabled.
    final SslContext sslCtx;
    if (SSL) {
        sslCtx = SslContextBuilder.forClient().trustManager(InsecureTrustManagerFactory.INSTANCE).build();
    } else {
        sslCtx = null;
    }
    EventLoopGroup group = new NioEventLoopGroup();
    try {
        Bootstrap b = new Bootstrap();
        b.group(group).channel(NioSocketChannel.class).handler(new FileClientInitializer(sslCtx));
        Channel ch = b.connect(ip, port).sync().channel();
        // The channel stays open beyond this method, so the group must stay
        // alive too; release it only once the channel eventually closes.
        ch.closeFuture().addListener(f -> group.shutdownGracefully());
        ConfigurationContext.propMap.putIfAbsent(SOCKET_CHANNEL, ch);
    } catch (Exception e) {
        // BUG FIX: the original printed the stack trace and swallowed the
        // failure, leaking the event loop group and leaving callers unaware
        // that no channel was registered. Release the group and propagate.
        group.shutdownGracefully();
        throw e;
    }
}
示例5: bind
import io.netty.channel.EventLoopGroup; //导入依赖的package包/类
/**
 * Binds a Netty server to the given port and blocks until the server channel
 * closes; both event loop groups are always released on exit.
 *
 * @param port port to listen on
 * @throws InterruptedException if the calling thread is interrupted while waiting
 */
public void bind(int port) throws InterruptedException{
    EventLoopGroup bossGroup = new NioEventLoopGroup();
    EventLoopGroup workGroup = new NioEventLoopGroup();
    try{
        ChannelFuture bound = new ServerBootstrap()
                .group(bossGroup, workGroup)
                .channel(NioServerSocketChannel.class)
                .option(ChannelOption.SO_BACKLOG, 1024)
                .childHandler(new ChildChannelHandler())
                .bind(port)
                .sync();
        System.out.println("服务器已启动, 监控端口号为 : " + port);
        // Block until the server socket itself is closed.
        bound.channel().closeFuture().sync();
    }finally{
        bossGroup.shutdownGracefully();
        workGroup.shutdownGracefully();
    }
}
示例6: RedisClient
import io.netty.channel.EventLoopGroup; //导入依赖的package包/类
/**
 * Legacy constructor kept for backward compatibility: repackages the loose
 * arguments into a {@link RedisClientConfig} and initializes all client state
 * from that config.
 *
 * @deprecated construct via a {@link RedisClientConfig} instead — this
 *     overload only forwards to it.
 */
@Deprecated
public RedisClient(final Timer timer, ExecutorService executor, EventLoopGroup group, Class<? extends SocketChannel> socketChannelClass, String host, int port,
int connectTimeout, int commandTimeout) {
// Fold every argument into a single config object so the rest of the
// initialization has one source of truth.
RedisClientConfig config = new RedisClientConfig();
config.setTimer(timer).setExecutor(executor).setGroup(group).setSocketChannelClass(socketChannelClass)
.setAddress(host, port).setConnectTimeout(connectTimeout).setCommandTimeout(commandTimeout);
this.config = config;
// Read everything back from the config (not the raw parameters) so any
// normalization done by the config setters is respected.
this.executor = config.getExecutor();
this.timer = config.getTimer();
addr = new InetSocketAddress(config.getAddress().getHost(), config.getAddress().getPort());
channels = new DefaultChannelGroup(config.getGroup().next());
// Separate bootstraps for plain commands and pub/sub connections.
bootstrap = createBootstrap(config, Type.PLAIN);
pubSubBootstrap = createBootstrap(config, Type.PUBSUB);
this.commandTimeout = config.getCommandTimeout();
}
示例7: run
import io.netty.channel.EventLoopGroup; //导入依赖的package包/类
/**
 * Start the server and block until its channel closes; event loop groups are
 * always released on exit.
 *
 * @param port The port on which the server listen to
 */
public void run(final int port) {
    final EventLoopGroup bossGroup = new NioEventLoopGroup();
    final EventLoopGroup workerGroup = new NioEventLoopGroup();
    try {
        final ServerBootstrap bootstrap = new ServerBootstrap()
                .group(bossGroup, workerGroup)
                .channel(NioServerSocketChannel.class)
                .childHandler(new ServerInitializer())
                .childOption(ChannelOption.SO_KEEPALIVE, true)
                .childOption(ChannelOption.TCP_NODELAY, true);
        final ChannelFuture f = bootstrap.bind(port).sync();
        LOGGER.info("NettyServer: running on port {}", port);
        f.channel().closeFuture().sync();
    } catch (final InterruptedException e) {
        // BUG FIX: restore the interrupt flag so callers can observe the
        // interruption, and log the full exception instead of only its
        // (possibly null) message.
        Thread.currentThread().interrupt();
        LOGGER.error("NettyServer: interrupted while running", e);
    } finally {
        bossGroup.shutdownGracefully();
        workerGroup.shutdownGracefully();
    }
}
示例8: run
import io.netty.channel.EventLoopGroup; //导入依赖的package包/类
/**
 * Boots the Netty HTTP/2 test server, signals waiters once it is listening,
 * and blocks until the server channel closes.
 */
public void run() {
    try {
        // Configure the server.
        EventLoopGroup eventLoops = new NioEventLoopGroup();
        try {
            ServerBootstrap bootstrap = new ServerBootstrap()
                    .option(ChannelOption.SO_BACKLOG, 1024)
                    .group(eventLoops)
                    .channel(NioServerSocketChannel.class)
                    .handler(new LoggingHandler(LogLevel.INFO))
                    .childHandler(new Http2ServerInitializer(mSslCtx));
            sServerChannel = bootstrap.bind(PORT).sync().channel();
            Log.i(TAG, "Netty HTTP/2 server started on " + getServerUrl());
            // Unblock anyone waiting for the server to come up.
            sBlock.open();
            sServerChannel.closeFuture().sync();
        } finally {
            eventLoops.shutdownGracefully();
        }
        Log.i(TAG, "Stopped Http2TestServerRunnable!");
    } catch (Exception e) {
        Log.e(TAG, e.toString());
    }
}
示例9: start
import io.netty.channel.EventLoopGroup; //导入依赖的package包/类
/**
 * Connects to the configured host/port, installs byte-array codecs plus the
 * echo handler, and blocks until the connection closes.
 *
 * @throws Exception if connecting or waiting is interrupted or fails
 */
public void start() throws Exception {
    EventLoopGroup workerGroup = new NioEventLoopGroup();
    try {
        // Pipeline setup shared by every (single) outbound connection.
        ChannelInitializer<SocketChannel> pipelineSetup = new ChannelInitializer<SocketChannel>() {
            @Override
            protected void initChannel(SocketChannel ch) throws Exception {
                System.out.println("connected server...");
                ch.pipeline()
                        .addLast(new ByteArrayEncoder())
                        .addLast(new ByteArrayDecoder())
                        .addLast(new EchoClientHandler());
            }
        };

        Bootstrap bootstrap = new Bootstrap()
                .group(workerGroup)
                .channel(NioSocketChannel.class)
                .remoteAddress(new InetSocketAddress(this.host, this.port))
                .handler(pipelineSetup);

        // Connect, then block until the channel is closed.
        bootstrap.connect().sync().channel().closeFuture().sync();
    } finally {
        workerGroup.shutdownGracefully().sync();
    }
}
示例10: customEventLoopGroup_NotClosedWhenClientIsClosed
import io.netty.channel.EventLoopGroup; //导入依赖的package包/类
@Test
public void customEventLoopGroup_NotClosedWhenClientIsClosed() throws Exception {
    ThreadFactory threadFactory = spy(new CustomThreadFactory());
    // Spy on a concrete NioEventLoopGroup rather than one produced by
    // DefaultEventLoopGroupFactory: the factory's implementation is
    // platform-dependent and could be a final (i.e. non-spyable) class.
    EventLoopGroup callerOwnedGroup = spy(new NioEventLoopGroup(0, threadFactory));

    EventLoopGroupConfiguration groupConfig = EventLoopGroupConfiguration.builder()
            .eventLoopGroup(callerOwnedGroup)
            .build();

    SdkAsyncHttpClient client = NettySdkHttpClientFactory.builder()
            .trustAllCertificates(true)
            .eventLoopGroupConfiguration(groupConfig)
            .build()
            .createHttpClient();

    makeSimpleRequest(client);
    client.close();

    // The client must have used our threads, but closing the client must not
    // shut down an event loop group the caller owns.
    Mockito.verify(threadFactory, atLeastOnce()).newThread(Mockito.any());
    Mockito.verify(callerOwnedGroup, never()).shutdownGracefully();
}
示例11: FabricClient
import io.netty.channel.EventLoopGroup; //导入依赖的package包/类
/**
 * Creates a fabric RPC client: forwards the connection plumbing to the base
 * class and retains the identities, handler, and allocator for later use.
 *
 * @param config              RPC configuration forwarded to the base class
 * @param eventLoop           event loop group driving this client's channel
 * @param allocator           buffer allocator; also provides the ByteBuf allocator for the base class
 * @param remoteIdentity      identity of the remote fabric endpoint
 * @param localIdentity       identity of this (local) fabric endpoint
 * @param handler             message handler for inbound fabric messages
 * @param closeHandlerFactory creates the close handler for this connection — exact semantics defined by FabricConnectionManager
 */
public FabricClient(
RpcConfig config,
EventLoopGroup eventLoop,
BufferAllocator allocator,
FabricIdentity remoteIdentity,
FabricIdentity localIdentity,
FabricMessageHandler handler,
FabricConnectionManager.CloseHandlerCreator closeHandlerFactory) {
super(
config,
allocator.getAsByteBufAllocator(),
eventLoop,
RpcType.HANDSHAKE,
FabricHandshake.class,
FabricHandshake.PARSER);
this.localIdentity = localIdentity;
this.remoteIdentity = remoteIdentity;
this.handler = handler;
this.closeHandlerFactory = closeHandlerFactory;
this.allocator = allocator;
}
示例12: resolveSocketChannelClass
import io.netty.channel.EventLoopGroup; //导入依赖的package包/类
/**
 * Attempts to determine the {@link Channel} class that corresponds to the given
 * event loop group.
 *
 * <p>Delegating groups are unwrapped first; then the group is matched against
 * the known NIO/epoll implementations, falling back to a registry of known
 * group-to-channel class names.
 *
 * @param eventLoopGroup the event loop group to determine the {@link Channel} for
 * @return A {@link Channel} class for the given event loop group.
 * @throws IllegalArgumentException if the group type is not recognized
 */
public static Class<? extends Channel> resolveSocketChannelClass(EventLoopGroup eventLoopGroup) {
    // Unwrap delegating groups iteratively until we reach the real one.
    EventLoopGroup group = eventLoopGroup;
    while (group instanceof DelegatingEventLoopGroup) {
        group = ((DelegatingEventLoopGroup) group).getDelegate();
    }

    if (group instanceof NioEventLoopGroup) {
        return NioSocketChannel.class;
    }
    if (group instanceof EpollEventLoopGroup) {
        return EpollSocketChannel.class;
    }

    // Last resort: look up the channel class by the group's class name.
    String socketFqcn = KNOWN_EL_GROUPS.get(group.getClass().getName());
    if (socketFqcn == null) {
        throw new IllegalArgumentException("Unknown event loop group : " + group.getClass());
    }
    return invokeSafely(() -> (Class<? extends Channel>) Class.forName(socketFqcn));
}
示例13: startUDPServer
import io.netty.channel.EventLoopGroup; //导入依赖的package包/类
/**
 * Runs the discovery UDP listener, rebinding the channel (after a 5 second
 * pause) each time it closes, until shutdown is requested.
 *
 * @throws InterruptedException if interrupted while binding, waiting, or sleeping
 */
public void startUDPServer() throws InterruptedException {
    logger.info("Discovery UDPListener started");
    EventLoopGroup group = new NioEventLoopGroup(1);
    try {
        while (!shutdown) {
            Bootstrap bootstrap = this.createBootstrap(group);
            channel = bootstrap.bind(address, port).sync().channel();
            // Block until the channel dies, then recreate it.
            channel.closeFuture().sync();
            logger.warn("UDP channel closed. Recreating after 5 sec pause...");
            TimeUnit.SECONDS.sleep(5);
        }
    } finally {
        // BUG FIX: the original only shut the group down on the normal exit
        // path; a bind/sync failure or interruption leaked the event loop
        // group (and its thread). Always release it.
        group.shutdownGracefully().sync();
    }
}
示例14: start
import io.netty.channel.EventLoopGroup; //导入依赖的package包/类
/**
 * Starts the HTTP proxy server on a dedicated thread; the thread blocks until
 * the server channel closes, then releases both event loop groups.
 */
@PostConstruct
public void start() {
    new Thread(() -> {
        logger.info("HttpProxyServer started on port: {}", port);
        EventLoopGroup bossGroup = new NioEventLoopGroup(1);
        EventLoopGroup workerGroup = new NioEventLoopGroup();
        try {
            ServerBootstrap b = new ServerBootstrap();
            b.group(bossGroup, workerGroup)
                    .channel(NioServerSocketChannel.class)
                    .handler(new LoggingHandler(LogLevel.DEBUG))
                    .childHandler(channelInitializer)
                    .bind(port).sync().channel().closeFuture().sync();
        } catch (InterruptedException e) {
            // BUG FIX: restore the interrupt flag so the interruption is not
            // silently swallowed on this worker thread.
            Thread.currentThread().interrupt();
            logger.error("shit happens", e);
        } finally {
            bossGroup.shutdownGracefully();
            workerGroup.shutdownGracefully();
        }
    }).start();
}
示例15: UserRPCServer
import io.netty.channel.EventLoopGroup; //导入依赖的package包/类
/**
 * Creates the user RPC server: forwards the RPC mapping, allocator, and event
 * loop group to the base class and retains the remaining collaborators.
 *
 * @param context              bootstrap context supplying config and executor for the RPC mapping
 * @param dbContext            lazily-resolved Sabot context provider
 * @param worker               lazily-resolved user worker provider
 * @param allocator            buffer allocator; also provides the ByteBuf allocator for the base class
 * @param eventLoopGroup       event loop group driving this server's channels
 * @param impersonationManager handles inbound impersonation — exact semantics defined elsewhere
 */
UserRPCServer(
BootStrapContext context,
Provider<SabotContext> dbContext,
Provider<UserWorker> worker,
BufferAllocator allocator,
EventLoopGroup eventLoopGroup,
InboundImpersonationManager impersonationManager
) {
super(UserRpcConfig.getMapping(context.getConfig(), context.getExecutor()),
allocator.getAsByteBufAllocator(),
eventLoopGroup);
// context is only used to build the mapping above; it is not retained.
this.worker = worker;
this.dbContext = dbContext;
this.allocator = allocator;
this.impersonationManager = impersonationManager;
}