This article collects typical usage examples of the Java class io.netty.channel.EventLoopGroup. If you are wondering what the Java EventLoopGroup class is for, how to use it, or what it looks like in practice, the curated code examples here may help.
The EventLoopGroup class belongs to the io.netty.channel package. Fifteen code examples of the EventLoopGroup class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
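Before the project-specific examples, here is a minimal, self-contained sketch of the usual EventLoopGroup lifecycle (create the groups, hand them to a bootstrap, shut them down). It is not taken from any of the examples below; the class name, port number, and empty handler body are placeholders.
import io.netty.bootstrap.ServerBootstrap;
import io.netty.channel.Channel;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.nio.NioServerSocketChannel;

public class EventLoopGroupLifecycle {
    public static void main(String[] args) throws InterruptedException {
        // One group accepts connections, the other handles I/O on the accepted channels.
        EventLoopGroup bossGroup = new NioEventLoopGroup(1);
        EventLoopGroup workerGroup = new NioEventLoopGroup(); // no-arg constructor sizes itself from the CPU count
        try {
            ServerBootstrap bootstrap = new ServerBootstrap()
                .group(bossGroup, workerGroup)
                .channel(NioServerSocketChannel.class)
                .childHandler(new ChannelInitializer<Channel>() {
                    @Override
                    protected void initChannel(Channel ch) {
                        // add application handlers to ch.pipeline() here
                    }
                });
            bootstrap.bind(8080).sync().channel().closeFuture().sync(); // 8080 is a placeholder port
        } finally {
            // Always release the event loop threads when the server is done.
            bossGroup.shutdownGracefully();
            workerGroup.shutdownGracefully();
        }
    }
}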
Example 1: Server
import io.netty.channel.EventLoopGroup; // import the required package/class
private Server(
    AddressResolver addressResolver,
    EventLoopGroup eventLoopGroup,
    Class<? extends ServerChannel> channelClass,
    boolean customEventLoop,
    Timer timer,
    boolean customTimer,
    long bindTimeoutInNanos,
    StubStore stubStore,
    boolean activityLogging) {
  this(
      addressResolver,
      eventLoopGroup,
      customEventLoop,
      timer,
      customTimer,
      bindTimeoutInNanos,
      stubStore,
      activityLogging,
      new ServerBootstrap()
          .group(eventLoopGroup)
          .channel(channelClass)
          .childHandler(new Initializer()));
}
Example 2: addLocalEndpoint
import io.netty.channel.EventLoopGroup; // import the required package/class
/**
 * Adds a channel that listens locally
 */
public SocketAddress addLocalEndpoint()
{
    ChannelFuture channelfuture;
    synchronized (this.endpoints)
    {
        channelfuture = ((ServerBootstrap)((ServerBootstrap)(new ServerBootstrap()).channel(LocalServerChannel.class)).childHandler(new ChannelInitializer<Channel>()
        {
            protected void initChannel(Channel p_initChannel_1_) throws Exception
            {
                NetworkManager networkmanager = new NetworkManager(EnumPacketDirection.SERVERBOUND);
                networkmanager.setNetHandler(new NetHandlerHandshakeMemory(NetworkSystem.this.mcServer, networkmanager));
                NetworkSystem.this.networkManagers.add(networkmanager);
                p_initChannel_1_.pipeline().addLast((String)"packet_handler", (ChannelHandler)networkmanager);
            }
        }).group((EventLoopGroup)eventLoops.getValue()).localAddress(LocalAddress.ANY)).bind().syncUninterruptibly();
        this.endpoints.add(channelfuture);
    }
    return channelfuture.channel().localAddress();
}
Example 3: createEventLoopGroup
import io.netty.channel.EventLoopGroup; // import the required package/class
private static Pair<EventLoopGroup, Class<? extends Channel>> createEventLoopGroup(
    Configuration conf) {
  // Max amount of threads to use. 0 lets Netty decide based on amount of cores
  int maxThreads = conf.getInt(CLIENT_MAX_THREADS, 0);
  // Config to enable native transport. Does not seem to be stable at time of implementation
  // although it is not extensively tested.
  boolean epollEnabled = conf.getBoolean(USE_NATIVE_TRANSPORT, false);
  // Use the faster native epoll transport mechanism on linux if enabled
  if (epollEnabled && JVM.isLinux() && JVM.isAmd64()) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Create EpollEventLoopGroup with maxThreads = " + maxThreads);
    }
    return new Pair<EventLoopGroup, Class<? extends Channel>>(new EpollEventLoopGroup(maxThreads,
        Threads.newDaemonThreadFactory("AsyncRpcChannel")), EpollSocketChannel.class);
  } else {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Create NioEventLoopGroup with maxThreads = " + maxThreads);
    }
    return new Pair<EventLoopGroup, Class<? extends Channel>>(new NioEventLoopGroup(maxThreads,
        Threads.newDaemonThreadFactory("AsyncRpcChannel")), NioSocketChannel.class);
  }
}
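A brief usage sketch (assumed caller code, not part of the original class; it presumes the Pair type used here exposes getFirst()/getSecond() accessors): the returned pair is meant to be fed into a Bootstrap so the event loop group and the channel class always match.
Pair<EventLoopGroup, Class<? extends Channel>> groupAndChannel = createEventLoopGroup(conf);
Bootstrap bootstrap = new Bootstrap()
    .group(groupAndChannel.getFirst())      // epoll or nio group, chosen above
    .channel(groupAndChannel.getSecond())   // matching socket channel class
    .handler(new ChannelInitializer<Channel>() {
      @Override
      protected void initChannel(Channel ch) {
        // add RPC codec/handlers to ch.pipeline() here
      }
    });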
Example 4: start
import io.netty.channel.EventLoopGroup; // import the required package/class
public void start(String ip, int port) throws Exception {
    // Configure SSL.
    final SslContext sslCtx;
    if (SSL) {
        sslCtx = SslContextBuilder.forClient().trustManager(InsecureTrustManagerFactory.INSTANCE).build();
    } else {
        sslCtx = null;
    }
    EventLoopGroup group = new NioEventLoopGroup();
    try {
        Bootstrap b = new Bootstrap();
        b.group(group).channel(NioSocketChannel.class).handler(new FileClientInitializer(sslCtx));
        Channel ch = b.connect(ip, port).sync().channel();
        ConfigurationContext.propMap.putIfAbsent(SOCKET_CHANNEL, ch);
    } catch (Exception e) {
        e.printStackTrace();
    }
}
Example 5: bind
import io.netty.channel.EventLoopGroup; // import the required package/class
/**
 * @description Listen on the given port
 * @time Created: 2017-07-21 15:50:26
 * @param port
 * @throws InterruptedException
 * @author dzn
 */
public void bind(int port) throws InterruptedException {
    EventLoopGroup bossGroup = new NioEventLoopGroup();
    EventLoopGroup workGroup = new NioEventLoopGroup();
    try {
        ServerBootstrap server = new ServerBootstrap();
        server.group(bossGroup, workGroup)
              .channel(NioServerSocketChannel.class)
              .option(ChannelOption.SO_BACKLOG, 1024)
              .childHandler(new ChildChannelHandler());
        ChannelFuture cf = server.bind(port).sync();
        System.out.println("Server started, listening on port: " + port);
        cf.channel().closeFuture().sync();
    } finally {
        bossGroup.shutdownGracefully();
        workGroup.shutdownGracefully();
    }
}
Example 6: RedisClient
import io.netty.channel.EventLoopGroup; // import the required package/class
@Deprecated
public RedisClient(final Timer timer, ExecutorService executor, EventLoopGroup group, Class<? extends SocketChannel> socketChannelClass, String host, int port,
                   int connectTimeout, int commandTimeout) {
    RedisClientConfig config = new RedisClientConfig();
    config.setTimer(timer).setExecutor(executor).setGroup(group).setSocketChannelClass(socketChannelClass)
          .setAddress(host, port).setConnectTimeout(connectTimeout).setCommandTimeout(commandTimeout);
    this.config = config;
    this.executor = config.getExecutor();
    this.timer = config.getTimer();
    addr = new InetSocketAddress(config.getAddress().getHost(), config.getAddress().getPort());
    channels = new DefaultChannelGroup(config.getGroup().next());
    bootstrap = createBootstrap(config, Type.PLAIN);
    pubSubBootstrap = createBootstrap(config, Type.PUBSUB);
    this.commandTimeout = config.getCommandTimeout();
}
Example 7: run
import io.netty.channel.EventLoopGroup; // import the required package/class
/**
 * Start the server
 *
 * @param port The port on which the server listens
 */
public void run(final int port) {
    final EventLoopGroup bossGroup = new NioEventLoopGroup();
    final EventLoopGroup workerGroup = new NioEventLoopGroup();
    try {
        final ServerBootstrap bootstrap = new ServerBootstrap()
                .group(bossGroup, workerGroup)
                .channel(NioServerSocketChannel.class)
                .childHandler(new ServerInitializer())
                .childOption(ChannelOption.SO_KEEPALIVE, true)
                .childOption(ChannelOption.TCP_NODELAY, true);
        final ChannelFuture f = bootstrap.bind(port).sync();
        LOGGER.info("NettyServer: running on port {}", port);
        f.channel().closeFuture().sync();
    } catch (final InterruptedException e) {
        LOGGER.error("NettyServer: an error occurred while running: {}", e.getMessage());
    } finally {
        bossGroup.shutdownGracefully();
        workerGroup.shutdownGracefully();
    }
}
Example 8: run
import io.netty.channel.EventLoopGroup; // import the required package/class
public void run() {
    try {
        // Configure the server.
        EventLoopGroup group = new NioEventLoopGroup();
        try {
            ServerBootstrap b = new ServerBootstrap();
            b.option(ChannelOption.SO_BACKLOG, 1024);
            b.group(group)
                    .channel(NioServerSocketChannel.class)
                    .handler(new LoggingHandler(LogLevel.INFO))
                    .childHandler(new Http2ServerInitializer(mSslCtx));
            sServerChannel = b.bind(PORT).sync().channel();
            Log.i(TAG, "Netty HTTP/2 server started on " + getServerUrl());
            sBlock.open();
            sServerChannel.closeFuture().sync();
        } finally {
            group.shutdownGracefully();
        }
        Log.i(TAG, "Stopped Http2TestServerRunnable!");
    } catch (Exception e) {
        Log.e(TAG, e.toString());
    }
}
Example 9: start
import io.netty.channel.EventLoopGroup; // import the required package/class
public void start() throws Exception {
    EventLoopGroup group = new NioEventLoopGroup();
    try {
        Bootstrap b = new Bootstrap();
        b.group(group)
         .channel(NioSocketChannel.class)
         .remoteAddress(new InetSocketAddress(this.host, this.port))
         .handler(new ChannelInitializer<SocketChannel>() {
             @Override
             protected void initChannel(SocketChannel ch) throws Exception {
                 System.out.println("connected server...");
                 ch.pipeline().addLast(new ByteArrayEncoder());
                 ch.pipeline().addLast(new ByteArrayDecoder());
                 ch.pipeline().addLast(new EchoClientHandler());
             }
         });
        ChannelFuture cf = b.connect().sync();
        cf.channel().closeFuture().sync();
    } finally {
        group.shutdownGracefully().sync();
    }
}
Example 10: customEventLoopGroup_NotClosedWhenClientIsClosed
import io.netty.channel.EventLoopGroup; // import the required package/class
@Test
public void customEventLoopGroup_NotClosedWhenClientIsClosed() throws Exception {
    ThreadFactory threadFactory = spy(new CustomThreadFactory());
    // Cannot use DefaultEventLoopGroupFactory because the concrete
    // implementation it creates is platform-dependent and could be a final
    // (i.e. non-spyable) class.
    EventLoopGroup eventLoopGroup = spy(new NioEventLoopGroup(0, threadFactory));
    EventLoopGroupConfiguration eventLoopGroupConfiguration =
            EventLoopGroupConfiguration.builder()
                                       .eventLoopGroup(eventLoopGroup)
                                       .build();
    SdkAsyncHttpClient customClient =
            NettySdkHttpClientFactory.builder()
                                     .trustAllCertificates(true)
                                     .eventLoopGroupConfiguration(eventLoopGroupConfiguration)
                                     .build()
                                     .createHttpClient();
    makeSimpleRequest(customClient);
    customClient.close();
    Mockito.verify(threadFactory, atLeastOnce()).newThread(Mockito.any());
    Mockito.verify(eventLoopGroup, never()).shutdownGracefully();
}
Example 11: FabricClient
import io.netty.channel.EventLoopGroup; // import the required package/class
public FabricClient(
    RpcConfig config,
    EventLoopGroup eventLoop,
    BufferAllocator allocator,
    FabricIdentity remoteIdentity,
    FabricIdentity localIdentity,
    FabricMessageHandler handler,
    FabricConnectionManager.CloseHandlerCreator closeHandlerFactory) {
  super(
      config,
      allocator.getAsByteBufAllocator(),
      eventLoop,
      RpcType.HANDSHAKE,
      FabricHandshake.class,
      FabricHandshake.PARSER);
  this.localIdentity = localIdentity;
  this.remoteIdentity = remoteIdentity;
  this.handler = handler;
  this.closeHandlerFactory = closeHandlerFactory;
  this.allocator = allocator;
}
Example 12: resolveSocketChannelClass
import io.netty.channel.EventLoopGroup; // import the required package/class
/**
 * Attempts to determine the {@link Channel} class that corresponds to the given
 * event loop group.
 *
 * @param eventLoopGroup the event loop group to determine the {@link Channel} for
 * @return A {@link Channel} class for the given event loop group.
 */
public static Class<? extends Channel> resolveSocketChannelClass(EventLoopGroup eventLoopGroup) {
    if (eventLoopGroup instanceof DelegatingEventLoopGroup) {
        return resolveSocketChannelClass(((DelegatingEventLoopGroup) eventLoopGroup).getDelegate());
    }
    if (eventLoopGroup instanceof NioEventLoopGroup) {
        return NioSocketChannel.class;
    }
    if (eventLoopGroup instanceof EpollEventLoopGroup) {
        return EpollSocketChannel.class;
    }
    String socketFqcn = KNOWN_EL_GROUPS.get(eventLoopGroup.getClass().getName());
    if (socketFqcn == null) {
        throw new IllegalArgumentException("Unknown event loop group : " + eventLoopGroup.getClass());
    }
    return invokeSafely(() -> (Class<? extends Channel>) Class.forName(socketFqcn));
}
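A brief usage sketch (assumed caller code, not from the source): the resolved channel class is used so that whatever EventLoopGroup the caller supplied gets a matching socket channel class when the Bootstrap is built.
EventLoopGroup group = new NioEventLoopGroup();
Bootstrap bootstrap = new Bootstrap()
    .group(group)
    .channel(resolveSocketChannelClass(group)) // NioSocketChannel.class for a NioEventLoopGroup
    .handler(new ChannelInitializer<Channel>() {
      @Override
      protected void initChannel(Channel ch) {
        // add handlers to ch.pipeline() here
      }
    });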
Example 13: startUDPServer
import io.netty.channel.EventLoopGroup; // import the required package/class
public void startUDPServer() throws InterruptedException {
    logger.info("Discovery UDPListener started");
    EventLoopGroup group = new NioEventLoopGroup(1);
    while (!shutdown) {
        Bootstrap bootstrap = this.createBootstrap(group);
        channel = bootstrap.bind(address, port).sync().channel();
        channel.closeFuture().sync();
        logger.warn("UDP channel closed. Recreating after 5 sec pause...");
        TimeUnit.SECONDS.sleep(5);
    }
    group.shutdownGracefully().sync();
}
Example 14: start
import io.netty.channel.EventLoopGroup; // import the required package/class
@PostConstruct
public void start() {
    new Thread(() -> {
        logger.info("HttpProxyServer started on port: {}", port);
        EventLoopGroup bossGroup = new NioEventLoopGroup(1);
        EventLoopGroup workerGroup = new NioEventLoopGroup();
        try {
            ServerBootstrap b = new ServerBootstrap();
            b.group(bossGroup, workerGroup)
             .channel(NioServerSocketChannel.class)
             .handler(new LoggingHandler(LogLevel.DEBUG))
             .childHandler(channelInitializer)
             .bind(port).sync().channel().closeFuture().sync();
        } catch (InterruptedException e) {
            logger.error("shit happens", e);
        } finally {
            bossGroup.shutdownGracefully();
            workerGroup.shutdownGracefully();
        }
    }).start();
}
Example 15: UserRPCServer
import io.netty.channel.EventLoopGroup; // import the required package/class
UserRPCServer(
    BootStrapContext context,
    Provider<SabotContext> dbContext,
    Provider<UserWorker> worker,
    BufferAllocator allocator,
    EventLoopGroup eventLoopGroup,
    InboundImpersonationManager impersonationManager
) {
  super(UserRpcConfig.getMapping(context.getConfig(), context.getExecutor()),
      allocator.getAsByteBufAllocator(),
      eventLoopGroup);
  this.worker = worker;
  this.dbContext = dbContext;
  this.allocator = allocator;
  this.impersonationManager = impersonationManager;
}