This article collects typical usage examples of the Java class io.netty.channel.epoll.EpollSocketChannel. If you are wondering what the EpollSocketChannel class is for, how to use it, or are looking for concrete examples, the curated code samples below should help.
The EpollSocketChannel class belongs to the io.netty.channel.epoll package. Fifteen code examples of the class are shown below, sorted by popularity by default.
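Most of the examples below share one pattern: use EpollSocketChannel together with an EpollEventLoopGroup when the native epoll transport is available, and fall back to NioSocketChannel/NioEventLoopGroup otherwise. The following is a minimal, self-contained sketch of that pattern; it is not taken from any of the examples, and the address, port, and LoggingHandler are placeholders.

import io.netty.bootstrap.Bootstrap;
import io.netty.channel.Channel;
import io.netty.channel.ChannelInitializer;
import io.netty.channel.EventLoopGroup;
import io.netty.channel.epoll.Epoll;
import io.netty.channel.epoll.EpollEventLoopGroup;
import io.netty.channel.epoll.EpollSocketChannel;
import io.netty.channel.nio.NioEventLoopGroup;
import io.netty.channel.socket.NioSocketChannel;
import io.netty.channel.socket.SocketChannel;
import io.netty.handler.logging.LoggingHandler;

public class EpollClientSketch {
    public static void main(String[] args) throws InterruptedException {
        // Prefer the native epoll transport (Linux only); fall back to NIO elsewhere.
        boolean epoll = Epoll.isAvailable();
        EventLoopGroup group = epoll ? new EpollEventLoopGroup() : new NioEventLoopGroup();
        Class<? extends Channel> channelClass = epoll ? EpollSocketChannel.class : NioSocketChannel.class;
        try {
            new Bootstrap()
                .group(group)
                .channel(channelClass)
                .handler(new ChannelInitializer<SocketChannel>() {
                    @Override
                    protected void initChannel(SocketChannel ch) {
                        ch.pipeline().addLast(new LoggingHandler()); // placeholder handler
                    }
                })
                .connect("127.0.0.1", 8080) // placeholder address and port
                .sync()
                .channel()
                .closeFuture()
                .sync();
        } finally {
            group.shutdownGracefully();
        }
    }
}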
Example 1: connectPlugin
import io.netty.channel.epoll.EpollSocketChannel; // import the required package/class
public void connectPlugin(String host, int port) {
ManagedChannel channel = NettyChannelBuilder.forAddress(host, port)
.negotiationType(NegotiationType.PLAINTEXT) // TODO: gRPC encryption
.keepAliveTime(1, TimeUnit.MINUTES)
.keepAliveTimeout(5, TimeUnit.SECONDS)
.directExecutor()
.channelType(EpollSocketChannel.class)
.eventLoopGroup(new EpollEventLoopGroup())
.build();
PluginManagerGrpc.PluginManagerBlockingStub blocking = PluginManagerGrpc.newBlockingStub(channel);
PluginManagerGrpc.PluginManagerStub async = PluginManagerGrpc.newStub(channel);
ServiceConnection connection = ServiceConnection.builder()
.channel(channel)
.blockingStub(blocking)
.asyncStub(async)
.build();
this.pluginConnections.put(PLUGIN_MANAGER, connection);
}
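Example 1 (like Example 13 below) hard-codes the epoll transport in gRPC's NettyChannelBuilder, so channel creation fails on platforms without native epoll support. A defensive variant, sketched here under the assumption that the same imports and host/port values are available, would check Epoll.isAvailable() and keep the channel type and event loop group consistent, since they must match:

NettyChannelBuilder builder = NettyChannelBuilder.forAddress(host, port)
    .negotiationType(NegotiationType.PLAINTEXT)
    .keepAliveTime(1, TimeUnit.MINUTES)
    .keepAliveTimeout(5, TimeUnit.SECONDS)
    .directExecutor();
if (Epoll.isAvailable()) {
    // native epoll transport
    builder.channelType(EpollSocketChannel.class).eventLoopGroup(new EpollEventLoopGroup());
} else {
    // portable NIO fallback
    builder.channelType(NioSocketChannel.class).eventLoopGroup(new NioEventLoopGroup());
}
ManagedChannel channel = builder.build();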
Example 2: start
import io.netty.channel.epoll.EpollSocketChannel; // import the required package/class
public void start() throws InterruptedException {
final EventLoopGroup workerGroup = Epoll.isAvailable() ? new EpollEventLoopGroup() : new NioEventLoopGroup();
try {
Bootstrap bootstrap = new Bootstrap();
bootstrap.group(workerGroup)
.channel(Epoll.isAvailable() ? EpollSocketChannel.class : NioSocketChannel.class)
.handler(new OpenCloudChannelInitializer(this))
.connect(this.host, this.port).sync().channel().closeFuture().syncUninterruptibly();
} catch (Exception ex) {
// AnnotatedConnectException is a private Netty class, so it is matched by its simple name here
if (ex.getClass().getSimpleName().equals("AnnotatedConnectException")) {
System.err.println("Cannot connect to master!");
channel.close();
} else {
ex.printStackTrace();
}
} finally {
workerGroup.shutdownGracefully();
System.out.println("Netty client stopped");
Runtime.getRuntime().halt(0);
}
}
Example 3: resolveSocketChannelClass
import io.netty.channel.epoll.EpollSocketChannel; // import the required package/class
/**
* Attempts to determine the {@link Channel} class that corresponds to the given
* event loop group.
*
* @param eventLoopGroup the event loop group to determine the {@link Channel} for
* @return A {@link Channel} class for the given event loop group.
*/
public static Class<? extends Channel> resolveSocketChannelClass(EventLoopGroup eventLoopGroup) {
if (eventLoopGroup instanceof DelegatingEventLoopGroup) {
return resolveSocketChannelClass(((DelegatingEventLoopGroup) eventLoopGroup).getDelegate());
}
if (eventLoopGroup instanceof NioEventLoopGroup) {
return NioSocketChannel.class;
}
if (eventLoopGroup instanceof EpollEventLoopGroup) {
return EpollSocketChannel.class;
}
String socketFqcn = KNOWN_EL_GROUPS.get(eventLoopGroup.getClass().getName());
if (socketFqcn == null) {
throw new IllegalArgumentException("Unknown event loop group : " + eventLoopGroup.getClass());
}
return invokeSafely(() -> (Class<? extends Channel>) Class.forName(socketFqcn));
}
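A hypothetical caller could pass the resolved class straight to a Bootstrap. The sketch below assumes the utility above is in scope; newClientBootstrap and the LoggingHandler are illustrative, not part of the original code.

static Bootstrap newClientBootstrap(EventLoopGroup group) {
    return new Bootstrap()
        .group(group)
        .channel(resolveSocketChannelClass(group)) // EpollSocketChannel.class for an EpollEventLoopGroup
        .handler(new LoggingHandler());            // placeholder handler
}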
Example 4: createEventLoopGroup
import io.netty.channel.epoll.EpollSocketChannel; // import the required package/class
private static Pair<EventLoopGroup, Class<? extends Channel>> createEventLoopGroup(
Configuration conf) {
// Max amount of threads to use. 0 lets Netty decide based on amount of cores
int maxThreads = conf.getInt(CLIENT_MAX_THREADS, 0);
// Config to enable the native transport. It did not seem stable at the time of implementation,
// although that impression is not based on extensive testing.
boolean epollEnabled = conf.getBoolean(USE_NATIVE_TRANSPORT, false);
// Use the faster native epoll transport mechanism on linux if enabled
if (epollEnabled && JVM.isLinux() && JVM.isAmd64()) {
if (LOG.isDebugEnabled()) {
LOG.debug("Create EpollEventLoopGroup with maxThreads = " + maxThreads);
}
return new Pair<EventLoopGroup, Class<? extends Channel>>(new EpollEventLoopGroup(maxThreads,
Threads.newDaemonThreadFactory("AsyncRpcChannel")), EpollSocketChannel.class);
} else {
if (LOG.isDebugEnabled()) {
LOG.debug("Create NioEventLoopGroup with maxThreads = " + maxThreads);
}
return new Pair<EventLoopGroup, Class<? extends Channel>>(new NioEventLoopGroup(maxThreads,
Threads.newDaemonThreadFactory("AsyncRpcChannel")), NioSocketChannel.class);
}
}
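A caller could unpack the returned Pair into a Bootstrap as sketched below; conf stands for an HBase Configuration instance, and the snippet is illustrative rather than part of the original class.

Pair<EventLoopGroup, Class<? extends Channel>> pair = createEventLoopGroup(conf);
Bootstrap bootstrap = new Bootstrap()
    .group(pair.getFirst())     // the event loop group
    .channel(pair.getSecond()); // EpollSocketChannel.class or NioSocketChannel.class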
Example 5: init
import io.netty.channel.epoll.EpollSocketChannel; // import the required package/class
@Override
public void init(final InetAddress address, final int port, final boolean useEpoll)
{
final Class<? extends SocketChannel> socketChannelClass;
final LazyValue<? extends EventLoopGroup> lazyInit;
if ((Epoll.isAvailable()) && useEpoll)
{
socketChannelClass = EpollSocketChannel.class;
lazyInit = this.epollEventLoopGroupLazyValue;
CoreMain.debug("[Netty] Using epoll channel type");
}
else
{
socketChannelClass = NioSocketChannel.class;
lazyInit = this.nioEventLoopGroupLazyValue;
CoreMain.debug("[Netty] Using default channel type");
}
this.channelFuture = new Bootstrap()
    .channel(socketChannelClass)
    .handler(new ClientConnectionChannel(this))
    .group(lazyInit.get())
    .remoteAddress(address, port)
    .connect()
    .syncUninterruptibly();
}
Example 6: connect
import io.netty.channel.epoll.EpollSocketChannel; // import the required package/class
public void connect(String apiKey) {
Bootstrap bootstrap = new Bootstrap();
Class<? extends Channel> channelClazz;
if (Epoll.isAvailable()) {
channelClazz = EpollSocketChannel.class;
eventLoopGroup = new EpollEventLoopGroup();
} else {
channelClazz = NioSocketChannel.class;
eventLoopGroup = new NioEventLoopGroup();
}
bootstrap.group(eventLoopGroup)
.channel(channelClazz)
.option(ChannelOption.SO_KEEPALIVE, true)
// TODO: add function to get data class by topic and add handler
// Note: Bootstrap#connect() throws IllegalStateException ("handler not set") until a handler
// is configured, so the TODO above must be completed before this snippet can run.
.remoteAddress(host, port)
.connect();
}
Example 7: initEventLoopGroup
import io.netty.channel.epoll.EpollSocketChannel; // import the required package/class
private void initEventLoopGroup() {
// try Epoll first and if that does not work, fall back to nio.
try {
clientGroup = new EpollEventLoopGroup();
serverGroup = new EpollEventLoopGroup();
serverChannelClass = EpollServerSocketChannel.class;
clientChannelClass = EpollSocketChannel.class;
return;
} catch (Throwable t) {
log.warn("Failed to initialize native (epoll) transport. Reason: {}. Proceeding with nio.", t.getMessage());
}
clientGroup = new NioEventLoopGroup();
serverGroup = new NioEventLoopGroup();
serverChannelClass = NioServerSocketChannel.class;
clientChannelClass = NioSocketChannel.class;
}
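The catch (Throwable) branch works because constructing an EpollEventLoopGroup fails (typically with an UnsatisfiedLinkError or ExceptionInInitializerError) when the native library cannot be loaded. An equivalent variant, used by several other examples on this page, checks availability up front; this is a sketch of that alternative, not the original code:

if (Epoll.isAvailable()) {
    clientGroup = new EpollEventLoopGroup();
    serverGroup = new EpollEventLoopGroup();
    serverChannelClass = EpollServerSocketChannel.class;
    clientChannelClass = EpollSocketChannel.class;
} else {
    clientGroup = new NioEventLoopGroup();
    serverGroup = new NioEventLoopGroup();
    serverChannelClass = NioServerSocketChannel.class;
    clientChannelClass = NioSocketChannel.class;
}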
Example 8: initEventLoopGroup
import io.netty.channel.epoll.EpollSocketChannel; // import the required package/class
private void initEventLoopGroup() {
// try Epoll first and if that does not work, fall back to nio.
try {
clientGroup = new EpollEventLoopGroup(0, namedThreads("netty-messaging-event-epoll-client-%d", log));
serverGroup = new EpollEventLoopGroup(0, namedThreads("netty-messaging-event-epoll-server-%d", log));
serverChannelClass = EpollServerSocketChannel.class;
clientChannelClass = EpollSocketChannel.class;
return;
} catch (Throwable e) {
log.debug("Failed to initialize native (epoll) transport. "
+ "Reason: {}. Proceeding with nio.", e.getMessage());
}
clientGroup = new NioEventLoopGroup(0, namedThreads("netty-messaging-event-nio-client-%d", log));
serverGroup = new NioEventLoopGroup(0, namedThreads("netty-messaging-event-nio-server-%d", log));
serverChannelClass = NioServerSocketChannel.class;
clientChannelClass = NioSocketChannel.class;
}
Example 9: clientConfig
import io.netty.channel.epoll.EpollSocketChannel; // import the required package/class
public static ChannelConfiguration clientConfig(EventLoopGroup workerGroup) {
EventLoopGroup parent = workerGroup;
if (parent instanceof EventLoop) {
parent = ((EventLoop) workerGroup).parent();
}
Class<? extends Channel> channelClass;
if (parent instanceof EpollEventLoopGroup) {
channelClass = EpollSocketChannel.class;
} else if (parent instanceof NioEventLoopGroup) {
channelClass = NioSocketChannel.class;
} else {
throw new RuntimeException("Unsupported EventLoopGroup " + workerGroup.getClass());
}
return new ChannelConfiguration(workerGroup, channelClass);
}
Example 10: clientConfig
import io.netty.channel.epoll.EpollSocketChannel; // import the required package/class
/**
* This method will configure a worker EventLoopGroup and a Channel for use by a client. It will
* try to use the correct SocketChannel for the provided workerGroup.
*
* @param workerGroup the EventLoopGroup to use in the ClientChannelConfiguration
* @return ClientChannelConfiguration
*/
public static ClientChannelConfiguration clientConfig(EventLoopGroup workerGroup) {
EventLoopGroup parent = workerGroup;
if (parent instanceof EventLoop) {
parent = ((EventLoop) workerGroup).parent();
}
Class<? extends Channel> channelClass;
if (parent instanceof EpollEventLoopGroup) {
channelClass = EpollSocketChannel.class;
} else if (parent instanceof NioEventLoopGroup) {
channelClass = NioSocketChannel.class;
} else {
throw new RuntimeException("Unsupported EventLoopGroup " + workerGroup.getClass());
}
return new ClientChannelConfiguration(workerGroup, channelClass);
}
Example 11: initEventLoopGroup
import io.netty.channel.epoll.EpollSocketChannel; // import the required package/class
private void initEventLoopGroup() {
// try Epoll first and if that does not work, fall back to nio.
try {
clientGroup = new EpollEventLoopGroup(0, groupedThreads("NettyMessagingEvt", "epollC-%d", log));
serverGroup = new EpollEventLoopGroup(0, groupedThreads("NettyMessagingEvt", "epollS-%d", log));
serverChannelClass = EpollServerSocketChannel.class;
clientChannelClass = EpollSocketChannel.class;
return;
} catch (Throwable e) {
log.debug("Failed to initialize native (epoll) transport. "
+ "Reason: {}. Proceeding with nio.", e.getMessage());
}
clientGroup = new NioEventLoopGroup(0, groupedThreads("NettyMessagingEvt", "nioC-%d", log));
serverGroup = new NioEventLoopGroup(0, groupedThreads("NettyMessagingEvt", "nioS-%d", log));
serverChannelClass = NioServerSocketChannel.class;
clientChannelClass = NioSocketChannel.class;
}
Example 12: getClientChannelClass
import io.netty.channel.epoll.EpollSocketChannel; // import the required package/class
/** Returns the correct (client) SocketChannel class based on IOMode. */
public static Class<? extends Channel> getClientChannelClass(IOMode mode) {
switch (mode) {
case NIO:
return NioSocketChannel.class;
case EPOLL:
return EpollSocketChannel.class;
default:
throw new IllegalArgumentException("Unknown io mode: " + mode);
}
}
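A hypothetical caller would pair the returned channel class with an event loop group created for the same IOMode; IOMode is the enum used by the example, everything else here is illustrative.

IOMode mode = IOMode.EPOLL;
EventLoopGroup group = (mode == IOMode.EPOLL)
    ? new EpollEventLoopGroup()
    : new NioEventLoopGroup();
Bootstrap bootstrap = new Bootstrap()
    .group(group)
    .channel(getClientChannelClass(mode));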
Example 13: addConnection
import io.netty.channel.epoll.EpollSocketChannel; // import the required package/class
@SuppressWarnings("unchecked")
@Override
public void addConnection(ServiceType serviceType, String host, int port) {
ManagedChannel channel = NettyChannelBuilder.forAddress(host, port)
.negotiationType(NegotiationType.PLAINTEXT) // TODO: gRPC encryption
.keepAliveTime(1, TimeUnit.MINUTES)
.keepAliveTimeout(5, TimeUnit.SECONDS)
.directExecutor()
.channelType(EpollSocketChannel.class)
.eventLoopGroup(new EpollEventLoopGroup())
.build();
AbstractStub blocking;
AbstractStub async;
switch (serviceType) {
case WORLD: {
blocking = WorldServiceGrpc.newBlockingStub(channel);
async = WorldServiceGrpc.newStub(channel);
break;
}
case PLUGIN_MANAGER: {
blocking = PluginManagerGrpc.newBlockingStub(channel);
async = PluginManagerGrpc.newStub(channel);
break;
}
default: {
throw new RuntimeException("Service type not handled: " + serviceType.name());
}
}
ServiceConnection connection = ServiceConnection.builder()
.channel(channel)
.blockingStub(blocking)
.asyncStub(async)
.build();
this.connections.put(serviceType, connection);
}
Example 14: func_181124_a
import io.netty.channel.epoll.EpollSocketChannel; // import the required package/class
public static NetworkManager func_181124_a(InetAddress p_181124_0_, int p_181124_1_, boolean p_181124_2_)
{
final NetworkManager networkmanager = new NetworkManager(EnumPacketDirection.CLIENTBOUND);
Class<? extends SocketChannel> oclass;
LazyLoadBase<? extends EventLoopGroup> lazyloadbase;
if (Epoll.isAvailable() && p_181124_2_)
{
oclass = EpollSocketChannel.class;
lazyloadbase = field_181125_e;
}
else
{
oclass = NioSocketChannel.class;
lazyloadbase = CLIENT_NIO_EVENTLOOP;
}
(new Bootstrap()).group(lazyloadbase.getValue()).handler(new ChannelInitializer<Channel>()
{
protected void initChannel(Channel p_initChannel_1_) throws Exception
{
try
{
p_initChannel_1_.config().setOption(ChannelOption.TCP_NODELAY, Boolean.valueOf(true));
}
catch (ChannelException var3)
{
// ignored: the transport may not support TCP_NODELAY
}
p_initChannel_1_.pipeline()
    .addLast("timeout", new ReadTimeoutHandler(30))
    .addLast("splitter", new MessageDeserializer2())
    .addLast("decoder", new MessageDeserializer(EnumPacketDirection.CLIENTBOUND))
    .addLast("prepender", new MessageSerializer2())
    .addLast("encoder", new MessageSerializer(EnumPacketDirection.SERVERBOUND))
    .addLast("packet_handler", networkmanager);
}
}).channel(oclass).connect(p_181124_0_, p_181124_1_).syncUninterruptibly();
return networkmanager;
}
Example 15: setUp
import io.netty.channel.epoll.EpollSocketChannel; // import the required package/class
@Before
public void setUp()
throws Exception {
serverMock = new EpollConnDroppingServer(DEFAULT_PORT, FAIL_EVERY_CONN_ATTEMPT);
final Semaphore concurrencyThrottle = new Semaphore(CONCURRENCY);
group = new EpollEventLoopGroup();
final Bootstrap bootstrap = new Bootstrap()
.group(group)
.channel(EpollSocketChannel.class)
.handler(
new ChannelInitializer<SocketChannel>() {
@Override
protected final void initChannel(final SocketChannel conn)
throws Exception {
conn.pipeline().addLast(new DummyClientChannelHandler());
}
}
)
.option(ChannelOption.SO_KEEPALIVE, true)
.option(ChannelOption.SO_REUSEADDR, true)
.option(ChannelOption.TCP_NODELAY, true);
connPool = new BasicMultiNodeConnPool(
concurrencyThrottle, NODES, bootstrap, CPH, DEFAULT_PORT, 0
);
connPool.preCreateConnections(CONCURRENCY);
}