This article collects typical usage examples of the Java class io.netty.util.internal.logging.Slf4JLoggerFactory. If you are wondering what the Slf4JLoggerFactory class is for, how to use it, or what it looks like in real code, the selected examples below should help.
The Slf4JLoggerFactory class belongs to the io.netty.util.internal.logging package. Fifteen code examples of the class are shown below, ordered by popularity by default.
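Before the individual examples, here is a minimal sketch of the pattern they all share: installing the SLF4J-backed factory as Netty's default logger factory, ideally before any bootstrap or channel is created. The wrapper class name below is illustrative; on recent Netty 4.1 releases the shared Slf4JLoggerFactory.INSTANCE singleton is preferred over the deprecated public constructor that several of the examples still use.
import io.netty.util.internal.logging.InternalLoggerFactory;
import io.netty.util.internal.logging.Slf4JLoggerFactory;

public final class NettyLoggingSetup {
    // Route all of Netty's internal logging through SLF4J-backed loggers.
    // Call this once, before creating any Bootstrap/ServerBootstrap.
    public static void install() {
        InternalLoggerFactory.setDefaultFactory(Slf4JLoggerFactory.INSTANCE);
    }
}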
Example 1: init
import io.netty.util.internal.logging.Slf4JLoggerFactory; // import the required package/class
/**
* Initialize timeouts and the connection registry, then start the netty IO server synchronously.
*/
@Override
public void init(Container container) {
super.init(container);
try {
// Configure netty
InternalLoggerFactory.setDefaultFactory(new Slf4JLoggerFactory() {
@Override
public InternalLogger newInstance(String name) {
return new NettyInternalLogger(name);
}
});
ResourceLeakDetector.setLevel(CoreConstants.NettyConstants.RESOURCE_LEAK_DETECTION);
// Start server
startServer();
} catch (InterruptedException e) {
throw new StartupException("Could not start netty server", e);
}
}
Example 2: init
import io.netty.util.internal.logging.Slf4JLoggerFactory; // import the required package/class
/**
* Configure netty and initialize the related components, then call {@link #initClient()} to
* start the netty IO client asynchronously.
*/
@Override
public void init(Container container) {
super.init(container);
// Configure netty
InternalLoggerFactory.setDefaultFactory(new Slf4JLoggerFactory() {
@Override
public InternalLogger newInstance(String name) {
return new NettyInternalLogger(name);
}
});
ResourceLeakDetector.setLevel(CoreConstants.NettyConstants.RESOURCE_LEAK_DETECTION);
// And try to connect
isActive = true;
initClient();
// register BroadcastListener
IntentFilter filter = new IntentFilter();
filter.addAction(WifiManager.SUPPLICANT_CONNECTION_CHANGE_ACTION);
filter.addAction(WifiManager.NETWORK_STATE_CHANGED_ACTION);
filter.addAction(WifiManager.WIFI_STATE_CHANGED_ACTION);
filter.addAction(ConnectivityManager.CONNECTIVITY_ACTION);
requireComponent(ContainerService.KEY_CONTEXT).registerReceiver(broadcastReceiver, filter);
}
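One thing this excerpt leaves implicit: a receiver registered on the Context this way has to be unregistered when the component is torn down, or the Context leaks it. A minimal sketch, assuming the component framework exposes a shutdown counterpart to init() (the method name here is hypothetical):
public void destroy() {
    // Hypothetical counterpart to init(Container): drop the wifi/connectivity receiver
    // registered above so the Android Context is not leaked when this component stops.
    requireComponent(ContainerService.KEY_CONTEXT).unregisterReceiver(broadcastReceiver);
}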
Example 3: main
import io.netty.util.internal.logging.Slf4JLoggerFactory; // import the required package/class
/**
* Main entry point
* @param args None for now
*/
public static void main(String[] args) {
log.info("TSDBLite booting....");
ExtendedThreadManager.install();
InternalLoggerFactory.setDefaultFactory(Slf4JLoggerFactory.INSTANCE);
final String jmxmpIface = ConfigurationHelper.getSystemThenEnvProperty(Constants.CONF_JMXMP_IFACE, Constants.DEFAULT_JMXMP_IFACE);
final int jmxmpPort = ConfigurationHelper.getIntSystemThenEnvProperty(Constants.CONF_JMXMP_PORT, Constants.DEFAULT_JMXMP_PORT);
JMXHelper.fireUpJMXMPServer(jmxmpIface, jmxmpPort, JMXHelper.getHeliosMBeanServer());
server = Server.getInstance();
final Thread mainThread = Thread.currentThread();
StdInCommandHandler.getInstance().registerCommand("stop", new Runnable(){
@Override
public void run() {
if(server!=null) {
log.info("Stopping TSDBLite Server.....");
server.stop();
log.info("TSDBLite Server Stopped. Bye.");
mainThread.interrupt();
}
}
});
try { Thread.currentThread().join(); } catch (Exception x) {/* No Op */}
}
Example 4: init
import io.netty.util.internal.logging.Slf4JLoggerFactory; // import the required package/class
public void init(Container container, ServerBundleConfiguration config) {
logger.info("Initializing the container");
// Override the supplied one
ServerConfiguration configuration = container.getConfiguration().getServerConfiguration();
AbstractHttpConnector connector = null;
InternalLoggerFactory.setDefaultFactory(new Slf4JLoggerFactory());
logger.info("Loading the http connectors");
for (ConnectorConfiguration connectorConfig : configuration.getConnectorConfigurations()) {
if (connectorConfig.getScheme() == Scheme.https) {
connector = createHttpsConnector(connectorConfig, container.getRouter());
} else {
connector = createHttpConnector(connectorConfig, container.getRouter());
}
connector.registerListener(container.getMessageObserver());
connector.initialize();
connectors.add(connector);
}
}
Example 5: start
import io.netty.util.internal.logging.Slf4JLoggerFactory; // import the required package/class
public void start() {
Configuration config = Configuration.INSTANCE;
InternalLoggerFactory.setDefaultFactory(Slf4JLoggerFactory.INSTANCE);
bossGroup = new NioEventLoopGroup(1);
workerGroup = new NioEventLoopGroup();
try {
ServerBootstrap bootstrap = new ServerBootstrap();
bootstrap.group(bossGroup, workerGroup)
.channel(NioServerSocketChannel.class)
.childHandler(new ChannelInitializer<SocketChannel>() {
@Override
protected void initChannel(SocketChannel socketChannel) throws Exception {
socketChannel.pipeline()
.addLast("logging", new LoggingHandler(LogLevel.DEBUG))
.addLast(new SocksInitRequestDecoder())
.addLast(new SocksMessageEncoder())
.addLast(new Socks5Handler())
.addLast(Status.TRAFFIC_HANDLER);
}
});
log.info("\tStartup {}-{}-client [{}{}]", Constants.APP_NAME, Constants.APP_VERSION, config.getMode(), config.getMode().equals("socks5") ? "" : ":" + config.getProtocol());
new Thread(() -> new UdpServer().start()).start();
ChannelFuture future = bootstrap.bind(config.getLocalHost(), config.getLocalPort()).sync();
future.addListener(future1 -> log.info("\tTCP listening at {}:{}...", config.getLocalHost(), config.getLocalPort()));
future.channel().closeFuture().sync();
} catch (Exception e) {
log.error("\tSocket bind failure ({})", e.getMessage());
} finally {
log.info("\tShutting down");
bossGroup.shutdownGracefully();
workerGroup.shutdownGracefully();
}
}
Example 6: start
import io.netty.util.internal.logging.Slf4JLoggerFactory; // import the required package/class
public void start() {
Configuration config = Configuration.INSTANCE;
InternalLoggerFactory.setDefaultFactory(Slf4JLoggerFactory.INSTANCE);
EventLoopGroup bossGroup = new NioEventLoopGroup(1);
EventLoopGroup workerGroup = new NioEventLoopGroup();
try {
ServerBootstrap bootstrap = new ServerBootstrap();
bootstrap.group(bossGroup, workerGroup)
.channel(NioServerSocketChannel.class)
.childHandler(new ChannelInitializer<SocketChannel>() {
protected void initChannel(SocketChannel socketChannel) throws Exception {
socketChannel.pipeline()
.addLast("logging", new LoggingHandler(LogLevel.DEBUG))
.addLast(new XConnectHandler());
if (config.getReadLimit() != 0 || config.getWriteLimit() != 0) {
socketChannel.pipeline().addLast(
new GlobalTrafficShapingHandler(Executors.newScheduledThreadPool(1), config.getWriteLimit(), config.getReadLimit())
);
}
}
});
log.info("\tStartup {}-{}-server [{}]", Constants.APP_NAME, Constants.APP_VERSION, config.getProtocol());
new Thread(() -> new UdpServer().start()).start();
ChannelFuture future = bootstrap.bind(config.getHost(), config.getPort()).sync();
future.addListener(future1 -> log.info("\tTCP listening at {}:{}...", config.getHost(), config.getPort()));
future.channel().closeFuture().sync();
} catch (Exception e) {
log.error("\tSocket bind failure ({})", e.getMessage());
} finally {
log.info("\tShutting down and recycling...");
bossGroup.shutdownGracefully();
workerGroup.shutdownGracefully();
Configuration.shutdownRelays();
}
System.exit(0);
}
Example 7: start
import io.netty.util.internal.logging.Slf4JLoggerFactory; // import the required package/class
/**
* Starts the client and blocks until the connection is closed.
*/
public static void start() {
InternalLoggerFactory.setDefaultFactory(new Slf4JLoggerFactory());
EventLoopGroup group = new NioEventLoopGroup();
try {
Bootstrap boot = new Bootstrap();
boot.group(group)
.channel(NioSocketChannel.class)
.handler(new ChannelInitializer<SocketChannel>() {
@Override
public void initChannel(SocketChannel ch) throws Exception {
ch.pipeline().addLast(new SortClientInitializer());
}
});
LOG.info("Client connecting to {}:{}", ClientMain.SERVER_ADDRESS, ClientMain.SERVER_PORT);
// Start the client.
ChannelFuture f = boot.connect(new InetSocketAddress(ClientMain.SERVER_ADDRESS, ClientMain.SERVER_PORT)).sync();
// Wait until the connection is closed.
f.channel().closeFuture().sync();
} catch (Exception e) {
e.printStackTrace();
} finally {
// The connection is closed automatically on shutdown.
group.shutdownGracefully();
LOG.info("Client Exit.");
}
}
Example 8: start
import io.netty.util.internal.logging.Slf4JLoggerFactory; // import the required package/class
/**
* Starts the server.
* @throws InterruptedException
*/
public static void start() throws InterruptedException {
InternalLoggerFactory.setDefaultFactory(new Slf4JLoggerFactory());
// configure the server
EventLoopGroup bossGroup = new NioEventLoopGroup(1);
EventLoopGroup workerGroup = new NioEventLoopGroup();
try {
ServerBootstrap boot = new ServerBootstrap();
boot.group(bossGroup, workerGroup)
.channel(NioServerSocketChannel.class)
.childHandler(new ChannelInitializer<SocketChannel>() {
@Override
public void initChannel(SocketChannel sChannel) throws Exception {
sChannel.pipeline().addLast(new SortServerInitializer());
}
})
.option(ChannelOption.SO_BACKLOG, 128)
.childOption(ChannelOption.SO_KEEPALIVE, true)
.childOption(ChannelOption.TCP_NODELAY, true);
// Start the server.
ChannelFuture f = boot.bind(new InetSocketAddress(ServerMain.SERVER_ADDRESS, ServerMain.SERVER_PORT)).sync();
LOG.info("Server started at {}:{}, JobId: {}", ServerMain.SERVER_ADDRESS, ServerMain.SERVER_PORT, ServerMain.JOB_ID);
// Wait until the server socket is closed.
f.channel().closeFuture().sync();
}
catch (Exception e) {
e.printStackTrace();
}
finally {
// Shut down all event loops to terminate all threads.
bossGroup.shutdownGracefully();
workerGroup.shutdownGracefully();
// Wait until all threads are terminated.
bossGroup.terminationFuture().sync();
workerGroup.terminationFuture().sync();
LOG.info("Server Exit.");
}
}
Example 9: PravegaConnectionListener
import io.netty.util.internal.logging.Slf4JLoggerFactory; // import the required package/class
/**
* Creates a new instance of the PravegaConnectionListener class.
*
* @param ssl Whether to use SSL.
* @param host The name of the host to listen to.
* @param port The port to listen on.
* @param streamSegmentStore The SegmentStore to delegate all requests to.
* @param statsRecorder (Optional) A StatsRecorder for Metrics.
*/
public PravegaConnectionListener(boolean ssl, String host, int port, StreamSegmentStore streamSegmentStore,
SegmentStatsRecorder statsRecorder) {
this.ssl = ssl;
this.host = Exceptions.checkNotNullOrEmpty(host, "host");
this.port = port;
this.store = Preconditions.checkNotNull(streamSegmentStore, "streamSegmentStore");
this.statsRecorder = statsRecorder;
InternalLoggerFactory.setDefaultFactory(Slf4JLoggerFactory.INSTANCE);
}
Example 10: setup
import io.netty.util.internal.logging.Slf4JLoggerFactory; // import the required package/class
@Before
public void setup() throws Exception {
originalLevel = ResourceLeakDetector.getLevel();
ResourceLeakDetector.setLevel(Level.PARANOID);
InternalLoggerFactory.setDefaultFactory(Slf4JLoggerFactory.INSTANCE);
this.serviceBuilder = ServiceBuilder.newInMemoryBuilder(ServiceBuilderConfig.getDefaultConfig());
this.serviceBuilder.initialize();
}
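setup() captures the previous leak-detection level in originalLevel, which only makes sense if a teardown restores it; that part is not shown in the excerpt. A minimal sketch of such a teardown, assuming JUnit 4's @After and that the service builder is released via its usual close() hook (both are assumptions here):
@After
public void teardown() throws Exception {
    // Assumed counterpart to setup(): release the in-memory service builder
    // and put the leak detector back at the level captured in setup().
    this.serviceBuilder.close();
    ResourceLeakDetector.setLevel(originalLevel);
}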
Example 11: serverBootstrapFactory
import io.netty.util.internal.logging.Slf4JLoggerFactory; // import the required package/class
@Bean
@Resource(name = "channelInitializer")
public ServerBootstrap serverBootstrapFactory(ChannelInitializer<SocketChannel> channelInitializer) {
// Configure the server
EventLoopGroup bossGroup = new NioEventLoopGroup();
EventLoopGroup workerGroup = new NioEventLoopGroup();
InternalLoggerFactory.setDefaultFactory(new Slf4JLoggerFactory());
ServerBootstrap serverBootstrap = new ServerBootstrap();
serverBootstrap.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class)
.handler(new LoggingHandler(LogLevel.INFO)).childHandler(channelInitializer)
.option(ChannelOption.SO_BACKLOG, 128).childOption(ChannelOption.SO_KEEPALIVE, true);
return serverBootstrap;
}
Example 12: initSystem
import io.netty.util.internal.logging.Slf4JLoggerFactory; // import the required package/class
/**
* Configure system parameters.
* @throws Exception
*/
public void initSystem() throws Exception {
PropertyConfigurator.configure("Log4j.properties");
InternalLoggerFactory.setDefaultFactory(new Slf4JLoggerFactory());
log.info(System.getProperty("file.encoding"));
System.setProperty("io.netty.recycler.maxCapacity.default", PropertyUtil.getProperty("io.netty.recycler.maxCapacity.default"));
System.setProperty("io.netty.leakDetectionLevel", "paranoid");
DbHelper.init();
}
Example 13: init
import io.netty.util.internal.logging.Slf4JLoggerFactory; // import the required package/class
@Override
public void init(BundleContext context, DependencyManager manager) throws Exception {
// set the Netty log factory
InternalLoggerFactory.setDefaultFactory(new Slf4JLoggerFactory());
// create all OSGi managers
createManagers(manager);
// listen for the HubManager to be published
hubManagerTracker = new ServiceTracker(context, HubManager.class.getName(), null) {
@Override
public Object addingService(ServiceReference ref) {
startHubManager((HubManager)context.getService(ref));
return null;
}
};
hubManagerTracker.open();
// wait for ConfigurationAdmin to become available to start advertising presence
presenceTracker = new ServiceTracker(context, ConfigurationManager.class.getName(), null) {
@Override
public Object addingService(ServiceReference serviceRef) {
ServiceReference ref = context.getServiceReference(ConfigurationManager.class.getName());
if (ref != null) {
// start advertisements
return context.getService(ref);
} else {
return null;
}
}
@Override
public void removedService(ServiceReference ref, Object service) {
super.removedService(ref, service);
}
};
presenceTracker.open();
}
Example 14: main
import io.netty.util.internal.logging.Slf4JLoggerFactory; // import the required package/class
public static void main(String[] args) throws Exception {
// must do this first for logback
InternalLoggerFactory.setDefaultFactory( new Slf4JLoggerFactory( ));
// args[0] should be the path of the transficc.props properties file
String propsFilePath = args[0];
Properties props = new Properties( );
// load up the props for socket and DB config
File pfile = new File( propsFilePath);
logger.info( "Loading config from: " + propsFilePath);
InputStream istrm = new FileInputStream( propsFilePath);
props.load( istrm);
String sport = props.getProperty( "SocketServerPort", "8080");
logger.info( "Binding to port: " + sport);
int port = Integer.parseInt( sport);
// Create the transficc client, and the Q it uses to send events including
// market data back to the thread that pushes updates on the web sockets.
// Also an executor to provide a thread to run the TFClient. We keep a ref
// to the transficc service so we can pass it to the web socket handler.
LinkedBlockingQueue<JSON.Message> outQ = new LinkedBlockingQueue<JSON.Message>( );
TFClient tfc = new TFClient( props, outQ);
TransficcService tfs = tfc.GetService( );
// need an executor for the thread that will intermittently send data to the client
ThreadFactoryBuilder builder = new ThreadFactoryBuilder( );
builder.setDaemon( true);
builder.setNameFormat( "transficc-%d");
ExecutorService transficcExecutor = Executors.newSingleThreadExecutor( builder.build( ));
FutureTask<String> transficcTask = new FutureTask<String>( tfc);
transficcExecutor.execute( transficcTask);
// Now we can create the pusher object and thread, which consumes the event
// on the outQ. Those events originate with TFClient callbacks from transficc,
// and also incoming websock events like subscription requests.
WebSockPusher pusher = new WebSockPusher( props, outQ, tfs);
ExecutorService pusherExecutor = Executors.newSingleThreadExecutor( builder.build( ));
FutureTask<String> pusherTask = new FutureTask<String>( pusher);
pusherExecutor.execute( pusherTask);
// Compose the server components...
EventLoopGroup bossGroup = new NioEventLoopGroup( 1);
EventLoopGroup workerGroup = new NioEventLoopGroup( );
try {
ServerBootstrap b = new ServerBootstrap( );
LoggingHandler lh = new LoggingHandler(LogLevel.INFO);
ChannelInitializer ci = new ChannelInitializer<SocketChannel>( ) {
@Override
public void initChannel(SocketChannel ch) throws Exception {
ChannelPipeline p = ch.pipeline();
p.addLast("encoder", new HttpResponseEncoder());
p.addLast("decoder", new HttpRequestDecoder());
p.addLast("aggregator", new HttpObjectAggregator(65536));
p.addLast("handler", new WebSocketHandler( props, outQ));
}
};
b.group( bossGroup, workerGroup).channel( NioServerSocketChannel.class).handler( lh).childHandler( ci);
// Fire up the server...
ChannelFuture f = b.bind( port).sync( );
logger.info( "Server started");
// Wait until the server socket is closed.
f.channel( ).closeFuture( ).sync( );
}
finally {
logger.info( "Server shutdown started");
// Shut down all event loops to terminate all threads.
bossGroup.shutdownGracefully( );
workerGroup.shutdownGracefully();
logger.info( "Server shutdown completed");
}
}
Example 15: start
import io.netty.util.internal.logging.Slf4JLoggerFactory; // import the required package/class
private void start() throws InterruptedException, IOException {
// NioEventLoopGroup is a multithreaded event loop that handles I/O operations
// The boss group accepts incoming connections
EventLoopGroup bossGroup = new NioEventLoopGroup();
// The worker group handles the connections that have already been accepted
EventLoopGroup workerGroup = new NioEventLoopGroup();
try {
InternalLoggerFactory.setDefaultFactory(new Slf4JLoggerFactory());
// ServerBootstrap is a helper class for setting up an NIO server
sBootstrap = new ServerBootstrap();
// Assign the acceptor (boss) and worker EventLoopGroups to the bootstrap; they handle
// all events and I/O for the ServerChannel and its child Channels.
// Two groups are used so that accepting new connections and handling the I/O of
// already-accepted connections run on separate event loops.
sBootstrap.group(bossGroup, workerGroup).channel(NioServerSocketChannel.class)
.childHandler(channelInboundHandler).option(ChannelOption.SO_BACKLOG, 128)
.childOption(ChannelOption.SO_KEEPALIVE, true)
.childOption(ChannelOption.CONNECT_TIMEOUT_MILLIS, 5000);
// Bind the port and start accepting incoming connections
String registHost = registration.getHost();
future = sBootstrap.bind(registHost, messageServerConfig.getPort()).sync();
// Obtain the actual bound address and port
if (future.channel().localAddress() instanceof InetSocketAddress) {
InetSocketAddress socketAddress = (InetSocketAddress)future.channel().localAddress();
this.priorIP = messageServerConfig.getIp();
this.ipadress = socketAddress.getAddress().getHostAddress();
this.port = socketAddress.getPort();
this.started = true;
logger.info("NettyChatServer 启动了,address={}:{}", socketAddress.getAddress().getHostAddress(), socketAddress.getPort());
}
// messageServerCluster
messageServerCluster.registLocal(this);
// Wait until the server socket is closed
// (this does not happen in this example, but it is the point at which the server could be shut down)
future.channel().closeFuture().sync();
} finally {
workerGroup.shutdownGracefully();
bossGroup.shutdownGracefully();
logger.info("NettyChatServer 关闭了");
}
}