本文整理汇总了Java中org.apache.log4j.Logger.setLevel方法的典型用法代码示例。如果您正苦于以下问题:Java Logger.setLevel方法的具体用法?Java Logger.setLevel怎么用?Java Logger.setLevel使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.log4j.Logger
的用法示例。
在下文中一共展示了Logger.setLevel方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: testAppWithNoContainers
import org.apache.log4j.Logger; //导入方法依赖的package包/类
@Test (timeout = 30000)
public void testAppWithNoContainers() throws Exception {
    // Verbose scheduler output helps diagnose failures within the 30s timeout.
    LogManager.getRootLogger().setLevel(Level.DEBUG);

    MockRM resourceManager = new MockRM(conf);
    resourceManager.start();
    MockNM nodeManager = resourceManager.registerNode("h1:1234", 5120);

    RMApp application = resourceManager.submitApp(2000);
    // A node heartbeat triggers a scheduling pass.
    nodeManager.nodeHeartbeat(true);

    // Launch and register the AM, then unregister it without ever
    // requesting containers.
    RMAppAttempt appAttempt = application.getCurrentAppAttempt();
    MockAM appMaster = resourceManager.sendAMLaunched(appAttempt.getAppAttemptId());
    appMaster.registerAppAttempt();
    appMaster.unregisterAppAttempt();

    // Report the AM container complete and wait for the attempt to finish.
    nodeManager.nodeHeartbeat(appAttempt.getAppAttemptId(), 1, ContainerState.COMPLETE);
    appMaster.waitForState(RMAppAttemptState.FINISHED);
    resourceManager.stop();
}
示例2: configureLogging
import org.apache.log4j.Logger; //导入方法依赖的package包/类
/**
 * Applies log4j logger levels stored in the ApplicationConfig table.
 * Only entries whose key starts with "log4j.logger." are read; the special
 * key "log4j.logger.root" targets the root logger.
 *
 * @return false when the DAO layer is not configured yet, true otherwise
 */
public static boolean configureLogging() {
    if (!_RootDAO.isConfigured()) return false;
    org.hibernate.Session hibSession = ApplicationConfigDAO.getInstance().createNewSession();
    try {
        List<ApplicationConfig> entries = (List<ApplicationConfig>) hibSession
                .createQuery("from ApplicationConfig where key like 'log4j.logger.%'").list();
        for (ApplicationConfig entry : entries) {
            Level requestedLevel = Level.toLevel(entry.getValue());
            boolean isRoot = "log4j.logger.root".equals(entry.getKey());
            Logger target = isRoot
                    ? Logger.getRootLogger()
                    : Logger.getLogger(entry.getKey().substring("log4j.logger.".length()));
            target.setLevel(requestedLevel);
            Debug.info("Logging level for " + target.getName() + " set to " + requestedLevel);
        }
    } finally {
        // Always release the Hibernate session, even if a level lookup throws.
        hibSession.close();
    }
    return true;
}
示例3: setLoggingLevel
import org.apache.log4j.Logger; //导入方法依赖的package包/类
/**
 * Adjusts the level of a named log4j logger, or the root logger when
 * {@code name} is null. A null {@code level} clears the explicit level.
 */
@Override
public void setLoggingLevel(String name, Integer level) {
    sLog.info("Set logging level for " + (name == null ? "root" : name) + " to " + (level == null ? "null" : Level.toLevel(level)));
    Logger target = (name == null ? Logger.getRootLogger() : Logger.getLogger(name));
    target.setLevel(level == null ? null : Level.toLevel(level));
}
示例4: run
import org.apache.log4j.Logger; //导入方法依赖的package包/类
@Override public void run() {
    // Keep the "zipkin" log4j logger in sync with the configured level.
    Logger zipkinLogger = LogManager.getLogger("zipkin");
    Level desiredLevel = log4Jlevel();
    if (!desiredLevel.equals(zipkinLogger.getLevel())) {
        zipkinLogger.setLevel(desiredLevel);
        // Still inheriting appenders from the parent? Attach the root
        // appenders directly so output is not dropped.
        if (zipkinLogger.getAdditivity()) {
            addLogAppendersFromRoot(zipkinLogger);
        }
    }
    // Mirror the level onto the java.util.logging logger of the same name.
    java.util.logging.Logger.getLogger("zipkin").setLevel(julLevel());
}
示例5: main
import org.apache.log4j.Logger; //导入方法依赖的package包/类
public static void main(String[] args) {
    // Point Hadoop at a local install so winutils can be located on Windows.
    System.setProperty("hadoop.home.dir", "E:\\sumitK\\Hadoop");
    SparkSession sparkSession = SparkSession
            .builder()
            .master("local")
            .config("spark.sql.warehouse.dir","file:///E:/sumitK/Hadoop/warehouse")
            .appName("JavaALSExample")
            .getOrCreate();
    // Suppress Spark's INFO chatter; only warnings and above are logged.
    LogManager.getRootLogger().setLevel(Level.WARN);

    // spark-xml reader options: each <food> element maps to one row,
    // and parsing runs in fail-fast mode.
    HashMap<String, String> xmlOptions = new HashMap<String, String>();
    xmlOptions.put("rowTag", "food");
    xmlOptions.put("failFast", "true");

    Dataset<Row> menu = sparkSession.read()
            .format("com.databricks.spark.xml")
            .options(xmlOptions)
            .load("C:/Users/sumit.kumar/git/learning/src/main/resources/breakfast_menu.xml");
    menu.printSchema();
    menu.show();

    // Write the dataset back out as XML under a <food> root element.
    menu.write().format("com.databricks.spark.xml")
            .option("rootTag", "food")
            .option("rowTag", "food")
            .save("C:/Users/sumit.kumar/git/learning/src/main/resources/newMenu.xml");
}
示例6: testApplicationType
import org.apache.log4j.Logger; //导入方法依赖的package包/类
@Test(timeout = 30000)
public void testApplicationType() throws Exception {
    // DEBUG-level root logging for easier diagnosis if the test times out.
    LogManager.getRootLogger().setLevel(Level.DEBUG);

    MockRM resourceManager = new MockRM();
    resourceManager.start();
    // First app relies on the default submission parameters.
    RMApp defaultApp = resourceManager.submitApp(2000);
    // Second app explicitly declares its application type.
    RMApp mapReduceApp =
        resourceManager.submitApp(200, "name", "user",
            new HashMap<ApplicationAccessType, String>(), false, "default", -1,
            null, "MAPREDUCE");

    // An unspecified type is reported as "YARN"; an explicit one is preserved.
    Assert.assertEquals("YARN", defaultApp.getApplicationType());
    Assert.assertEquals("MAPREDUCE", mapReduceApp.getApplicationType());
    resourceManager.stop();
}
示例7: setup
import org.apache.log4j.Logger; //导入方法依赖的package包/类
@Before
public void setup() throws UnknownHostException {
    // Run the tests with DEBUG-level root logging.
    LogManager.getRootLogger().setLevel(Level.DEBUG);
    conf = new YarnConfiguration();
    UserGroupInformation.setConfiguration(conf);
    // Pin the AM max-attempts setting to its default value explicitly.
    conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
        YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
}
示例8: setRootLevel
import org.apache.log4j.Logger; //导入方法依赖的package包/类
/**
 * Updates the root log4j logger to the level named by {@code levelName}.
 * Unrecognized names fall back to {@code Level.toLevel}'s default.
 *
 * @param levelName textual level name, e.g. "INFO"; must not be null
 */
@Override
public void setRootLevel(String levelName) {
    checkNotNull(levelName, "levelName");
    LOGGER.debug("updating root logger level (name={})", levelName);
    Level parsed = Level.toLevel(levelName);
    LogManager.getRootLogger().setLevel(parsed);
}
示例9: main
import org.apache.log4j.Logger; //导入方法依赖的package包/类
public static void main(String[] args) {
    // Windows-specific property when Hadoop is not installed or HADOOP_HOME is not set.
    System.setProperty("hadoop.home.dir", "E:\\hadoop");
    SparkConf conf = new SparkConf().setAppName("KafkaExample").setMaster("local[*]");
    String inputDirectory="E:\\hadoop\\streamFolder\\";
    JavaSparkContext sc = new JavaSparkContext(conf);
    // One-second micro-batches.
    JavaStreamingContext streamingContext = new JavaStreamingContext(sc, Durations.seconds(1));
    // Quiet Spark's INFO chatter; only warnings and above are logged.
    Logger rootLogger = LogManager.getRootLogger();
    rootLogger.setLevel(Level.WARN);
    // Text stream: print each batch and also echo every line to stdout.
    JavaDStream<String> streamfile = streamingContext.textFileStream(inputDirectory);
    streamfile.print();
    streamfile.foreachRDD(rdd-> rdd.foreach(x -> System.out.println(x)));
    // Same directory consumed as (offset, line) pairs via TextInputFormat.
    JavaPairDStream<LongWritable, Text> streamedFile = streamingContext.fileStream(inputDirectory, LongWritable.class, Text.class, TextInputFormat.class);
    streamedFile.print();
    streamingContext.start();
    try {
        streamingContext.awaitTermination();
    } catch (InterruptedException e) {
        // Restore the interrupt flag so callers can observe the interruption,
        // instead of swallowing it with a bare printStackTrace().
        Thread.currentThread().interrupt();
    }
}
示例10: testAllocateContainerOnNodeWithoutOffSwitchSpecified
import org.apache.log4j.Logger; //导入方法依赖的package包/类
@Test
public void testAllocateContainerOnNodeWithoutOffSwitchSpecified()
    throws Exception {
    // DEBUG-level root logging for scheduler diagnostics.
    LogManager.getRootLogger().setLevel(Level.DEBUG);

    MockRM resourceManager = new MockRM(conf);
    resourceManager.start();
    MockNM node = resourceManager.registerNode("127.0.0.1:1234", 6 * GB);

    RMApp application = resourceManager.submitApp(2048);
    // kick the scheduling, 2 GB given to AM1, remaining 4GB on nm1
    node.nodeHeartbeat(true);
    RMAppAttempt appAttempt = application.getCurrentAppAttempt();
    MockAM appMaster = resourceManager.sendAMLaunched(appAttempt.getAppAttemptId());
    appMaster.registerAppAttempt();

    // Ask for node-local and rack-local containers but deliberately omit
    // the off-switch (ANY) request.
    List<ResourceRequest> resourceRequests = new ArrayList<ResourceRequest>();
    resourceRequests.add(appMaster.createResourceReq("127.0.0.1", 1 * GB, 1, 1));
    resourceRequests.add(appMaster.createResourceReq("/default-rack", 1 * GB, 1, 1));
    appMaster.allocate(resourceRequests, null); // send the request

    try {
        // kick the schedule
        node.nodeHeartbeat(true);
    } catch (NullPointerException e) {
        Assert.fail("NPE when allocating container on node but "
            + "forget to set off-switch request should be handled");
    }
    resourceManager.stop();
}
示例11: setup
import org.apache.log4j.Logger; //导入方法依赖的package包/类
@Before
public void setup() throws UnknownHostException {
    // DEBUG-level root logging for recovery diagnostics.
    LogManager.getRootLogger().setLevel(Level.DEBUG);
    conf = getConf();
    UserGroupInformation.setConfiguration(conf);
    // Enable work-preserving RM recovery backed by the in-memory state
    // store, with no extra scheduling wait after recovery.
    conf.set(YarnConfiguration.RECOVERY_ENABLED, "true");
    conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
    conf.setBoolean(YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_ENABLED, true);
    conf.setLong(YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_SCHEDULING_WAIT_MS, 0);
    DefaultMetricsSystem.setMiniClusterMode(true);
}
示例12: verifyAuditLogsCheckPattern
import org.apache.log4j.Logger; //导入方法依赖的package包/类
/**
 * Reads back the audit log file and verifies its contents.
 *
 * @param expectSuccess whether every logged event must match successPattern
 * @param ndupe expected number of audit log lines
 * @param pattern at least one line must match this pattern
 * @throws IOException if the audit log cannot be read
 */
private void verifyAuditLogsCheckPattern(boolean expectSuccess, int ndupe, Pattern pattern)
    throws IOException {
    // Turn off the audit log so no further events are appended while we read it.
    Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger();
    logger.setLevel(Level.OFF);
    // Close the appenders to force all buffered audit records to be flushed.
    Enumeration<?> appenders = logger.getAllAppenders();
    while (appenders.hasMoreElements()) {
        Appender appender = (Appender)appenders.nextElement();
        appender.close();
    }
    // try-with-resources guarantees the reader is closed even if an assertion
    // fails; the original opened the reader outside the try block, leaking the
    // FileReader if BufferedReader allocation had failed.
    boolean ret = true;
    boolean patternMatches = false;
    try (BufferedReader reader = new BufferedReader(new FileReader(auditLogFile))) {
        for (int i = 0; i < ndupe; i++) {
            String line = reader.readLine();
            assertNotNull(line);
            patternMatches |= pattern.matcher(line).matches();
            ret &= successPattern.matcher(line).matches();
        }
        assertNull("Unexpected event in audit log", reader.readLine());
        assertTrue("Expected audit event not found in audit log", patternMatches);
        assertTrue("Expected success=" + expectSuccess, ret == expectSuccess);
    }
}
示例13: getLogger
import org.apache.log4j.Logger; //导入方法依赖的package包/类
/**
 * Gets (creating and caching on first use) a logger for tracking the
 * execution status of the running job. The logger writes raw messages
 * ("%m%n") at DEBUG level to the given file.
 *
 * @param jobID id of the job; also used as the logger name and cache key
 * @param fileLogLocation path of the file the logger appends to
 * @return {@link Logger}
 */
public Logger getLogger(String jobID,String fileLogLocation) {
    // NOTE(review): this check-then-act on the cache map is not thread-safe;
    // concurrent calls with the same jobID may each build a logger. Confirm
    // callers are single-threaded, or switch to a ConcurrentHashMap with
    // computeIfAbsent.
    if (executionTrackingLoggers.containsKey(jobID)) {
        return executionTrackingLoggers.get(jobID);
    }
    // Message-only pattern: each log line is the raw message plus a newline.
    PatternLayout layout = new PatternLayout();
    layout.setConversionPattern("%m%n");
    // File appender writing the tracking log for this job.
    FileAppender fileAppender = new FileAppender();
    fileAppender.setFile(fileLogLocation);
    fileAppender.setLayout(layout);
    fileAppender.activateOptions();
    // Configure a per-job logger (the old comment wrongly said "root logger").
    Logger logger = Logger.getLogger(jobID);
    logger.setLevel(Level.DEBUG);
    logger.addAppender(fileAppender);
    // getRootLogger() is static; call it via the class, not an instance.
    Logger.getRootLogger().setAdditivity(false);
    logger.setAdditivity(false);
    executionTrackingLoggers.put(jobID, logger);
    return logger;
}
示例14: testAppOnMultiNode
import org.apache.log4j.Logger; //导入方法依赖的package包/类
@Test (timeout = 30000)
public void testAppOnMultiNode() throws Exception {
// Verbose scheduler logging helps diagnose timeouts in this allocation-heavy test.
Logger rootLogger = LogManager.getRootLogger();
rootLogger.setLevel(Level.DEBUG);
// Disable the node-locality delay so containers can be assigned immediately.
conf.set("yarn.scheduler.capacity.node-locality-delay", "-1");
MockRM rm = new MockRM(conf);
rm.start();
// Two nodes: 5120 MB on h1 and 10240 MB on h2.
MockNM nm1 = rm.registerNode("h1:1234", 5120);
MockNM nm2 = rm.registerNode("h2:5678", 10240);
RMApp app = rm.submitApp(2000);
//kick the scheduling
nm1.nodeHeartbeat(true);
RMAppAttempt attempt = app.getCurrentAppAttempt();
MockAM am = rm.sendAMLaunched(attempt.getAppAttemptId());
am.registerAppAttempt();
//request for containers
int request = 13;
am.allocate("h1" , 1000, request, new ArrayList<ContainerId>());
//kick the scheduler
List<Container> conts = am.allocate(new ArrayList<ResourceRequest>(),
new ArrayList<ContainerId>()).getAllocatedContainers();
int contReceived = conts.size();
// Heartbeat node1 until it has handed out its capacity.
while (contReceived < 3) {//only 3 containers are available on node1
nm1.nodeHeartbeat(true);
// Empty allocate() calls just poll the AM protocol for newly assigned containers.
conts.addAll(am.allocate(new ArrayList<ResourceRequest>(),
new ArrayList<ContainerId>()).getAllocatedContainers());
contReceived = conts.size();
LOG.info("Got " + contReceived + " containers. Waiting to get " + 3);
Thread.sleep(WAIT_SLEEP_MS);
}
Assert.assertEquals(3, conts.size());
//send node2 heartbeat
conts = am.allocate(new ArrayList<ResourceRequest>(),
new ArrayList<ContainerId>()).getAllocatedContainers();
contReceived = conts.size();
// Heartbeat node2 until 10 containers have been collected in this phase.
while (contReceived < 10) {
nm2.nodeHeartbeat(true);
conts.addAll(am.allocate(new ArrayList<ResourceRequest>(),
new ArrayList<ContainerId>()).getAllocatedContainers());
contReceived = conts.size();
LOG.info("Got " + contReceived + " containers. Waiting to get " + 10);
Thread.sleep(WAIT_SLEEP_MS);
}
Assert.assertEquals(10, conts.size());
// Finish the attempt and confirm the RM observes completion.
am.unregisterAppAttempt();
nm1.nodeHeartbeat(attempt.getAppAttemptId(), 1, ContainerState.COMPLETE);
am.waitForState(RMAppAttemptState.FINISHED);
rm.stop();
}
示例15: setLogLevel
import org.apache.log4j.Logger; //导入方法依赖的package包/类
/**
 * Sets the threshold of the given log4j logger.
 *
 * @param logger the logger to configure
 * @param level the level to assign; passed straight through to
 *              {@code Logger.setLevel} (may be null to clear the level)
 */
public static void setLogLevel(Logger logger, Level level) {
logger.setLevel(level);
}