当前位置: 首页>>代码示例>>Java>>正文


Java Logger.setLevel方法代码示例

本文整理汇总了Java中org.apache.log4j.Logger.setLevel方法的典型用法代码示例。如果您正苦于以下问题:Java Logger.setLevel方法的具体用法?Java Logger.setLevel怎么用?Java Logger.setLevel使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.log4j.Logger的用法示例。


在下文中一共展示了Logger.setLevel方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: testAppWithNoContainers

import org.apache.log4j.Logger; //导入方法依赖的package包/类
@Test (timeout = 30000)
public void testAppWithNoContainers() throws Exception {
  // Debug-level root logging makes scheduler decisions visible if this fails.
  Logger rootLogger = LogManager.getRootLogger();
  rootLogger.setLevel(Level.DEBUG);

  MockRM resourceManager = new MockRM(conf);
  resourceManager.start();
  MockNM node = resourceManager.registerNode("h1:1234", 5120);

  RMApp application = resourceManager.submitApp(2000);

  // A node heartbeat triggers a scheduling pass.
  node.nodeHeartbeat(true);

  // Launch, register, and immediately unregister the AM: the app should
  // finish even though it never requested any containers.
  RMAppAttempt appAttempt = application.getCurrentAppAttempt();
  MockAM appMaster = resourceManager.sendAMLaunched(appAttempt.getAppAttemptId());
  appMaster.registerAppAttempt();
  appMaster.unregisterAppAttempt();
  node.nodeHeartbeat(appAttempt.getAppAttemptId(), 1, ContainerState.COMPLETE);
  appMaster.waitForState(RMAppAttemptState.FINISHED);
  resourceManager.stop();
}
 
开发者ID:naver,项目名称:hadoop,代码行数:22,代码来源:TestRM.java

示例2: configureLogging

import org.apache.log4j.Logger; //导入方法依赖的package包/类
public static boolean configureLogging() {
	// Levels are stored in the database; nothing to do until the DAO layer is up.
	if (!_RootDAO.isConfigured()) return false;

	org.hibernate.Session hibSession = ApplicationConfigDAO.getInstance().createNewSession();
	try {
		// Each matching row maps a logger name (the key suffix) to a log4j level.
		for (ApplicationConfig config: (List<ApplicationConfig>)hibSession.createQuery("from ApplicationConfig where key like 'log4j.logger.%'").list()) {
			String key = config.getKey();
			Level level = Level.toLevel(config.getValue());
			Logger logger;
			if ("log4j.logger.root".equals(key)) {
				logger = Logger.getRootLogger();
			} else {
				logger = Logger.getLogger(key.substring("log4j.logger.".length()));
			}
			logger.setLevel(level);
			Debug.info("Logging level for " + logger.getName() + " set to " + level);
		}
	} finally {
		hibSession.close();
	}

	return true;
}
 
开发者ID:Jenner4S,项目名称:unitimes,代码行数:19,代码来源:ApplicationConfig.java

示例3: setLoggingLevel

import org.apache.log4j.Logger; //导入方法依赖的package包/类
@Override
public void setLoggingLevel(String name, Integer level) {
	sLog.info("Set logging level for " + (name == null ? "root" : name) + " to " + (level == null ? "null" : Level.toLevel(level)));
	// A null name targets the root logger; a null level clears the override.
	Logger logger = (name == null ? Logger.getRootLogger() : Logger.getLogger(name));
	logger.setLevel(level == null ? null : Level.toLevel(level));
}
 
开发者ID:Jenner4S,项目名称:unitimes,代码行数:10,代码来源:AbstractSolverServer.java

示例4: run

import org.apache.log4j.Logger; //导入方法依赖的package包/类
@Override public void run() {
  // Keep the log4j "zipkin" logger in sync with the configured level.
  Logger zipkinLogger = LogManager.getLogger("zipkin");
  boolean levelChanged = !log4Jlevel().equals(zipkinLogger.getLevel());
  if (levelChanged) {
    zipkinLogger.setLevel(log4Jlevel());
    // An additive logger relays to root appenders; copy them so output is kept
    // even if additivity is later disabled.
    if (zipkinLogger.getAdditivity()) {
      addLogAppendersFromRoot(zipkinLogger);
    }
  }
  // Mirror the level onto java.util.logging as well.
  java.util.logging.Logger.getLogger("zipkin").setLevel(julLevel());
}
 
开发者ID:openzipkin,项目名称:zipkin-sparkstreaming,代码行数:11,代码来源:LogInitializer.java

示例5: main

import org.apache.log4j.Logger; //导入方法依赖的package包/类
public static void main(String[] args) {
	// Required on Windows when Hadoop binaries live outside the default path.
	System.setProperty("hadoop.home.dir", "E:\\sumitK\\Hadoop");

	SparkSession session = SparkSession
			.builder()
			.master("local")
			.config("spark.sql.warehouse.dir","file:///E:/sumitK/Hadoop/warehouse")
			.appName("JavaALSExample")
			.getOrCreate();

	// Suppress INFO/DEBUG chatter so the dataset output stays readable.
	Logger rootLogger = LogManager.getRootLogger();
	rootLogger.setLevel(Level.WARN);

	// spark-xml reader options: every <food> element becomes one row,
	// and malformed records fail fast instead of being dropped.
	HashMap<String, String> readerOptions = new HashMap<String, String>();
	readerOptions.put("rowTag", "food");
	readerOptions.put("failFast", "true");

	Dataset<Row> menu = session.read()
			.format("com.databricks.spark.xml")
			.options(readerOptions)
			.load("C:/Users/sumit.kumar/git/learning/src/main/resources/breakfast_menu.xml");

	menu.printSchema();
	menu.show();

	// Round-trip: write the same rows back out as XML.
	menu.write().format("com.databricks.spark.xml")
			.option("rootTag", "food")
			.option("rowTag", "food")
			.save("C:/Users/sumit.kumar/git/learning/src/main/resources/newMenu.xml");
}
 
开发者ID:PacktPublishing,项目名称:Apache-Spark-2x-for-Java-Developers,代码行数:31,代码来源:XMLFileOperations.java

示例6: testApplicationType

import org.apache.log4j.Logger; //导入方法依赖的package包/类
@Test(timeout = 30000)
public void testApplicationType() throws Exception {
  // Verbose logging aids diagnosis when the assertions below fail.
  LogManager.getRootLogger().setLevel(Level.DEBUG);

  MockRM resourceManager = new MockRM();
  resourceManager.start();

  // Default submission: the application type should fall back to "YARN".
  RMApp defaultApp = resourceManager.submitApp(2000);
  // Explicit submission carrying an application type of "MAPREDUCE".
  RMApp mapReduceApp =
      resourceManager.submitApp(200, "name", "user",
        new HashMap<ApplicationAccessType, String>(), false, "default", -1,
        null, "MAPREDUCE");

  Assert.assertEquals("YARN", defaultApp.getApplicationType());
  Assert.assertEquals("MAPREDUCE", mapReduceApp.getApplicationType());
  resourceManager.stop();
}
 
开发者ID:naver,项目名称:hadoop,代码行数:16,代码来源:TestYarnClient.java

示例7: setup

import org.apache.log4j.Logger; //导入方法依赖的package包/类
@Before
public void setup() throws UnknownHostException {
  // Run every test with verbose ResourceManager logging enabled.
  LogManager.getRootLogger().setLevel(Level.DEBUG);

  conf = new YarnConfiguration();
  UserGroupInformation.setConfiguration(conf);
  // Pin the AM retry count to the framework default so tests are deterministic.
  conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
      YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
}
 
开发者ID:naver,项目名称:hadoop,代码行数:10,代码来源:TestContainerResourceUsage.java

示例8: setRootLevel

import org.apache.log4j.Logger; //导入方法依赖的package包/类
@Override
public void setRootLevel(String levelName) {
    checkNotNull(levelName, "levelName");
    LOGGER.debug("updating root logger level (name={})", levelName);
    // Level.toLevel falls back to DEBUG for unrecognized names.
    LogManager.getRootLogger().setLevel(Level.toLevel(levelName));
}
 
开发者ID:vy,项目名称:hrrs,代码行数:9,代码来源:Log4jLoggerLevelAccessor.java

示例9: main

import org.apache.log4j.Logger; //导入方法依赖的package包/类
/**
 * Demonstrates Spark Streaming over a local directory: the same folder is
 * consumed both as a text stream and as a Hadoop file stream, and each
 * micro-batch is printed to the console.
 */
public static void main(String[] args) {
	// Windows-specific property when Hadoop is not installed or HADOOP_HOME is unset.
	System.setProperty("hadoop.home.dir", "E:\\hadoop");

	SparkConf conf = new SparkConf().setAppName("KafkaExample").setMaster("local[*]");
	String inputDirectory = "E:\\hadoop\\streamFolder\\";

	JavaSparkContext sc = new JavaSparkContext(conf);
	// One-second micro-batches.
	JavaStreamingContext streamingContext = new JavaStreamingContext(sc, Durations.seconds(1));

	// Reduce log noise so the streamed records are visible on the console.
	Logger rootLogger = LogManager.getRootLogger();
	rootLogger.setLevel(Level.WARN);

	// Watch the directory as a text stream and echo every line.
	JavaDStream<String> streamfile = streamingContext.textFileStream(inputDirectory);
	streamfile.print();
	streamfile.foreachRDD(rdd -> rdd.foreach(x -> System.out.println(x)));

	// Same directory consumed through the lower-level Hadoop file-stream API.
	JavaPairDStream<LongWritable, Text> streamedFile = streamingContext.fileStream(
			inputDirectory, LongWritable.class, Text.class, TextInputFormat.class);
	streamedFile.print();

	streamingContext.start();

	try {
		streamingContext.awaitTermination();
	} catch (InterruptedException e) {
		// Fix: restore the interrupt status instead of swallowing the exception
		// with an auto-generated printStackTrace().
		Thread.currentThread().interrupt();
	}
}
 
开发者ID:PacktPublishing,项目名称:Apache-Spark-2x-for-Java-Developers,代码行数:33,代码来源:FileStreamingEx.java

示例10: testAllocateContainerOnNodeWithoutOffSwitchSpecified

import org.apache.log4j.Logger; //导入方法依赖的package包/类
// Regression test: allocating on a node when the AM never specified an
// off-switch (ANY) resource request must not crash the scheduler with an NPE.
@Test
public void testAllocateContainerOnNodeWithoutOffSwitchSpecified()
    throws Exception {
  // Debug logging makes the scheduler's allocation decisions visible.
  Logger rootLogger = LogManager.getRootLogger();
  rootLogger.setLevel(Level.DEBUG);
  
  MockRM rm = new MockRM(conf);
  rm.start();
  MockNM nm1 = rm.registerNode("127.0.0.1:1234", 6 * GB);

  RMApp app1 = rm.submitApp(2048);
  // kick the scheduling, 2 GB given to AM1, remaining 4GB on nm1
  nm1.nodeHeartbeat(true);
  RMAppAttempt attempt1 = app1.getCurrentAppAttempt();
  MockAM am1 = rm.sendAMLaunched(attempt1.getAppAttemptId());
  am1.registerAppAttempt();

  // Add node-local and rack-local requests only — deliberately no
  // off-switch (ResourceRequest.ANY) entry, which is the condition under test.
  List<ResourceRequest> requests = new ArrayList<ResourceRequest>();
  requests.add(am1.createResourceReq("127.0.0.1", 1 * GB, 1, 1));
  requests.add(am1.createResourceReq("/default-rack", 1 * GB, 1, 1));
  am1.allocate(requests, null); // send the request

  try {
    // kick the schedule — this is where a broken scheduler would NPE
    nm1.nodeHeartbeat(true);
  } catch (NullPointerException e) {
    Assert.fail("NPE when allocating container on node but "
        + "forget to set off-switch request should be handled");
  }
  rm.stop();
}
 
开发者ID:naver,项目名称:hadoop,代码行数:33,代码来源:TestFifoScheduler.java

示例11: setup

import org.apache.log4j.Logger; //导入方法依赖的package包/类
@Before
public void setup() throws UnknownHostException {
  // Verbose logging for easier diagnosis of recovery-related failures.
  LogManager.getRootLogger().setLevel(Level.DEBUG);

  conf = getConf();
  UserGroupInformation.setConfiguration(conf);

  // Enable work-preserving RM restart backed by the in-memory state store,
  // with no scheduling wait after recovery.
  conf.set(YarnConfiguration.RECOVERY_ENABLED, "true");
  conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
  conf.setBoolean(YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_ENABLED, true);
  conf.setLong(YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_SCHEDULING_WAIT_MS, 0);
  DefaultMetricsSystem.setMiniClusterMode(true);
}
 
开发者ID:naver,项目名称:hadoop,代码行数:13,代码来源:TestWorkPreservingRMRestart.java

示例12: verifyAuditLogsCheckPattern

import org.apache.log4j.Logger; //导入方法依赖的package包/类
/**
 * Reads back the audit log and verifies that it contains exactly
 * {@code ndupe} events, that at least one of them matches {@code pattern},
 * and that every event's success flag equals {@code expectSuccess}.
 *
 * @param expectSuccess expected success flag of every logged event
 * @param ndupe expected number of audit events in the log
 * @param pattern pattern at least one event line must match
 * @throws IOException if reading the audit log file fails
 */
private void verifyAuditLogsCheckPattern(boolean expectSuccess, int ndupe, Pattern pattern)
    throws IOException {
  // Turn off the audit logger so no further events are appended while reading.
  Logger logger = ((Log4JLogger) FSNamesystem.auditLog).getLogger();
  logger.setLevel(Level.OFF);

  // Close the appenders and force all buffered log records to be flushed.
  Enumeration<?> appenders = logger.getAllAppenders();
  while (appenders.hasMoreElements()) {
    Appender appender = (Appender) appenders.nextElement();
    appender.close();
  }

  boolean ret = true;
  boolean patternMatches = false;

  // Fix: try-with-resources replaces the manual finally/close, guaranteeing
  // the reader is closed even if construction of a later resource fails.
  try (BufferedReader reader = new BufferedReader(new FileReader(auditLogFile))) {
    for (int i = 0; i < ndupe; i++) {
      String line = reader.readLine();
      assertNotNull(line);
      patternMatches |= pattern.matcher(line).matches();
      ret &= successPattern.matcher(line).matches();
    }
    assertNull("Unexpected event in audit log", reader.readLine());
    assertTrue("Expected audit event not found in audit log", patternMatches);
    assertTrue("Expected success=" + expectSuccess, ret == expectSuccess);
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:33,代码来源:TestAuditLogs.java

示例13: getLogger

import org.apache.log4j.Logger; //导入方法依赖的package包/类
/**
 * Returns (creating and caching on first use) a logger that appends the
 * execution status of the given job to the supplied log file.
 *
 * @param jobID identifier of the running job; also used as the logger name
 * @param fileLogLocation path of the file the job logger writes to
 * @return {@link Logger} dedicated to the given job
 */
public Logger getLogger(String jobID,String fileLogLocation) {
	
	// Reuse the logger if one was already created for this job.
	if (executionTrackingLoggers.containsKey(jobID)) {
		return executionTrackingLoggers.get(jobID);
	}
	
	// Message-only layout: tracked status lines carry no timestamp/level prefix.
	PatternLayout layout = new PatternLayout();
	layout.setConversionPattern("%m%n");
	
	// File appender pointed at the job-specific log location.
	FileAppender fileAppender = new FileAppender();
	fileAppender.setFile(fileLogLocation);
	fileAppender.setLayout(layout);
	fileAppender.activateOptions();
	
	Logger logger = Logger.getLogger(jobID);
	logger.setLevel(Level.DEBUG);
	logger.addAppender(fileAppender);
	// Fix: getRootLogger() is a static method — call it via the class rather
	// than through an instance reference.
	// NOTE(review): disabling root additivity is a process-global side effect
	// performed on every first call per job — confirm this is intended.
	Logger.getRootLogger().setAdditivity(false);
	logger.setAdditivity(false);
	executionTrackingLoggers.put(jobID, logger);
	
	return logger;
}
 
开发者ID:capitalone,项目名称:Hydrograph,代码行数:35,代码来源:ExecutionTrackingLogger.java

示例14: testAppOnMultiNode

import org.apache.log4j.Logger; //导入方法依赖的package包/类
// Verifies that container allocation spills over to a second node once the
// first node's capacity is exhausted.
@Test (timeout = 30000)
public void testAppOnMultiNode() throws Exception {
  // Debug logging makes per-heartbeat scheduling decisions visible.
  Logger rootLogger = LogManager.getRootLogger();
  rootLogger.setLevel(Level.DEBUG);
  // Disable node-locality delay so containers are assigned immediately.
  conf.set("yarn.scheduler.capacity.node-locality-delay", "-1");
  MockRM rm = new MockRM(conf);
  rm.start();
  // Two nodes: h1 with 5 GB, h2 with 10 GB.
  MockNM nm1 = rm.registerNode("h1:1234", 5120);
  MockNM nm2 = rm.registerNode("h2:5678", 10240);
  
  RMApp app = rm.submitApp(2000);

  //kick the scheduling
  nm1.nodeHeartbeat(true);

  RMAppAttempt attempt = app.getCurrentAppAttempt();
  MockAM am = rm.sendAMLaunched(attempt.getAppAttemptId());
  am.registerAppAttempt();
  
  // Request 13 containers of 1000 MB each on h1 — more than either node holds.
  int request = 13;
  am.allocate("h1" , 1000, request, new ArrayList<ContainerId>());
  
  //kick the scheduler
  List<Container> conts = am.allocate(new ArrayList<ResourceRequest>(),
      new ArrayList<ContainerId>()).getAllocatedContainers();
  int contReceived = conts.size();
  // Heartbeat h1 until its capacity is used up (AM took 2 GB, 3 GB left).
  while (contReceived < 3) {//only 3 containers are available on node1
    nm1.nodeHeartbeat(true);
    conts.addAll(am.allocate(new ArrayList<ResourceRequest>(),
        new ArrayList<ContainerId>()).getAllocatedContainers());
    contReceived = conts.size();
    LOG.info("Got " + contReceived + " containers. Waiting to get " + 3);
    Thread.sleep(WAIT_SLEEP_MS);
  }
  Assert.assertEquals(3, conts.size());

  // Now heartbeat h2: the remaining requests should be satisfied there,
  // bringing the total to 10 allocated containers.
  //send node2 heartbeat
  conts = am.allocate(new ArrayList<ResourceRequest>(),
      new ArrayList<ContainerId>()).getAllocatedContainers();
  contReceived = conts.size();
  while (contReceived < 10) {
    nm2.nodeHeartbeat(true);
    conts.addAll(am.allocate(new ArrayList<ResourceRequest>(),
        new ArrayList<ContainerId>()).getAllocatedContainers());
    contReceived = conts.size();
    LOG.info("Got " + contReceived + " containers. Waiting to get " + 10);
    Thread.sleep(WAIT_SLEEP_MS);
  }
  Assert.assertEquals(10, conts.size());

  // Clean shutdown: unregister the AM and wait for the attempt to finish.
  am.unregisterAppAttempt();
  nm1.nodeHeartbeat(attempt.getAppAttemptId(), 1, ContainerState.COMPLETE);
  am.waitForState(RMAppAttemptState.FINISHED);

  rm.stop();
}
 
开发者ID:naver,项目名称:hadoop,代码行数:58,代码来源:TestRM.java

示例15: setLogLevel

import org.apache.log4j.Logger; //导入方法依赖的package包/类
/**
 * Sets the level of the given log4j logger.
 *
 * @param logger the logger to adjust
 * @param level the level to apply
 */
public static void setLogLevel(Logger logger, Level level) {
  logger.setLevel(level);
}
 
开发者ID:nucypher,项目名称:hadoop-oss,代码行数:4,代码来源:GenericTestUtils.java


注:本文中的org.apache.log4j.Logger.setLevel方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。