

Java LogManager.getRootLogger Method Code Examples

This article collects typical usage examples of the org.apache.log4j.LogManager.getRootLogger method in Java. If you are wondering what LogManager.getRootLogger does, how to call it, or what working examples look like, the curated code examples below may help. You can also explore further usage examples of the enclosing class, org.apache.log4j.LogManager.


The following presents 15 code examples of the LogManager.getRootLogger method, sorted by popularity by default.
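Before turning to the project-specific examples, here is a minimal, self-contained sketch of the pattern most of them share: obtain the log4j 1.x root logger through LogManager.getRootLogger, adjust its level, and walk its appenders. The class name RootLoggerDemo is purely illustrative and assumes only that log4j 1.x is on the classpath.

import java.util.Enumeration;

import org.apache.log4j.Appender;
import org.apache.log4j.Level;
import org.apache.log4j.LogManager;
import org.apache.log4j.Logger;

public class RootLoggerDemo {
    public static void main(String[] args) {
        // The root logger sits at the top of the log4j logger hierarchy.
        Logger rootLogger = LogManager.getRootLogger();

        // Common use #1 (see the Spark examples below): raise or lower the global log level.
        rootLogger.setLevel(Level.WARN);

        // Common use #2 (see the Log4jLoggerAdapter/LogPageHandler examples): inspect its appenders.
        Enumeration<?> appenders = rootLogger.getAllAppenders();
        while (appenders.hasMoreElements()) {
            Appender appender = (Appender) appenders.nextElement();
            System.out.println("Root appender: " + appender.getName());
        }
    }
}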

Example 1: Log4jLoggerAdapter

import org.apache.log4j.LogManager; // import the package/class this method depends on
@SuppressWarnings("unchecked")
public Log4jLoggerAdapter() {
    try {
        // Find the first FileAppender attached to the root logger and remember its log file.
        org.apache.log4j.Logger logger = LogManager.getRootLogger();
        if (logger != null) {
            Enumeration<Appender> appenders = logger.getAllAppenders();
            if (appenders != null) {
                while (appenders.hasMoreElements()) {
                    Appender appender = appenders.nextElement();
                    if (appender instanceof FileAppender) {
                        FileAppender fileAppender = (FileAppender) appender;
                        String filename = fileAppender.getFile();
                        file = new File(filename);
                        break;
                    }
                }
            }
        }
    } catch (Throwable t) {
        // ignore: the adapter simply reports no log file
    }
}
 
Developer: eXcellme, Project: eds, Lines of code: 22, Source file: Log4jLoggerAdapter.java

Example 2: LogPageHandler

import org.apache.log4j.LogManager; // import the package/class this method depends on
@SuppressWarnings("unchecked")
public LogPageHandler() {
    try {
        org.apache.log4j.Logger logger = LogManager.getRootLogger();
        if (logger != null) {
            Enumeration<Appender> appenders = logger.getAllAppenders();
            if (appenders != null) {
                while (appenders.hasMoreElements()) {
                    Appender appender = appenders.nextElement();
                    if (appender instanceof FileAppender) {
                        FileAppender fileAppender = (FileAppender) appender;
                        String filename = fileAppender.getFile();
                        file = new File(filename);
                        break;
                    }
                }
            }
        }
    } catch (Throwable t) {
    }
}
 
Developer: dachengxi, Project: EatDubbo, Lines of code: 22, Source file: LogPageHandler.java

Example 3: main

import org.apache.log4j.LogManager; // import the package/class this method depends on
public static void main(String[] args) {
    System.setProperty("hadoop.home.dir", "E:\\sumitK\\Hadoop");

    SparkSession sparkSession = SparkSession
            .builder()
            .master("local")
            .config("spark.sql.warehouse.dir", "file:///E:/sumitK/Hadoop/warehouse")
            .appName("JavaALSExample")
            .getOrCreate();
    Logger rootLogger = LogManager.getRootLogger();
    rootLogger.setLevel(Level.WARN);

    HashMap<String, String> params = new HashMap<String, String>();
    params.put("rowTag", "food");
    params.put("failFast", "true");
    Dataset<Row> docDF = sparkSession.read()
            .format("com.databricks.spark.xml")
            .options(params)
            .load("C:/Users/sumit.kumar/git/learning/src/main/resources/breakfast_menu.xml");

    docDF.printSchema();
    docDF.show();

    docDF.write().format("com.databricks.spark.xml")
            .option("rootTag", "food")
            .option("rowTag", "food")
            .save("C:/Users/sumit.kumar/git/learning/src/main/resources/newMenu.xml");
}
 
Developer: PacktPublishing, Project: Apache-Spark-2x-for-Java-Developers, Lines of code: 31, Source file: XMLFileOperations.java

Example 4: setup

import org.apache.log4j.LogManager; // import the package/class this method depends on
@Before
public void setup() throws UnknownHostException {
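  // Run the test with the log4j root logger at DEBUG so recovery details show up in the test output.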
  Logger rootLogger = LogManager.getRootLogger();
  rootLogger.setLevel(Level.DEBUG);
  conf = getConf();
  UserGroupInformation.setConfiguration(conf);
  conf.set(YarnConfiguration.RECOVERY_ENABLED, "true");
  conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
  conf.setBoolean(YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_ENABLED, true);
  conf.setLong(YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_SCHEDULING_WAIT_MS, 0);
  DefaultMetricsSystem.setMiniClusterMode(true);
}
 
Developer: naver, Project: hadoop, Lines of code: 13, Source file: TestWorkPreservingRMRestart.java

Example 5: testApplicationType

import org.apache.log4j.LogManager; // import the package/class this method depends on
@Test(timeout = 30000)
public void testApplicationType() throws Exception {
  Logger rootLogger = LogManager.getRootLogger();
  rootLogger.setLevel(Level.DEBUG);
  MockRM rm = new MockRM();
  rm.start();
  RMApp app = rm.submitApp(2000);
  RMApp app1 =
      rm.submitApp(200, "name", "user",
        new HashMap<ApplicationAccessType, String>(), false, "default", -1,
        null, "MAPREDUCE");
  Assert.assertEquals("YARN", app.getApplicationType());
  Assert.assertEquals("MAPREDUCE", app1.getApplicationType());
  rm.stop();
}
 
Developer: naver, Project: hadoop, Lines of code: 16, Source file: TestYarnClient.java

Example 6: main

import org.apache.log4j.LogManager; // import the package/class this method depends on
public static void main(String[] args) throws Exception {

    System.setProperty("hadoop.home.dir", "E:\\hadoop");

    SparkConf sparkConf = new SparkConf().setAppName("WordCountSocketEx").setMaster("local[*]");
    JavaStreamingContext streamingContext = new JavaStreamingContext(sparkConf, Durations.seconds(1));
    Logger rootLogger = LogManager.getRootLogger();
    rootLogger.setLevel(Level.WARN);

    List<Tuple2<String, Integer>> tuples = Arrays.asList(new Tuple2<>("hello", 10), new Tuple2<>("world", 10));
    JavaPairRDD<String, Integer> initialRDD = streamingContext.sparkContext().parallelizePairs(tuples);

    JavaReceiverInputDStream<String> StreamingLines = streamingContext.socketTextStream("10.0.75.1", Integer.parseInt("9000"), StorageLevels.MEMORY_AND_DISK_SER);

    JavaDStream<String> words = StreamingLines.flatMap(str -> Arrays.asList(str.split(" ")).iterator());

    JavaPairDStream<String, Integer> wordCounts = words.mapToPair(str -> new Tuple2<>(str, 1)).reduceByKey((count1, count2) -> count1 + count2);

    wordCounts.print();

    JavaPairDStream<String, Integer> joinedDstream = wordCounts
            .transformToPair(new Function<JavaPairRDD<String, Integer>, JavaPairRDD<String, Integer>>() {
                @Override
                public JavaPairRDD<String, Integer> call(JavaPairRDD<String, Integer> rdd) throws Exception {
                    JavaPairRDD<String, Integer> modRDD = rdd.join(initialRDD).mapToPair(
                            new PairFunction<Tuple2<String, Tuple2<Integer, Integer>>, String, Integer>() {
                                @Override
                                public Tuple2<String, Integer> call(
                                        Tuple2<String, Tuple2<Integer, Integer>> joinedTuple) throws Exception {
                                    return new Tuple2<>(joinedTuple._1(), (joinedTuple._2()._1() + joinedTuple._2()._2()));
                                }
                            });
                    return modRDD;
                }
            });

    joinedDstream.print();
    streamingContext.start();
    streamingContext.awaitTermination();
}
 
Developer: PacktPublishing, Project: Apache-Spark-2x-for-Java-Developers, Lines of code: 41, Source file: WordCountTransformOpEx.java

Example 7: main

import org.apache.log4j.LogManager; // import the package/class this method depends on
public static void main(String[] args) {
   	// Windows-specific property: needed if Hadoop is not installed or HADOOP_HOME is not set
	 System.setProperty("hadoop.home.dir", "E:\\hadoop");
   	//Logger rootLogger = LogManager.getRootLogger();
  		//rootLogger.setLevel(Level.WARN); 
       SparkConf conf = new SparkConf().setAppName("KafkaExample").setMaster("local[*]");
       String inputDirectory="E:\\hadoop\\streamFolder\\";
    
       JavaSparkContext sc = new JavaSparkContext(conf);
       JavaStreamingContext streamingContext = new JavaStreamingContext(sc, Durations.seconds(1));
      // streamingContext.checkpoint("E:\\hadoop\\checkpoint");
       Logger rootLogger = LogManager.getRootLogger();
  		rootLogger.setLevel(Level.WARN); 
  		
  		JavaDStream<String> streamfile = streamingContext.textFileStream(inputDirectory);
  		streamfile.print();
  		streamfile.foreachRDD(rdd-> rdd.foreach(x -> System.out.println(x)));
  		
  			   		
  		JavaPairDStream<LongWritable, Text> streamedFile = streamingContext.fileStream(inputDirectory, LongWritable.class, Text.class, TextInputFormat.class);
  	 streamedFile.print();
  		
  	 streamingContext.start();
  	 

       try {
		streamingContext.awaitTermination();
	} catch (InterruptedException e) {
		// TODO Auto-generated catch block
		e.printStackTrace();
	}
}
 
Developer: PacktPublishing, Project: Apache-Spark-2x-for-Java-Developers, Lines of code: 33, Source file: FileStreamingEx.java

Example 8: setup

import org.apache.log4j.LogManager; // import the package/class this method depends on
@Before
public void setup() throws IOException {
  conf = getConf();
  Logger rootLogger = LogManager.getRootLogger();
  rootLogger.setLevel(Level.DEBUG);
  UserGroupInformation.setConfiguration(conf);
  conf.setBoolean(YarnConfiguration.RECOVERY_ENABLED, true);
  conf.setBoolean(YarnConfiguration.RM_WORK_PRESERVING_RECOVERY_ENABLED, false);
  conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
  rmAddr = new InetSocketAddress("localhost", 8032);
  Assert.assertTrue(YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS > 1);
}
 
Developer: naver, Project: hadoop, Lines of code: 13, Source file: TestRMRestart.java

Example 9: setup

import org.apache.log4j.LogManager; // import the package/class this method depends on
@Before
public void setup() throws UnknownHostException {
  Logger rootLogger = LogManager.getRootLogger();
  rootLogger.setLevel(Level.DEBUG);
  conf = new YarnConfiguration();
  UserGroupInformation.setConfiguration(conf);
  conf.setInt(YarnConfiguration.RM_AM_MAX_ATTEMPTS,
      YarnConfiguration.DEFAULT_RM_AM_MAX_ATTEMPTS);
}
 
Developer: naver, Project: hadoop, Lines of code: 10, Source file: TestContainerResourceUsage.java

Example 10: setup

import org.apache.log4j.LogManager; // import the package/class this method depends on
@Before
public void setup() {
  Logger rootLogger = LogManager.getRootLogger();
  rootLogger.setLevel(Level.DEBUG);
  ExitUtil.disableSystemExit();
  conf = new YarnConfiguration();
  UserGroupInformation.setConfiguration(conf);
  conf.set(YarnConfiguration.RM_STORE, MemoryRMStateStore.class.getName());
  conf.set(YarnConfiguration.RM_SCHEDULER, FairScheduler.class.getName());
}
 
Developer: naver, Project: hadoop, Lines of code: 11, Source file: TestRMDelegationTokens.java

Example 11: main

import org.apache.log4j.LogManager; // import the package/class this method depends on
public static void main(String[] args) {
	  System.setProperty("hadoop.home.dir", "E:\\sumitK\\Hadoop");
		
      SparkSession sparkSession = SparkSession
      .builder()
      .master("local")
	  .config("spark.sql.warehouse.dir","file:///E:/sumitK/Hadoop/warehouse")
      .appName("JavaALSExample")
      .getOrCreate();
      Logger rootLogger = LogManager.getRootLogger();
		rootLogger.setLevel(Level.WARN); 

    JavaRDD<Movie> moviesRDD = sparkSession
      .read().textFile("C:/Users/sumit.kumar/git/learning/src/main/resources/movies.csv")
      .javaRDD().filter( str-> !(null==str))
      .filter(str-> !(str.length()==0))
      .filter(str-> !str.contains("movieId"))	      
      .map(str -> Movie.parseRating(str));
    
    moviesRDD.foreach(m -> System.out.println(m));
    
       Dataset<Row> csv_read = sparkSession.read().format("com.databricks.spark.csv")
	        		      .option("header", "true")
	        		      .option("inferSchema", "true")
	        		      .load("C:/Users/sumit.kumar/git/learning/src/main/resources/movies.csv");
	       
	       csv_read.printSchema();
	       
	       csv_read.show();
	       
	       
	       StructType customSchema = new StructType(new StructField[] {
	    		    new StructField("movieId", DataTypes.LongType, true, Metadata.empty()),
	    		    new StructField("title", DataTypes.StringType, true, Metadata.empty()),
	    		    new StructField("genres", DataTypes.StringType, true, Metadata.empty())
	    		});
  
	       Dataset<Row> csv_custom_read = sparkSession.read().format("com.databricks.spark.csv")
        		      .option("header", "true")
        		      .schema(customSchema)
        		      .load("C:/Users/sumit.kumar/git/learning/src/main/resources/movies.csv");
       
	       csv_custom_read.printSchema();
       
	       csv_custom_read.show(); 
	       
	       
	       csv_custom_read.write()
	       .format("com.databricks.spark.csv")
	       .option("header", "true")
	       .option("codec", "org.apache.hadoop.io.compress.GzipCodec")
	       .save("C:/Users/sumit.kumar/git/learning/src/main/resources/newMovies.csv");
	       
}
 
Developer: PacktPublishing, Project: Apache-Spark-2x-for-Java-Developers, Lines of code: 55, Source file: CSVFileOperations.java

Example 12: main

import org.apache.log4j.LogManager; // import the package/class this method depends on
public static void main(String[] args) {
  	// Windows-specific property: needed if Hadoop is not installed or HADOOP_HOME is not set
 System.setProperty("hadoop.home.dir", "E:\\hadoop");
  	//Logger rootLogger = LogManager.getRootLogger();
 		//rootLogger.setLevel(Level.WARN); 
      SparkConf conf = new SparkConf().setAppName("KafkaExample").setMaster("local[*]");    
      JavaSparkContext sc = new JavaSparkContext(conf);
      JavaStreamingContext streamingContext = new JavaStreamingContext(sc, Durations.minutes(2));
      streamingContext.checkpoint("E:\\hadoop\\checkpoint");
      Logger rootLogger = LogManager.getRootLogger();
 		rootLogger.setLevel(Level.WARN); 
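      // Consumer settings for the Kafka direct stream: broker address, key/value deserializers, group id, and offset reset policy.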
      Map<String, Object> kafkaParams = new HashMap<>();
      kafkaParams.put("bootstrap.servers", "10.0.75.1:9092");
      kafkaParams.put("key.deserializer", StringDeserializer.class);
      kafkaParams.put("value.deserializer", StringDeserializer.class);
      kafkaParams.put("group.id", "use_a_separate_group_id_for_each_strea");
      kafkaParams.put("auto.offset.reset", "latest");
     // kafkaParams.put("enable.auto.commit", false);

      Collection<String> topics = Arrays.asList("mytopic", "anothertopic");

      final JavaInputDStream<ConsumerRecord<String, String>> stream = KafkaUtils.createDirectStream(streamingContext,LocationStrategies.PreferConsistent(),
      				ConsumerStrategies.<String, String>Subscribe(topics, kafkaParams));

      JavaPairDStream<String, String> pairRDD = stream.mapToPair(record-> new Tuple2<>(record.key(), record.value()));
     
      pairRDD.foreachRDD(pRDD-> { pRDD.foreach(tuple-> System.out.println(new Date()+" :: Kafka msg key ::"+tuple._1() +" the val is ::"+tuple._2()));});
     
      JavaDStream<String> tweetRDD = pairRDD.map(x-> x._2()).map(new TweetText());
      
      tweetRDD.foreachRDD(tRDD -> tRDD.foreach(x->System.out.println(new Date()+" :: "+x)));
      
     JavaDStream<String> hashtagRDD = tweetRDD.flatMap(twt-> Arrays.stream(twt.split(" ")).filter(str-> str.contains("#")).collect(Collectors.toList()).iterator() );
 
      hashtagRDD.foreachRDD(tRDD -> tRDD.foreach(x->System.out.println(x)));
      
      JavaPairDStream<String, Long> cntByVal = hashtagRDD.countByValue();
      
      cntByVal.foreachRDD(tRDD -> tRDD.foreach(x->System.out.println(new Date()+" ::The count tag is ::"+x._1() +" and the val is ::"+x._2())));
      
     /* hashtagRDD.window(Durations.seconds(60), Durations.seconds(30))
                .countByValue()
               .foreachRDD(tRDD -> tRDD.foreach(x->System.out.println(new Date()+" ::The window count tag is ::"+x._1() +" and the val is ::"+x._2())));
      
     hashtagRDD.countByValueAndWindow(Durations.seconds(60), Durations.seconds(30))
               .foreachRDD(tRDD -> tRDD.foreach(x->System.out.println("The window&count tag is ::"+x._1() +" and the val is ::"+x._2())));
      */
     hashtagRDD.window(Durations.minutes(8)).countByValue()
     .foreachRDD(tRDD -> tRDD.foreach(x->System.out.println(new Date()+" ::The window count tag is ::"+x._1() +" and the val is ::"+x._2())));
     hashtagRDD.window(Durations.minutes(8),Durations.minutes(2)).countByValue()
     .foreachRDD(tRDD -> tRDD.foreach(x->System.out.println(new Date()+" ::The window count tag is ::"+x._1() +" and the val is ::"+x._2())));
     hashtagRDD.window(Durations.minutes(12),Durations.minutes(8)).countByValue()
     .foreachRDD(tRDD -> tRDD.foreach(x->System.out.println(new Date()+" ::The window count tag is ::"+x._1() +" and the val is ::"+x._2())));
     hashtagRDD.window(Durations.minutes(2),Durations.minutes(2)).countByValue()
     .foreachRDD(tRDD -> tRDD.foreach(x->System.out.println(new Date()+" ::The window count tag is ::"+x._1() +" and the val is ::"+x._2())));
     hashtagRDD.window(Durations.minutes(12),Durations.minutes(12)).countByValue()
     .foreachRDD(tRDD -> tRDD.foreach(x->System.out.println(new Date()+" ::The window count tag is ::"+x._1() +" and the val is ::"+x._2())));
     
     /*hashtagRDD.window(Durations.minutes(5),Durations.minutes(2)).countByValue()
     .foreachRDD(tRDD -> tRDD.foreach(x->System.out.println(new Date()+" ::The window count tag is ::"+x._1() +" and the val is ::"+x._2())));*/
     /* hashtagRDD.window(Durations.minutes(10),Durations.minutes(1)).countByValue()
     .foreachRDD(tRDD -> tRDD.foreach(x->System.out.println(new Date()+" ::The window count tag is ::"+x._1() +" and the val is ::"+x._2())));*/
     
      streamingContext.start();
      try {
	streamingContext.awaitTermination();
} catch (InterruptedException e) {
	// TODO Auto-generated catch block
	e.printStackTrace();
}
  }
 
Developer: PacktPublishing, Project: Apache-Spark-2x-for-Java-Developers, Lines of code: 72, Source file: KafkaExample.java

Example 13: main

import org.apache.log4j.LogManager; // import the package/class this method depends on
/**
	 * @param args
	 */
	public static void main(String[] args) {
		//C:\Users\sumit.kumar\Downloads\bin\warehouse
		//System.setProperty("hadoop.home.dir", "C:\\Users\\sumit.kumar\\Downloads");
		String logFile = "src/main/resources/Apology_by_Plato.txt"; // Should be some file on your system
		Logger rootLogger = LogManager.getRootLogger();
		rootLogger.setLevel(Level.WARN);
		 SparkConf conf = new SparkConf().setMaster("local").setAppName("ActionExamples").set("spark.hadoop.validateOutputSpecs", "false");
			JavaSparkContext sparkContext = new JavaSparkContext(conf);
		    JavaRDD<Integer> rdd = sparkContext.parallelize(Arrays.asList(1, 2, 3,4,5),3).cache();	
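		    // Keep only the even elements; this filtered RDD is persisted to MEMORY_AND_DISK further below.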
		    JavaRDD<Integer> evenRDD= rdd.filter(new org.apache.spark.api.java.function.Function<Integer, Boolean>() {
			@Override
			public Boolean call(Integer v1) throws Exception {
			  return ((v1%2)==0)?true:false;
				}
			});
		    
		    evenRDD.persist(StorageLevel.MEMORY_AND_DISK());
		    evenRDD.foreach(new VoidFunction<Integer>() {
			@Override
			public void call(Integer t) throws Exception {
			System.out.println("The value of RDD are :"+t);
			 }
			});
		   //unpersisting the RDD 
		   evenRDD.unpersist();
		   rdd.unpersist();
		   
		   /* JavaRDD<String> lines = spark.read().textFile(logFile).javaRDD().cache();
		    System.out.println("DEBUG: \n"+ lines.toDebugString());
		   long word= lines.count();
		   JavaRDD<String> distinctLines=lines.distinct();
		   System.out.println("DEBUG: \n"+ distinctLines.toDebugString());
		   JavaRDD<String> finalRdd=lines.subtract(distinctLines);
		    
		   
		   System.out.println("DEBUG: \n"+ finalRdd.toDebugString());
		   System.out.println("The count is "+word);
		   System.out.println("The count is "+distinctLines.count());
		   System.out.println("The count is "+finalRdd.count());
		   
		   finalRdd.foreach(new VoidFunction<String>() {
			
			@Override
			public void call(String t) throws Exception {
				// TODO Auto-generated method stub
				System.out.println(t);
			}
		});
*/	    /*SparkConf conf = new SparkConf().setAppName("Simple Application");
	    JavaSparkContext sc = new JavaSparkContext(conf);
	    StorageLevel newLevel;
		JavaRDD<String> logData = sc.textFile(logFile).cache();

	    long numAs = logData.filter(new Function(logFile, logFile, logFile, logFile, false) {
	      public Boolean call(String s) { return s.contains("a"); }
	    }).count();

	    long numBs = logData.filter(new Function(logFile, logFile, logFile, logFile, false) {
	      public Boolean call(String s) { return s.contains("b"); }
	    }).count();

	    System.out.println("Lines with a: " + numAs + ", lines with b: " + numBs);
	    
	    sc.stop();*/

	}
 
Developer: PacktPublishing, Project: Apache-Spark-2x-for-Java-Developers, Lines of code: 70, Source file: PersistExample.java

Example 14: testAppOnMultiNode

import org.apache.log4j.LogManager; // import the package/class this method depends on
@Test (timeout = 30000)
public void testAppOnMultiNode() throws Exception {
  Logger rootLogger = LogManager.getRootLogger();
  rootLogger.setLevel(Level.DEBUG);
  conf.set("yarn.scheduler.capacity.node-locality-delay", "-1");
  MockRM rm = new MockRM(conf);
  rm.start();
  MockNM nm1 = rm.registerNode("h1:1234", 5120);
  MockNM nm2 = rm.registerNode("h2:5678", 10240);
  
  RMApp app = rm.submitApp(2000);

  //kick the scheduling
  nm1.nodeHeartbeat(true);

  RMAppAttempt attempt = app.getCurrentAppAttempt();
  MockAM am = rm.sendAMLaunched(attempt.getAppAttemptId());
  am.registerAppAttempt();
  
  //request for containers
  int request = 13;
  am.allocate("h1" , 1000, request, new ArrayList<ContainerId>());
  
  //kick the scheduler
  List<Container> conts = am.allocate(new ArrayList<ResourceRequest>(),
      new ArrayList<ContainerId>()).getAllocatedContainers();
  int contReceived = conts.size();
  while (contReceived < 3) {//only 3 containers are available on node1
    nm1.nodeHeartbeat(true);
    conts.addAll(am.allocate(new ArrayList<ResourceRequest>(),
        new ArrayList<ContainerId>()).getAllocatedContainers());
    contReceived = conts.size();
    LOG.info("Got " + contReceived + " containers. Waiting to get " + 3);
    Thread.sleep(WAIT_SLEEP_MS);
  }
  Assert.assertEquals(3, conts.size());

  //send node2 heartbeat
  conts = am.allocate(new ArrayList<ResourceRequest>(),
      new ArrayList<ContainerId>()).getAllocatedContainers();
  contReceived = conts.size();
  while (contReceived < 10) {
    nm2.nodeHeartbeat(true);
    conts.addAll(am.allocate(new ArrayList<ResourceRequest>(),
        new ArrayList<ContainerId>()).getAllocatedContainers());
    contReceived = conts.size();
    LOG.info("Got " + contReceived + " containers. Waiting to get " + 10);
    Thread.sleep(WAIT_SLEEP_MS);
  }
  Assert.assertEquals(10, conts.size());

  am.unregisterAppAttempt();
  nm1.nodeHeartbeat(attempt.getAppAttemptId(), 1, ContainerState.COMPLETE);
  am.waitForState(RMAppAttemptState.FINISHED);

  rm.stop();
}
 
Developer: naver, Project: hadoop, Lines of code: 58, Source file: TestRM.java

Example 15: main

import org.apache.log4j.LogManager; // import the package/class this method depends on
public static void main(String[] args) {
	// Windows-specific property: needed if Hadoop is not installed or HADOOP_HOME is not set
	 System.setProperty("hadoop.home.dir", "E:\\hadoop");
	
	 //Build a Spark Session	
      SparkSession sparkSession = SparkSession
      .builder()
      .master("local")
	  .config("spark.sql.warehouse.dir","file:///E:/hadoop/warehouse")
      .appName("EdgeBuilder")
      .getOrCreate();
      Logger rootLogger = LogManager.getRootLogger();
	  rootLogger.setLevel(Level.WARN); 
	// Read the CSV data
		 Dataset<Row> emp_ds = sparkSession.read()
				 .format("com.databricks.spark.csv")
   		         .option("header", "true")
   		         .option("inferSchema", "true")
   		         .load("src/main/resources/employee.txt");    
    		
	    UDF2 calcDays=new CalcDaysUDF();
	  //Registering the UDFs in Spark Session created above      
	    sparkSession.udf().register("calcDays", calcDays, DataTypes.LongType);
	    
	    emp_ds.createOrReplaceTempView("emp_ds");
	    
	    emp_ds.printSchema();
	    emp_ds.show();
	    
	    sparkSession.sql("select calcDays(hiredate,'dd-MM-yyyy') from emp_ds").show();   
	    //Instantiate UDAF
	    AverageUDAF calcAvg= new AverageUDAF();
	    //Register UDAF to SparkSession
	    sparkSession.udf().register("calAvg", calcAvg);
	    //Use UDAF
	    sparkSession.sql("select deptno,calAvg(salary) from emp_ds group by deptno ").show(); 
	   
	    //
	    TypeSafeUDAF typeSafeUDAF=new TypeSafeUDAF();
	    
	    Dataset<Employee> emf = emp_ds.as(Encoders.bean(Employee.class));
	    emf.printSchema();
	    emf.show();
	    
	    TypedColumn<Employee, Double> averageSalary = typeSafeUDAF.toColumn().name("averageTypeSafe");
	    Dataset<Double> result = emf.select(averageSalary);
	   result.show();
	    

}
 
Developer: PacktPublishing, Project: Apache-Spark-2x-for-Java-Developers, Lines of code: 51, Source file: UDFExample.java


Note: the org.apache.log4j.LogManager.getRootLogger method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors, and any distribution or use should follow the License of the corresponding project. Do not reproduce without permission.