

Java JavaRDD.filter Method Code Examples

This article collects typical usage examples of the Java method org.apache.spark.api.java.JavaRDD.filter, gathered from open-source projects. If you are wondering what JavaRDD.filter does or how to use it, the curated code examples below should help. You can also explore further usage examples of its containing class, org.apache.spark.api.java.JavaRDD.


The following presents 9 code examples of the JavaRDD.filter method, sorted by popularity by default.
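Before the examples, here is a minimal, self-contained sketch of what JavaRDD.filter does (the class name, sample data, and local-mode setup are invented for illustration): filter evaluates a boolean predicate against every element and returns a new RDD containing only the elements for which the predicate is true, leaving the original RDD unchanged. Like all RDD transformations, it is lazy and only runs when an action such as collect or count is invoked.

import java.util.Arrays;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;

public class FilterSketch {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setMaster("local").setAppName("FilterSketch");
        try (JavaSparkContext sc = new JavaSparkContext(conf)) {
            JavaRDD<String> words = sc.parallelize(Arrays.asList("spark", "hadoop", "storm"));
            // keep only the elements that contain the letter 'a'
            JavaRDD<String> withA = words.filter(s -> s.contains("a"));
            System.out.println(withA.collect()); // [spark, hadoop]
        }
    }
}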

Example 1: splitNewDataToTrainTest

import org.apache.spark.api.java.JavaRDD; // import the package/class this method depends on
/**
 * Implementation which splits based solely on time. It returns approximately the
 * earliest (1 - {@link #getTestFraction()}) of the input, ordered by timestamp, as new
 * training data, and the most recent {@link #getTestFraction()} of it as test data.
 */
@Override
protected Pair<JavaRDD<String>,JavaRDD<String>> splitNewDataToTrainTest(JavaRDD<String> newData) {
  // Rough approximation; assumes timestamps are fairly evenly distributed
  StatCounter maxMin = newData.mapToDouble(line -> MLFunctions.TO_TIMESTAMP_FN.call(line).doubleValue()).stats();

  long minTime = (long) maxMin.min();
  long maxTime = (long) maxMin.max();
  log.info("New data timestamp range: {} - {}", minTime, maxTime);
  long approxTestTrainBoundary = (long) (maxTime - getTestFraction() * (maxTime - minTime));
  log.info("Splitting at timestamp {}", approxTestTrainBoundary);

  JavaRDD<String> newTrainData = newData.filter(
      line -> MLFunctions.TO_TIMESTAMP_FN.call(line) < approxTestTrainBoundary);
  JavaRDD<String> testData = newData.filter(
      line -> MLFunctions.TO_TIMESTAMP_FN.call(line) >= approxTestTrainBoundary);

  return new Pair<>(newTrainData, testData);
}
 
Developer ID: oncewang, Project: oryx2, Lines of code: 24, Source: ALSUpdate.java
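A quick sanity check of the split arithmetic above, with invented toy numbers:

// toy values, invented for illustration only
long minTime = 1_000L;
long maxTime = 2_000L;
double testFraction = 0.2;
long boundary = (long) (maxTime - testFraction * (maxTime - minTime)); // 2000 - 0.2 * 1000 = 1800
// timestamps in [1000, 1800) -- the earliest ~80% -- become training data;
// timestamps in [1800, 2000] -- the most recent ~20% -- become test data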

Example 2: filter

import org.apache.spark.api.java.JavaRDD; // import the package/class this method depends on
/**
 * Filter an RDD of {@link Instance}s based on the specified config
 *
 * @param instances RDD of instances to filter
 * @param typeIndex index mapping type URIs to integers
 * @return filtered RDD of instances
 */
public JavaRDD<Instance> filter(JavaRDD<Instance> instances, IndexMap<String> typeIndex) {
    if (config.getTypes().isEmpty()) {
        return instances;
    }
    // get indexes of accepted type URIs
    Set<Integer> acceptedTypes = config.getTypes().stream()
            .map(typeIndex::getIndex)
            .collect(Collectors.toSet());

    instances = instances.filter(instance -> !Collections.disjoint(instance.getTypes(), acceptedTypes));

    if (config.isIgnoreOtherTypes()) {
        // remove other than accepted types from each instance
        instances = instances.map(instance -> {
            Set<Integer> intersect = Sets.intersection(instance.getTypes(), acceptedTypes).immutableCopy();
            instance.getTypes().clear();
            instance.getTypes().addAll(intersect);
            return instance;
        });
    }

    return instances;
}
 
Developer ID: Merck, Project: rdf2x, Lines of code: 31, Source: InstanceFilter.java
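The heart of this filter is the Collections.disjoint check. A tiny standalone illustration of that predicate, with invented type indexes and no rdf2x classes involved:

import java.util.Arrays;
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;

Set<Integer> instanceTypes = new HashSet<>(Arrays.asList(1, 3)); // types of one instance
Set<Integer> acceptedTypes = new HashSet<>(Arrays.asList(3, 5)); // types accepted by the config
// disjoint means "no element in common"; the instance is kept when the sets DO overlap
boolean keep = !Collections.disjoint(instanceTypes, acceptedTypes); // true: both contain type 3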

Example 3: filterQuadsByForbiddenSubjects

import org.apache.spark.api.java.JavaRDD; // import the package/class this method depends on
/**
 * Get quads with specified subjects filtered out, computed by querying an in-memory set of subjects
 *
 * @param quads            RDD of quads to filter
 * @param subjectBlacklist set of requested subject URIs to be filtered out
 * @return filtered RDD with only those quads whose subject is NOT in subjectBlacklist
 */
public static JavaRDD<Quad> filterQuadsByForbiddenSubjects(JavaRDD<Quad> quads, Set<String> subjectBlacklist) {
    if (subjectBlacklist.isEmpty()) {
        return quads;
    }
    return quads.filter(quad -> !quad.getSubject().isURI() ||
            !subjectBlacklist.contains(quad.getSubject().getURI())
    );
}
 
Developer ID: Merck, Project: rdf2x, Lines of code: 16, Source: QuadUtils.java

Example 4: filterTypeQuads

import org.apache.spark.api.java.JavaRDD; // import the package/class this method depends on
/**
 * Get quads that specify the type of a resource
 *
 * @param quads             RDD of quads to filter
 * @param typePredicateURIs additional type predicates to use together with rdf:type
 * @return RDD of quads that specify the type of a resource
 */
public static JavaRDD<Quad> filterTypeQuads(JavaRDD<Quad> quads, List<String> typePredicateURIs) {
    String typePredicateURI = RDF.TYPE.toString();

    return quads.filter(quad -> {
        if (!quad.getPredicate().isURI() || !quad.getObject().isURI()) {
            return false;
        }
        String uri = quad.getPredicate().getURI();
        return uri.equals(typePredicateURI) || typePredicateURIs.contains(uri);
    });
}
 
Developer ID: Merck, Project: rdf2x, Lines of code: 19, Source: QuadUtils.java

Example 5: main

import org.apache.spark.api.java.JavaRDD; // import the package/class this method depends on
/**
 * @param args
 */
public static void main(String[] args) {
    // System.setProperty("hadoop.home.dir", "C:\\Users\\sumit.kumar\\Downloads");
    String logFile = "src/main/resources/Apology_by_Plato.txt"; // should be some file on your system
    Logger rootLogger = LogManager.getRootLogger();
    rootLogger.setLevel(Level.WARN);

    SparkConf conf = new SparkConf().setMaster("local").setAppName("ActionExamples")
            .set("spark.hadoop.validateOutputSpecs", "false");
    JavaSparkContext sparkContext = new JavaSparkContext(conf);

    JavaRDD<Integer> rdd = sparkContext.parallelize(Arrays.asList(1, 2, 3, 4, 5), 3).cache();
    // keep only the even numbers
    JavaRDD<Integer> evenRDD = rdd.filter(new org.apache.spark.api.java.function.Function<Integer, Boolean>() {
        @Override
        public Boolean call(Integer v1) throws Exception {
            return (v1 % 2) == 0;
        }
    });

    evenRDD.persist(StorageLevel.MEMORY_AND_DISK());
    evenRDD.foreach(new VoidFunction<Integer>() {
        @Override
        public void call(Integer t) throws Exception {
            System.out.println("The value of the RDD element is: " + t);
        }
    });

    // unpersist the RDDs
    evenRDD.unpersist();
    rdd.unpersist();

    /* JavaRDD<String> lines = spark.read().textFile(logFile).javaRDD().cache();
    System.out.println("DEBUG: \n" + lines.toDebugString());
    long word = lines.count();
    JavaRDD<String> distinctLines = lines.distinct();
    System.out.println("DEBUG: \n" + distinctLines.toDebugString());
    JavaRDD<String> finalRdd = lines.subtract(distinctLines);

    System.out.println("DEBUG: \n" + finalRdd.toDebugString());
    System.out.println("The count is " + word);
    System.out.println("The count is " + distinctLines.count());
    System.out.println("The count is " + finalRdd.count());

    finalRdd.foreach(new VoidFunction<String>() {
        @Override
        public void call(String t) throws Exception {
            System.out.println(t);
        }
    });
    */

    /* SparkConf conf = new SparkConf().setAppName("Simple Application");
    JavaSparkContext sc = new JavaSparkContext(conf);
    JavaRDD<String> logData = sc.textFile(logFile).cache();

    long numAs = logData.filter(new Function<String, Boolean>() {
        public Boolean call(String s) { return s.contains("a"); }
    }).count();

    long numBs = logData.filter(new Function<String, Boolean>() {
        public Boolean call(String s) { return s.contains("b"); }
    }).count();

    System.out.println("Lines with a: " + numAs + ", lines with b: " + numBs);

    sc.stop();
    */
}
 
Developer ID: PacktPublishing, Project: Apache-Spark-2x-for-Java-Developers, Lines of code: 70, Source: PersistExample.java
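On Java 8 and later, Spark's Java function interfaces are single-method, so the anonymous-class predicate above collapses to a lambda; a one-line equivalent of the even-number filter:

// Java 8 lambda equivalent of the anonymous-class filter above
JavaRDD<Integer> evenRDD = rdd.filter(v1 -> v1 % 2 == 0);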

Example 6: parseQuads

import org.apache.spark.api.java.JavaRDD; // import the package/class this method depends on
@Override
public JavaRDD<Quad> parseQuads(String path) {

    Configuration conf = new Configuration();

    Integer batchSize = config.getBatchSize();
    conf.set(NLineInputFormat.LINES_PER_MAP, batchSize.toString());

    if (config.getErrorHandling() == ParseErrorHandling.Throw) {
        conf.set(RdfIOConstants.INPUT_IGNORE_BAD_TUPLES, "false");
    } else {
        conf.set(RdfIOConstants.INPUT_IGNORE_BAD_TUPLES, "true");
    }

    Boolean isLineBased = config.getLineBasedFormat();
    if (isLineBased == null) {
        isLineBased = guessIsLineBasedFormat(path);
    }
    JavaRDD<Quad> quads;
    Integer partitions = config.getRepartition();
    if (isLineBased) {
        log.info("Parsing RDF in parallel with batch size: {}", batchSize);
        quads = sc.newAPIHadoopFile(path,
                NQuadsInputFormat.class,
                LongWritable.class, // position
                QuadWritable.class, // value
                conf).values().map(QuadWritable::get);
    } else {
        // let Jena guess the format, load whole files
        log.info("Input format is not line based, parsing RDF by Master node only.");
        quads = sc.newAPIHadoopFile(path,
                TriplesOrQuadsInputFormat.class,
                LongWritable.class, // position
                QuadWritable.class, // value
                conf).values().map(QuadWritable::get);

        if (partitions == null) {
            log.warn("Reading non-line based formats by master node only, consider setting --parsing.repartition to redistribute work to other nodes.");
        }
    }
    if (partitions != null) {
        log.info("Distributing workload, repartitioning into {} partitions", partitions);
        quads = quads.repartition(partitions);
    }

    final List<String> acceptedLanguages = config.getAcceptedLanguages();
    // if only some languages are accepted
    if (!acceptedLanguages.isEmpty()) {
        // filter out literals of unsupported languages
        quads = quads.filter(quad ->
                !quad.getObject().isLiteral() ||
                        quad.getObject().getLiteralLanguage() == null ||
                        quad.getObject().getLiteralLanguage().isEmpty() ||
                        acceptedLanguages.contains(quad.getObject().getLiteralLanguage())
        );
    }

    return quads;
}
 
Developer ID: Merck, Project: rdf2x, Lines of code: 61, Source: ElephasQuadParser.java

Example 7: checkErrorInstance

import org.apache.spark.api.java.JavaRDD; // import the package/class this method depends on
private JavaRDD<Instance> checkErrorInstance(JavaRDD<Instance> result) {
    int errorType = uriIndex.getIndex(InstanceAggregator.IRI_TYPE_ERROR);
    assertTrue("Parsing error instance is present", result.filter(instance -> instance.getTypes().contains(errorType)).count() == 1L);
    // filter out error instance
    return result.filter(instance -> !instance.getTypes().contains(errorType));
}
 
Developer ID: Merck, Project: rdf2x, Lines of code: 7, Source: InstanceAggregatorTest.java

Example 8: filterQuadsByAllowedSubjects

import org.apache.spark.api.java.JavaRDD; // import the package/class this method depends on
/**
 * Get quads with specified subjects only, computed by querying an in-memory set of subjects
 *
 * @param quads       RDD of quads to filter
 * @param subjectURIs set of requested subject URIs to keep
 * @return filtered RDD with only those quads whose subject is in subjectURIs
 */
public static JavaRDD<Quad> filterQuadsByAllowedSubjects(JavaRDD<Quad> quads, Set<String> subjectURIs) {
    return quads.filter(quad -> quad.getSubject().isURI() &&
            subjectURIs.contains(quad.getSubject().getURI())
    );
}
 
Developer ID: Merck, Project: rdf2x, Lines of code: 13, Source: QuadUtils.java

Example 9: filterQuadsByObjects

import org.apache.spark.api.java.JavaRDD; // import the package/class this method depends on
/**
 * Get quads with specified object URIs, computed by querying an in-memory set of objects
 *
 * @param quads      RDD of quads to filter
 * @param objectURIs set of requested object URIs to filter on
 * @return filtered RDD with only those quads whose object is in objectURIs
 */
public static JavaRDD<Quad> filterQuadsByObjects(JavaRDD<Quad> quads, Set<String> objectURIs) {
    return quads.filter(quad -> quad.getObject().isURI() &&
            objectURIs.contains(quad.getObject().getURI())
    );
}
 
Developer ID: Merck, Project: rdf2x, Lines of code: 13, Source: QuadUtils.java
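To tie Examples 3, 8, and 9 together, here is a minimal sketch that runs the three QuadUtils filters on a tiny in-memory RDD. All URIs and the local-mode setup are invented for illustration, and QuadUtils is assumed to be on the classpath; note that Jena's Quad is not Java-serializable, so a real cluster job would need an appropriate serializer (e.g. Kryo) configured.

import java.util.Arrays;
import java.util.Collections;
import org.apache.jena.graph.NodeFactory;
import org.apache.jena.sparql.core.Quad;
import org.apache.spark.SparkConf;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;

public class QuadUtilsSketch {
    public static void main(String[] args) {
        SparkConf conf = new SparkConf().setMaster("local").setAppName("QuadUtilsSketch");
        try (JavaSparkContext sc = new JavaSparkContext(conf)) {
            Quad q1 = new Quad(Quad.defaultGraphIRI,
                    NodeFactory.createURI("http://example.org/a"),
                    NodeFactory.createURI("http://example.org/p"),
                    NodeFactory.createURI("http://example.org/b"));
            Quad q2 = new Quad(Quad.defaultGraphIRI,
                    NodeFactory.createURI("http://example.org/c"),
                    NodeFactory.createURI("http://example.org/p"),
                    NodeFactory.createURI("http://example.org/d"));
            JavaRDD<Quad> quads = sc.parallelize(Arrays.asList(q1, q2));

            // Example 8: keep only quads whose subject is in the allowed set -> q1
            JavaRDD<Quad> allowed = QuadUtils.filterQuadsByAllowedSubjects(
                    quads, Collections.singleton("http://example.org/a"));

            // Example 3: drop quads whose subject is in the blacklist -> q2
            JavaRDD<Quad> remaining = QuadUtils.filterQuadsByForbiddenSubjects(
                    quads, Collections.singleton("http://example.org/a"));

            // Example 9: keep only quads whose object is in the given set -> q2
            JavaRDD<Quad> byObject = QuadUtils.filterQuadsByObjects(
                    quads, Collections.singleton("http://example.org/d"));

            System.out.println(allowed.count() + " " + remaining.count() + " " + byObject.count()); // 1 1 1
        }
    }
}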


Note: The org.apache.spark.api.java.JavaRDD.filter method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. Please consult each project's license before using or redistributing the code; do not republish without permission.