当前位置: 首页>>代码示例>>Java>>正文


Java DenseVector类代码示例

本文整理汇总了Java中org.apache.spark.mllib.linalg.DenseVector的典型用法代码示例。如果您正苦于以下问题:Java DenseVector类的具体用法?Java DenseVector怎么用?Java DenseVector使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。


DenseVector类属于org.apache.spark.mllib.linalg包,在下文中一共展示了DenseVector类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: DGER

import org.apache.spark.mllib.linalg.DenseVector; //导入依赖的package包/类
public static DistributedMatrix DGER(double alpha, DenseVector x, DenseVector y, DistributedMatrix A, JavaSparkContext jsc) {

    // BLAS level-2 rank-1 update: A := alpha * x * y^T + A, dispatched on the
    // concrete distributed-matrix implementation (exact class match on purpose,
    // not instanceof).
    Class<?> matrixClass = A.getClass();

    if (matrixClass == IndexedRowMatrix.class) {
        return L2.DGER_IRW((IndexedRowMatrix) A, alpha, x, y, jsc);
    }

    if (matrixClass == CoordinateMatrix.class) {
        return L2.DGER_COORD((CoordinateMatrix) A, alpha, x, y, jsc);
    }

    // BlockMatrix support is not implemented yet; that branch — and any other
    // unsupported implementation — yields null, matching historical behavior.
    //return L2.DGER_BCK((BlockMatrix) A, alpha, x, y, jsc);
    return null;
}
 
开发者ID:jmabuin,项目名称:BLASpark,代码行数:19,代码来源:L2.java

示例2: getVectors

import org.apache.spark.mllib.linalg.DenseVector; //导入依赖的package包/类
private List<Vector> getVectors(List<String[]> data) {
    // Encode each raw string row using the model's feature encodings, then wrap
    // the numeric values in Spark dense vectors.
    List<Map<String, Integer>> encodings = model.getEncodings();
    BasicEncoder encoder = new BasicEncoder.Builder().encodings(encodings).build();

    List<Vector> result = new ArrayList<Vector>();
    for (String[] row : data) {
        String[] encodedRow;
        try {
            encodedRow = encoder.call(row);
        } catch (Exception e) {
            // Best-effort: if encoding fails, fall back to the raw row rather
            // than aborting the whole batch.
            log.warn("Data encoding failed. Cause: " + e.getMessage());
            encodedRow = row;
        }
        result.add(new DenseVector(MLUtils.toDoubleArray(encodedRow)));
    }
    return result;
}
 
开发者ID:wso2-attic,项目名称:carbon-ml,代码行数:19,代码来源:Predictor.java

示例3: DGEMV

import org.apache.spark.mllib.linalg.DenseVector; //导入依赖的package包/类
/**
 * BLAS level-2 general matrix-vector multiply: y := alpha*A*x + beta*y.
 *
 * @param alpha scalar applied to A*x
 * @param A     distributed matrix (IndexedRowMatrix, CoordinateMatrix or BlockMatrix)
 * @param x     input vector
 * @param beta  scalar applied to y before accumulation
 * @param y     vector updated in place (also returned); replaced by a zero
 *              vector when beta == 0
 * @param jsc   Spark context used by the per-implementation kernels
 * @return the updated y
 * @throws IllegalArgumentException if A is not one of the supported matrix types
 */
public static DenseVector DGEMV(double alpha, DistributedMatrix A, DenseVector x, double beta, DenseVector y, JavaSparkContext jsc){

        // First form  y := beta*y.
        if (beta != 1.0) {
            if (beta == 0.0) {
                y = Vectors.zeros(y.size()).toDense();
            }
            else {
                BLAS.scal(beta, y);
            }
        }

        // With alpha == 0 the A*x term vanishes; nothing left to compute.
        if (alpha == 0.0) {
            return y;
        }

        DenseVector tmpVector;

        // Form  y := alpha*A*x + y, dispatching on the concrete matrix type.
        if( A.getClass() == IndexedRowMatrix.class) {
            tmpVector = L2.DGEMV_IRW((IndexedRowMatrix) A, alpha, x, jsc);
        }
        else if (A.getClass() == CoordinateMatrix.class) {
            tmpVector = L2.DGEMV_COORD((CoordinateMatrix) A, alpha, x, jsc);
        }
        else if (A.getClass() == BlockMatrix.class){
            tmpVector = L2.DGEMV_BCK((BlockMatrix) A, alpha, x, jsc);
        }
        else {
            // Previously tmpVector stayed null here and BLAS.axpy blew up with
            // an opaque NullPointerException; fail fast with a clear message.
            throw new IllegalArgumentException(
                    "Unsupported DistributedMatrix implementation: " + A.getClass().getName());
        }

        BLAS.axpy(1.0, tmpVector, y);

        return y;
    }
 
开发者ID:jmabuin,项目名称:BLASpark,代码行数:40,代码来源:L2.java

示例4: DGEMV_COORD

import org.apache.spark.mllib.linalg.DenseVector; //导入依赖的package包/类
private static DenseVector DGEMV_COORD(CoordinateMatrix matrix, double alpha, DenseVector vector, JavaSparkContext jsc) {
    // Multiply each partition's matrix entries against the input vector
    // (captured by the mapper's closure) and sum the partial results.
    return matrix.entries()
            .toJavaRDD()
            .mapPartitions(new MatrixEntriesMultiplication(vector, alpha))
            .reduce(new MatrixEntriesMultiplicationReducer());
}
 
开发者ID:jmabuin,项目名称:BLASpark,代码行数:9,代码来源:L2.java

示例5: testVectorBinarizerDense

import org.apache.spark.mllib.linalg.DenseVector; //导入依赖的package包/类
// Round-trip check: a VectorBinarizer exported through ModelExporter and
// re-imported as a Transformer must reproduce Spark's own transform() output
// row-for-row.
@Test
public void testVectorBinarizerDense() {
    // prepare data: (id, value1, vector1) rows whose vector components fall on
    // both sides of the binarization threshold (2.0)

    JavaRDD<Row> jrdd = sc.parallelize(Arrays.asList(
            RowFactory.create(0d, 1d, new DenseVector(new double[]{-2d, -3d, -4d, -1d, 6d, -7d, 8d, 0d, 0d, 0d, 0d, 0d})),
            RowFactory.create(1d, 2d, new DenseVector(new double[]{4d, -5d, 6d, 7d, -8d, 9d, -10d, 0d, 0d, 0d, 0d, 0d})),
            RowFactory.create(2d, 3d, new DenseVector(new double[]{-5d, 6d, -8d, 9d, 10d, 11d, 12d, 0d, 0d, 0d, 0d, 0d}))
    ));

    StructType schema = new StructType(new StructField[]{
            new StructField("id", DataTypes.DoubleType, false, Metadata.empty()),
            new StructField("value1", DataTypes.DoubleType, false, Metadata.empty()),
            new StructField("vector1", new VectorUDT(), false, Metadata.empty())
    });

    DataFrame df = sqlContext.createDataFrame(jrdd, schema);
    VectorBinarizer vectorBinarizer = new VectorBinarizer()
            .setInputCol("vector1")
            .setOutputCol("binarized")
            .setThreshold(2d);


    //Export this model
    byte[] exportedModel = ModelExporter.export(vectorBinarizer, df);

    //Import and get Transformer
    Transformer transformer = ModelImporter.importAndGetTransformer(exportedModel);
    //compare predictions: feed each Spark row's input vector through the
    //imported transformer and check it matches Spark's "binarized" column
    Row[] sparkOutput = vectorBinarizer.transform(df).orderBy("id").select("id", "value1", "vector1", "binarized").collect();
    for (Row row : sparkOutput) {

        Map<String, Object> data = new HashMap<>();
        data.put(vectorBinarizer.getInputCol(), ((DenseVector) row.get(2)).toArray());
        transformer.transform(data);
        double[] output = (double[]) data.get(vectorBinarizer.getOutputCol());
        assertArrayEquals(output, ((DenseVector) row.get(3)).toArray(), 0d);
    }
}
 
开发者ID:flipkart-incubator,项目名称:spark-transformers,代码行数:40,代码来源:VectorBinarizerBridgeTest.java

示例6: predict

import org.apache.spark.mllib.linalg.DenseVector; //导入依赖的package包/类
public Status predict(double lat, double lon, double speedKnots, double courseMinusHeading,
        double preEffectiveSpeedKnots, double preError, double postEffectiveSpeedKnots,
        double postError) {
    // Assemble the feature vector in the exact order the model expects.
    double[] featureValues = { lat, lon, speedKnots, courseMinusHeading,
            preEffectiveSpeedKnots, preError, postEffectiveSpeedKnots, postError };
    double prediction = model.predict(new DenseVector(featureValues));

    // Map the numeric class label to the vessel status enum; anything other
    // than 1 or 2 is classified OTHER.
    if (is(prediction, 1))
        return Status.MOORED;
    if (is(prediction, 2))
        return Status.ANCHORED;
    return Status.OTHER;
}
 
开发者ID:amsa-code,项目名称:risky,代码行数:15,代码来源:AnchoredPredictor.java

示例7: convertExecRowToVector

import org.apache.spark.mllib.linalg.DenseVector; //导入依赖的package包/类
/**
 * Converts a one-based ExecRow into a Spark MLlib dense vector.
 *
 * ExecRow columns are one-based; the resulting vector stores column i at
 * array index i-1.
 *
 * @param execRow row whose columns are read as doubles
 * @return dense vector of the row's column values
 * @throws StandardException if a column value cannot be read as a double
 */
public static Vector convertExecRowToVector(ExecRow execRow) throws StandardException {
    int length = execRow.nColumns();
    double[] vectorValues = new double[length];
    for (int i = 1; i <= length; i++) {
        // Shift the one-based column index down to the zero-based array index.
        // (The previous code wrote vectorValues[i], which skipped index 0 and
        // threw ArrayIndexOutOfBoundsException on the last column.)
        vectorValues[i - 1] = execRow.getColumn(i).getDouble();
    }
    return new DenseVector(vectorValues);
}
 
开发者ID:splicemachine,项目名称:spliceengine,代码行数:16,代码来源:SparkMLibUtils.java

示例8: call

import org.apache.spark.mllib.linalg.DenseVector; //导入依赖的package包/类
@Override
public IndexedRow call(Tuple2<Long, double[]> longTuple2) throws Exception {
    // Unpack the (row index, values) pair into an MLlib indexed row.
    Long rowIndex = longTuple2._1();
    double[] rowValues = longTuple2._2();
    return new IndexedRow(rowIndex, new DenseVector(rowValues));
}
 
开发者ID:jmabuin,项目名称:BLASpark,代码行数:5,代码来源:Array2IndexedRow.java

示例9: GetD_IRW

import org.apache.spark.mllib.linalg.DenseVector; //导入依赖的package包/类
private static IndexedRowMatrix GetD_IRW(IndexedRowMatrix A, boolean inverseValues, JavaSparkContext jsc) {

    // Extracts the diagonal of A as a new IndexedRowMatrix: each row keeps only
    // its diagonal element (optionally inverted as 1/d), all other entries 0.
    JavaRDD<IndexedRow> rows = A.rows().toJavaRDD().cache();

    final Broadcast<Boolean> inverseValuesBC = jsc.broadcast(inverseValues);

    JavaRDD<IndexedRow> diagonalRows = rows.map(new Function<IndexedRow, IndexedRow>() {

        @Override
        public IndexedRow call(IndexedRow row) throws Exception {
            long rowIndex = row.index();
            DenseVector dense = row.vector().toDense();
            boolean invert = inverseValuesBC.getValue().booleanValue();

            // A fresh double[] is zero-initialized, so only the diagonal slot
            // needs to be written. Rows whose index lies outside the vector
            // stay all-zero, matching the original per-element loop.
            double[] diagValues = new double[dense.size()];
            if (rowIndex >= 0 && rowIndex < diagValues.length) {
                int d = (int) rowIndex;
                double value = dense.apply(d);
                diagValues[d] = invert ? 1.0 / value : value;
            }

            return new IndexedRow(rowIndex, new DenseVector(diagValues));
        }
    });

    return new IndexedRowMatrix(diagonalRows.rdd());
}
 
开发者ID:jmabuin,项目名称:BLASpark,代码行数:45,代码来源:OtherOperations.java

示例10: call

import org.apache.spark.mllib.linalg.DenseVector; //导入依赖的package包/类
@Override
public DenseVector call(DenseVector partial, DenseVector accumulator) throws Exception {
    // Element-wise sum of two partial results: accumulator += 1.0 * partial.
    BLAS.axpy(1.0, partial, accumulator);
    return accumulator;
}
 
开发者ID:jmabuin,项目名称:BLASpark,代码行数:7,代码来源:MatrixEntriesMultiplicationReducer.java

示例11: MatrixEntriesMultiplication

import org.apache.spark.mllib.linalg.DenseVector; //导入依赖的package包/类
// Captures the operands used by the partition mapper: the dense vector each
// matrix entry is multiplied against, and the scalar alpha applied on top.
public MatrixEntriesMultiplication(DenseVector vector, double alpha) {

        this.vector = vector;
        this.alpha = alpha;
    }
 
开发者ID:jmabuin,项目名称:BLASpark,代码行数:6,代码来源:MatrixEntriesMultiplication.java

示例12: readVectorFromFileInHDFS

import org.apache.spark.mllib.linalg.DenseVector; //导入依赖的package包/类
/**
 * Reads a dense vector from a text file in HDFS.
 *
 * Expected layout (MatrixMarket-like): optional leading '%' comment lines,
 * then a header line whose first whitespace-separated token is the vector
 * length, then one value per line.
 *
 * @param file path of the file in HDFS
 * @param conf Hadoop configuration used to resolve the file system
 * @return the parsed vector, or null after logging if an I/O error occurs
 *         (note: on IOException this method calls System.exit(1), preserving
 *         historical behavior)
 */
public static DenseVector readVectorFromFileInHDFS(String file, Configuration conf){

    try {
        FileSystem fs = FileSystem.get(conf);
        Path pt = new Path(file);

        double vector[] = null;
        boolean headerPending = true;
        int i = 0;

        // try-with-resources closes the reader even when parsing throws;
        // the previous version leaked it on any exception before br.close().
        try (BufferedReader br = new BufferedReader(new InputStreamReader(fs.open(pt)))) {
            String line = br.readLine();

            while (line != null) {
                if (line.isEmpty()) {
                    // Skip blank lines instead of crashing on charAt(0).
                } else if (headerPending && line.charAt(0) == '%') {
                    // Comment line before the size header; skip it.
                } else if (headerPending) {
                    // First non-comment line: its first token is the length.
                    String[] matrixInfo = line.split(" ");
                    vector = new double[Integer.parseInt(matrixInfo[0])];
                    headerPending = false;
                } else {
                    vector[i] = Double.parseDouble(line);
                    i++;
                }

                line = br.readLine();
            }
        }

        return new DenseVector(vector);

    } catch (IOException e) {
        LOG.error("Error in " + IO.class.getName() + ": " + e.getMessage());
        e.printStackTrace();
        System.exit(1);
    }

    return null;
}
 
开发者ID:jmabuin,项目名称:BLASpark,代码行数:52,代码来源:IO.java

示例13: format

import org.apache.spark.mllib.linalg.DenseVector; //导入依赖的package包/类
@Override
public Vector format(Object value){
	// Build a dense probability vector ordered by the configured label list.
	List<String> labels = getLabels();
	HasProbability hasProbability = (HasProbability)value;

	int labelCount = labels.size();
	double[] probabilities = new double[labelCount];
	for(int i = 0; i < labelCount; i++){
		probabilities[i] = hasProbability.getProbability(labels.get(i));
	}

	return new DenseVector(probabilities);
}
 
开发者ID:jeremyore,项目名称:spark-pmml-import,代码行数:17,代码来源:ProbabilityColumnProducer.java

示例14: testVectorAssembler

import org.apache.spark.mllib.linalg.DenseVector; //导入依赖的package包/类
// Round-trip check: a VectorAssembler exported through ModelExporter and
// re-imported as a Transformer must concatenate (value1, vector1) into the
// same "feature" vector that Spark's own transform() produces.
@Test
public void testVectorAssembler() {
    // prepare data: (id, value1, vector1) rows to be assembled into one vector

    JavaRDD<Row> jrdd = sc.parallelize(Arrays.asList(
            RowFactory.create(0d, 1d, new DenseVector(new double[]{2d, 3d})),
            RowFactory.create(1d, 2d, new DenseVector(new double[]{3d, 4d})),
            RowFactory.create(2d, 3d, new DenseVector(new double[]{4d, 5d})),
            RowFactory.create(3d, 4d, new DenseVector(new double[]{5d, 6d})),
            RowFactory.create(4d, 5d, new DenseVector(new double[]{6d, 7d}))
    ));

    StructType schema = new StructType(new StructField[]{
            new StructField("id", DataTypes.DoubleType, false, Metadata.empty()),
            new StructField("value1", DataTypes.DoubleType, false, Metadata.empty()),
            new StructField("vector1", new VectorUDT(), false, Metadata.empty())
    });

    DataFrame df = sqlContext.createDataFrame(jrdd, schema);
    VectorAssembler vectorAssembler = new VectorAssembler()
            .setInputCols(new String[]{"value1", "vector1"})
            .setOutputCol("feature");


    //Export this model
    byte[] exportedModel = ModelExporter.export(vectorAssembler, null);

    //Import and get Transformer
    // (removed unused local: the exported bytes were also decoded to a JSON
    // string that nothing ever read)
    Transformer transformer = ModelImporter.importAndGetTransformer(exportedModel);
    //compare predictions: run each row through the imported transformer and
    //check it matches Spark's "feature" column
    Row[] sparkOutput = vectorAssembler.transform(df).orderBy("id").select("id", "value1", "vector1", "feature").collect();
    for (Row row : sparkOutput) {

        Map<String, Object> data = new HashMap<>();
        data.put(vectorAssembler.getInputCols()[0], row.get(1));
        data.put(vectorAssembler.getInputCols()[1], ((DenseVector) row.get(2)).toArray());
        transformer.transform(data);
        double[] output = (double[]) data.get(vectorAssembler.getOutputCol());
        assertArrayEquals(output, ((DenseVector) row.get(3)).toArray(), 0d);
    }
}
 
开发者ID:flipkart-incubator,项目名称:spark-transformers,代码行数:43,代码来源:VectorAssemblerBridgeTest.java

示例15: testChiSqSelector

import org.apache.spark.mllib.linalg.DenseVector; //导入依赖的package包/类
// Round-trip check: a fitted ChiSqSelectorModel exported through ModelExporter
// and re-imported as a Transformer must select the same top feature(s) that
// Spark's own transform() produces.
@Test
public void testChiSqSelector() {
    // prepare data: (id, label, features) rows for fitting the selector

    JavaRDD<Row> jrdd = sc.parallelize(Arrays.asList(
            RowFactory.create(0d, 0d, new DenseVector(new double[]{8d, 7d, 0d})),
            RowFactory.create(1d, 1d, new DenseVector(new double[]{0d, 9d, 6d})),
            RowFactory.create(2d, 1d, new DenseVector(new double[]{0.0d, 9.0d, 8.0d})),
            RowFactory.create(3d, 2d, new DenseVector(new double[]{8.0d, 9.0d, 5.0d}))
    ));

    StructType schema = new StructType(new StructField[]{
            new StructField("id", DataTypes.DoubleType, false, Metadata.empty()),
            new StructField("label", DataTypes.DoubleType, false, Metadata.empty()),
            new StructField("features", new VectorUDT(), false, Metadata.empty())
    });

    DataFrame df = sqlContext.createDataFrame(jrdd, schema);
    ChiSqSelector chiSqSelector = new ChiSqSelector();
    chiSqSelector.setNumTopFeatures(1);
    chiSqSelector.setFeaturesCol("features");
    chiSqSelector.setLabelCol("label");
    chiSqSelector.setOutputCol("output");

    ChiSqSelectorModel chiSqSelectorModel = chiSqSelector.fit(df);

    //Export this model
    byte[] exportedModel = ModelExporter.export(chiSqSelectorModel, null);

    //Import and get Transformer
    // (removed unused locals: a never-read JSON string of the exported model
    // and an unreferenced preFilteredData array)
    Transformer transformer = ModelImporter.importAndGetTransformer(exportedModel);

    //compare predictions: run each row through the imported transformer and
    //check it matches Spark's "output" column
    Row[] sparkOutput = chiSqSelectorModel.transform(df).orderBy("id").select("id", "label", "features", "output").collect();
    for (Row row : sparkOutput) {
        Map<String, Object> data = new HashMap<>();
        data.put(chiSqSelectorModel.getFeaturesCol(), ((DenseVector) row.get(2)).toArray());
        transformer.transform(data);
        double[] output = (double[]) data.get(chiSqSelectorModel.getOutputCol());
        // (removed debug System.out.println; assertions alone should decide)
        assertArrayEquals(output, ((DenseVector) row.get(3)).toArray(), 0d);
    }
}
 
开发者ID:flipkart-incubator,项目名称:spark-transformers,代码行数:48,代码来源:ChiSqSelectorBridgeTest.java


注:本文中的org.apache.spark.mllib.linalg.DenseVector类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。