

Java VectorUDT Class Code Examples

This article collects typical usage examples of the Java class org.apache.spark.ml.linalg.VectorUDT. If you are wondering what VectorUDT is for, how to use it, or what working code with it looks like, the curated examples below should help.


The VectorUDT class belongs to the org.apache.spark.ml.linalg package. The sections below present 15 code examples of this class, ordered by popularity.
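
Before diving into the examples, here is a minimal, self-contained sketch of the pattern they all share: declare a schema whose column type is new VectorUDT(), then create a DataFrame whose rows hold org.apache.spark.ml.linalg.Vector values. The class name and local master below are illustrative choices, not taken from any of the projects referenced on this page.

import java.util.Arrays;
import java.util.List;

import org.apache.spark.ml.linalg.VectorUDT;
import org.apache.spark.ml.linalg.Vectors;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.RowFactory;
import org.apache.spark.sql.SparkSession;
import org.apache.spark.sql.types.Metadata;
import org.apache.spark.sql.types.StructField;
import org.apache.spark.sql.types.StructType;

public class VectorUDTQuickStart {
    public static void main(String[] args) {
        SparkSession spark = SparkSession.builder()
                .appName("VectorUDTQuickStart")
                .master("local[*]")
                .getOrCreate();

        // A VectorUDT column stores org.apache.spark.ml.linalg.Vector values.
        StructType schema = new StructType(new StructField[]{
                new StructField("features", new VectorUDT(), false, Metadata.empty())
        });

        // Rows may mix dense and sparse vectors; both share the same UDT.
        List<Row> rows = Arrays.asList(
                RowFactory.create(Vectors.dense(1.0, 2.0, 3.0)),
                RowFactory.create(Vectors.sparse(3, new int[]{0}, new double[]{9.0})));

        Dataset<Row> df = spark.createDataFrame(rows, schema);
        df.printSchema(); // features: vector (nullable = false)
        df.show(false);

        spark.stop();
    }
}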

Example 1: convertToStructField

import org.apache.spark.ml.linalg.VectorUDT; //import the required package/class
/**
 * Converts a FieldInfo descriptor into a Spark SQL StructField.
 *
 * @param info the field descriptor to convert
 * @return the corresponding StructField
 * @throws CantConverException if the field's data type is not supported
 */
public static StructField convertToStructField(FieldInfo info) throws CantConverException {
    if (info.getIndex() != -1) {
        return DataTypes.createStructField(info.getName(), sparkDataType(info.getDataType()), info.isNullable());
    } else {
        switch (info.getDataType()) {
            case FieldInfo.STRING_DATATYPE: {
                return new StructField(info.getName(), DataTypes.createArrayType(DataTypes.StringType), info.isNullable(), Metadata.empty());
            }
            case FieldInfo.DOUBLE_DATATYPE:
            case FieldInfo.INTEGER_DATATYPE:
            case FieldInfo.LONG_DATATYPE: {
                return new StructField(info.getName(), new VectorUDT(), info.isNullable(), Metadata.empty());
            }
            default:
                throw new CantConverException("Illegal data type");
        }
    }
}
 
Author: hays2hong, Project: stonk, Lines: 25, Source: SparkDataFileConverter.java
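
A hypothetical call site for the converter above. The FieldInfo constructor shown here is assumed for illustration only; the actual stonk API may expose the field descriptor differently.

// Hypothetical: assumes a FieldInfo constructor of shape (name, dataType, index, nullable).
FieldInfo info = new FieldInfo("features", FieldInfo.DOUBLE_DATATYPE, -1, false);
// index == -1 combined with a numeric data type maps to a VectorUDT column.
StructField field = SparkDataFileConverter.convertToStructField(info);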

Example 2: binaryBlockToDataFrame

import org.apache.spark.ml.linalg.VectorUDT; //import the required package/class
public static Dataset<Row> binaryBlockToDataFrame(SparkSession sparkSession,
		JavaPairRDD<MatrixIndexes, MatrixBlock> in, MatrixCharacteristics mc, boolean toVector)  
{
	if( !mc.colsKnown() )
		throw new RuntimeException("Number of columns needed to convert binary block to data frame.");
	
	//slice blocks into rows, align and convert into data frame rows
	JavaRDD<Row> rowsRDD = in
		.flatMapToPair(new SliceBinaryBlockToRowsFunction(mc.getRowsPerBlock()))
		.groupByKey().map(new ConvertRowBlocksToRows((int)mc.getCols(), mc.getColsPerBlock(), toVector));
	
	//create data frame schema
	List<StructField> fields = new ArrayList<>();
	fields.add(DataTypes.createStructField(DF_ID_COLUMN, DataTypes.DoubleType, false));
	if( toVector )
		fields.add(DataTypes.createStructField("C1", new VectorUDT(), false));
	else { // row
		for(int i = 1; i <= mc.getCols(); i++)
			fields.add(DataTypes.createStructField("C"+i, DataTypes.DoubleType, false));
	}
	
	//rdd to data frame conversion
	return sparkSession.createDataFrame(rowsRDD.rdd(), DataTypes.createStructType(fields));
}
 
Author: apache, Project: systemml, Lines: 25, Source: RDDConverterUtils.java
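
Note the two schema shapes this method can produce. With toVector == true, each matrix row becomes an ID column plus a single VectorUDT column named C1; with toVector == false, it becomes an ID column plus one DoubleType column per matrix column (C1 through Cn). For a 3-column matrix, the two schemas look roughly like this (a sketch; in SystemML, DF_ID_COLUMN is the row-index column name defined in RDDConverterUtils):

// toVector == true
//  |-- __INDEX: double (nullable = false)
//  |-- C1: vector (nullable = false)

// toVector == false
//  |-- __INDEX: double (nullable = false)
//  |-- C1: double (nullable = false)
//  |-- C2: double (nullable = false)
//  |-- C3: double (nullable = false)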

Example 3: testDataFrameSumDMLVectorWithIDColumn

import org.apache.spark.ml.linalg.VectorUDT; //import the required package/class
@Test
public void testDataFrameSumDMLVectorWithIDColumn() {
	System.out.println("MLContextTest - DataFrame sum DML, vector with ID column");

	List<Tuple2<Double, Vector>> list = new ArrayList<Tuple2<Double, Vector>>();
	list.add(new Tuple2<Double, Vector>(1.0, Vectors.dense(1.0, 2.0, 3.0)));
	list.add(new Tuple2<Double, Vector>(2.0, Vectors.dense(4.0, 5.0, 6.0)));
	list.add(new Tuple2<Double, Vector>(3.0, Vectors.dense(7.0, 8.0, 9.0)));
	JavaRDD<Tuple2<Double, Vector>> javaRddTuple = sc.parallelize(list);

	JavaRDD<Row> javaRddRow = javaRddTuple.map(new DoubleVectorRow());
	List<StructField> fields = new ArrayList<StructField>();
	fields.add(DataTypes.createStructField(RDDConverterUtils.DF_ID_COLUMN, DataTypes.DoubleType, true));
	fields.add(DataTypes.createStructField("C1", new VectorUDT(), true));
	StructType schema = DataTypes.createStructType(fields);
	Dataset<Row> dataFrame = spark.createDataFrame(javaRddRow, schema);

	MatrixMetadata mm = new MatrixMetadata(MatrixFormat.DF_VECTOR_WITH_INDEX);

	Script script = dml("print('sum: ' + sum(M));").in("M", dataFrame, mm);
	setExpectedStdOut("sum: 45.0");
	ml.execute(script);
}
 
Author: apache, Project: systemml, Lines: 24, Source: MLContextTest.java

Example 4: testDataFrameSumPYDMLVectorWithIDColumn

import org.apache.spark.ml.linalg.VectorUDT; //import the required package/class
@Test
public void testDataFrameSumPYDMLVectorWithIDColumn() {
	System.out.println("MLContextTest - DataFrame sum PYDML, vector with ID column");

	List<Tuple2<Double, Vector>> list = new ArrayList<Tuple2<Double, Vector>>();
	list.add(new Tuple2<Double, Vector>(1.0, Vectors.dense(1.0, 2.0, 3.0)));
	list.add(new Tuple2<Double, Vector>(2.0, Vectors.dense(4.0, 5.0, 6.0)));
	list.add(new Tuple2<Double, Vector>(3.0, Vectors.dense(7.0, 8.0, 9.0)));
	JavaRDD<Tuple2<Double, Vector>> javaRddTuple = sc.parallelize(list);

	JavaRDD<Row> javaRddRow = javaRddTuple.map(new DoubleVectorRow());
	List<StructField> fields = new ArrayList<StructField>();
	fields.add(DataTypes.createStructField(RDDConverterUtils.DF_ID_COLUMN, DataTypes.DoubleType, true));
	fields.add(DataTypes.createStructField("C1", new VectorUDT(), true));
	StructType schema = DataTypes.createStructType(fields);
	Dataset<Row> dataFrame = spark.createDataFrame(javaRddRow, schema);

	MatrixMetadata mm = new MatrixMetadata(MatrixFormat.DF_VECTOR_WITH_INDEX);

	Script script = pydml("print('sum: ' + sum(M))").in("M", dataFrame, mm);
	setExpectedStdOut("sum: 45.0");
	ml.execute(script);
}
 
Author: apache, Project: systemml, Lines: 24, Source: MLContextTest.java

Example 5: testDataFrameSumDMLMllibVectorWithIDColumn

import org.apache.spark.ml.linalg.VectorUDT; //import the required package/class
@Test
public void testDataFrameSumDMLMllibVectorWithIDColumn() {
	System.out.println("MLContextTest - DataFrame sum DML, mllib vector with ID column");

	List<Tuple2<Double, org.apache.spark.mllib.linalg.Vector>> list = new ArrayList<Tuple2<Double, org.apache.spark.mllib.linalg.Vector>>();
	list.add(new Tuple2<Double, org.apache.spark.mllib.linalg.Vector>(1.0,
			org.apache.spark.mllib.linalg.Vectors.dense(1.0, 2.0, 3.0)));
	list.add(new Tuple2<Double, org.apache.spark.mllib.linalg.Vector>(2.0,
			org.apache.spark.mllib.linalg.Vectors.dense(4.0, 5.0, 6.0)));
	list.add(new Tuple2<Double, org.apache.spark.mllib.linalg.Vector>(3.0,
			org.apache.spark.mllib.linalg.Vectors.dense(7.0, 8.0, 9.0)));
	JavaRDD<Tuple2<Double, org.apache.spark.mllib.linalg.Vector>> javaRddTuple = sc.parallelize(list);

	JavaRDD<Row> javaRddRow = javaRddTuple.map(new DoubleMllibVectorRow());
	List<StructField> fields = new ArrayList<StructField>();
	fields.add(DataTypes.createStructField(RDDConverterUtils.DF_ID_COLUMN, DataTypes.DoubleType, true));
	fields.add(DataTypes.createStructField("C1", new org.apache.spark.mllib.linalg.VectorUDT(), true));
	StructType schema = DataTypes.createStructType(fields);
	Dataset<Row> dataFrame = spark.createDataFrame(javaRddRow, schema);

	MatrixMetadata mm = new MatrixMetadata(MatrixFormat.DF_VECTOR_WITH_INDEX);

	Script script = dml("print('sum: ' + sum(M));").in("M", dataFrame, mm);
	setExpectedStdOut("sum: 45.0");
	ml.execute(script);
}
 
Author: apache, Project: systemml, Lines: 27, Source: MLContextTest.java
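
Examples 5, 6, 9, and 10 differ from the others in one important way: they use the legacy org.apache.spark.mllib.linalg.VectorUDT rather than org.apache.spark.ml.linalg.VectorUDT. The two UDTs are distinct types and are not interchangeable, so the UDT declared in the schema must match the Vector implementation actually stored in the rows. A minimal sketch of moving between the two vector families (Vectors.fromML and asML are available from Spark 2.0 onward):

// New-style vector (spark.ml): pair with org.apache.spark.ml.linalg.VectorUDT.
org.apache.spark.ml.linalg.Vector mlVec =
        org.apache.spark.ml.linalg.Vectors.dense(1.0, 2.0, 3.0);

// Legacy vector (spark.mllib): pair with org.apache.spark.mllib.linalg.VectorUDT.
org.apache.spark.mllib.linalg.Vector mllibVec =
        org.apache.spark.mllib.linalg.Vectors.fromML(mlVec);

// Converting back: asML() returns an org.apache.spark.ml.linalg.Vector.
assert mllibVec.asML().equals(mlVec);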

Example 6: testDataFrameSumPYDMLMllibVectorWithIDColumn

import org.apache.spark.ml.linalg.VectorUDT; //import the required package/class
@Test
public void testDataFrameSumPYDMLMllibVectorWithIDColumn() {
	System.out.println("MLContextTest - DataFrame sum PYDML, mllib vector with ID column");

	List<Tuple2<Double, org.apache.spark.mllib.linalg.Vector>> list = new ArrayList<Tuple2<Double, org.apache.spark.mllib.linalg.Vector>>();
	list.add(new Tuple2<Double, org.apache.spark.mllib.linalg.Vector>(1.0,
			org.apache.spark.mllib.linalg.Vectors.dense(1.0, 2.0, 3.0)));
	list.add(new Tuple2<Double, org.apache.spark.mllib.linalg.Vector>(2.0,
			org.apache.spark.mllib.linalg.Vectors.dense(4.0, 5.0, 6.0)));
	list.add(new Tuple2<Double, org.apache.spark.mllib.linalg.Vector>(3.0,
			org.apache.spark.mllib.linalg.Vectors.dense(7.0, 8.0, 9.0)));
	JavaRDD<Tuple2<Double, org.apache.spark.mllib.linalg.Vector>> javaRddTuple = sc.parallelize(list);

	JavaRDD<Row> javaRddRow = javaRddTuple.map(new DoubleMllibVectorRow());
	List<StructField> fields = new ArrayList<StructField>();
	fields.add(DataTypes.createStructField(RDDConverterUtils.DF_ID_COLUMN, DataTypes.DoubleType, true));
	fields.add(DataTypes.createStructField("C1", new org.apache.spark.mllib.linalg.VectorUDT(), true));
	StructType schema = DataTypes.createStructType(fields);
	Dataset<Row> dataFrame = spark.createDataFrame(javaRddRow, schema);

	MatrixMetadata mm = new MatrixMetadata(MatrixFormat.DF_VECTOR_WITH_INDEX);

	Script script = pydml("print('sum: ' + sum(M))").in("M", dataFrame, mm);
	setExpectedStdOut("sum: 45.0");
	ml.execute(script);
}
 
Author: apache, Project: systemml, Lines: 27, Source: MLContextTest.java

Example 7: testDataFrameSumDMLVectorWithNoIDColumn

import org.apache.spark.ml.linalg.VectorUDT; //import the required package/class
@Test
public void testDataFrameSumDMLVectorWithNoIDColumn() {
	System.out.println("MLContextTest - DataFrame sum DML, vector with no ID column");

	List<Vector> list = new ArrayList<Vector>();
	list.add(Vectors.dense(1.0, 2.0, 3.0));
	list.add(Vectors.dense(4.0, 5.0, 6.0));
	list.add(Vectors.dense(7.0, 8.0, 9.0));
	JavaRDD<Vector> javaRddVector = sc.parallelize(list);

	JavaRDD<Row> javaRddRow = javaRddVector.map(new VectorRow());
	List<StructField> fields = new ArrayList<StructField>();
	fields.add(DataTypes.createStructField("C1", new VectorUDT(), true));
	StructType schema = DataTypes.createStructType(fields);
	Dataset<Row> dataFrame = spark.createDataFrame(javaRddRow, schema);

	MatrixMetadata mm = new MatrixMetadata(MatrixFormat.DF_VECTOR);

	Script script = dml("print('sum: ' + sum(M));").in("M", dataFrame, mm);
	setExpectedStdOut("sum: 45.0");
	ml.execute(script);
}
 
Author: apache, Project: systemml, Lines: 23, Source: MLContextTest.java

Example 8: testDataFrameSumPYDMLVectorWithNoIDColumn

import org.apache.spark.ml.linalg.VectorUDT; //import the required package/class
@Test
public void testDataFrameSumPYDMLVectorWithNoIDColumn() {
	System.out.println("MLContextTest - DataFrame sum PYDML, vector with no ID column");

	List<Vector> list = new ArrayList<Vector>();
	list.add(Vectors.dense(1.0, 2.0, 3.0));
	list.add(Vectors.dense(4.0, 5.0, 6.0));
	list.add(Vectors.dense(7.0, 8.0, 9.0));
	JavaRDD<Vector> javaRddVector = sc.parallelize(list);

	JavaRDD<Row> javaRddRow = javaRddVector.map(new VectorRow());
	List<StructField> fields = new ArrayList<StructField>();
	fields.add(DataTypes.createStructField("C1", new VectorUDT(), true));
	StructType schema = DataTypes.createStructType(fields);
	Dataset<Row> dataFrame = spark.createDataFrame(javaRddRow, schema);

	MatrixMetadata mm = new MatrixMetadata(MatrixFormat.DF_VECTOR);

	Script script = pydml("print('sum: ' + sum(M))").in("M", dataFrame, mm);
	setExpectedStdOut("sum: 45.0");
	ml.execute(script);
}
 
Author: apache, Project: systemml, Lines: 23, Source: MLContextTest.java

Example 9: testDataFrameSumDMLMllibVectorWithNoIDColumn

import org.apache.spark.ml.linalg.VectorUDT; //import the required package/class
@Test
public void testDataFrameSumDMLMllibVectorWithNoIDColumn() {
	System.out.println("MLContextTest - DataFrame sum DML, mllib vector with no ID column");

	List<org.apache.spark.mllib.linalg.Vector> list = new ArrayList<org.apache.spark.mllib.linalg.Vector>();
	list.add(org.apache.spark.mllib.linalg.Vectors.dense(1.0, 2.0, 3.0));
	list.add(org.apache.spark.mllib.linalg.Vectors.dense(4.0, 5.0, 6.0));
	list.add(org.apache.spark.mllib.linalg.Vectors.dense(7.0, 8.0, 9.0));
	JavaRDD<org.apache.spark.mllib.linalg.Vector> javaRddVector = sc.parallelize(list);

	JavaRDD<Row> javaRddRow = javaRddVector.map(new MllibVectorRow());
	List<StructField> fields = new ArrayList<StructField>();
	fields.add(DataTypes.createStructField("C1", new org.apache.spark.mllib.linalg.VectorUDT(), true));
	StructType schema = DataTypes.createStructType(fields);
	Dataset<Row> dataFrame = spark.createDataFrame(javaRddRow, schema);

	MatrixMetadata mm = new MatrixMetadata(MatrixFormat.DF_VECTOR);

	Script script = dml("print('sum: ' + sum(M));").in("M", dataFrame, mm);
	setExpectedStdOut("sum: 45.0");
	ml.execute(script);
}
 
Author: apache, Project: systemml, Lines: 23, Source: MLContextTest.java

Example 10: testDataFrameSumPYDMLMllibVectorWithNoIDColumn

import org.apache.spark.ml.linalg.VectorUDT; //import the required package/class
@Test
public void testDataFrameSumPYDMLMllibVectorWithNoIDColumn() {
	System.out.println("MLContextTest - DataFrame sum PYDML, mllib vector with no ID column");

	List<org.apache.spark.mllib.linalg.Vector> list = new ArrayList<org.apache.spark.mllib.linalg.Vector>();
	list.add(org.apache.spark.mllib.linalg.Vectors.dense(1.0, 2.0, 3.0));
	list.add(org.apache.spark.mllib.linalg.Vectors.dense(4.0, 5.0, 6.0));
	list.add(org.apache.spark.mllib.linalg.Vectors.dense(7.0, 8.0, 9.0));
	JavaRDD<org.apache.spark.mllib.linalg.Vector> javaRddVector = sc.parallelize(list);

	JavaRDD<Row> javaRddRow = javaRddVector.map(new MllibVectorRow());
	List<StructField> fields = new ArrayList<StructField>();
	fields.add(DataTypes.createStructField("C1", new org.apache.spark.mllib.linalg.VectorUDT(), true));
	StructType schema = DataTypes.createStructType(fields);
	Dataset<Row> dataFrame = spark.createDataFrame(javaRddRow, schema);

	MatrixMetadata mm = new MatrixMetadata(MatrixFormat.DF_VECTOR);

	Script script = pydml("print('sum: ' + sum(M))").in("M", dataFrame, mm);
	setExpectedStdOut("sum: 45.0");
	ml.execute(script);
}
 
Author: apache, Project: systemml, Lines: 23, Source: MLContextTest.java

Example 11: testDataFrameSumDMLVectorWithIDColumnNoFormatSpecified

import org.apache.spark.ml.linalg.VectorUDT; //import the required package/class
@Test
public void testDataFrameSumDMLVectorWithIDColumnNoFormatSpecified() {
	System.out.println("MLContextTest - DataFrame sum DML, vector with ID column, no format specified");

	List<Tuple2<Double, Vector>> list = new ArrayList<Tuple2<Double, Vector>>();
	list.add(new Tuple2<Double, Vector>(1.0, Vectors.dense(1.0, 2.0, 3.0)));
	list.add(new Tuple2<Double, Vector>(2.0, Vectors.dense(4.0, 5.0, 6.0)));
	list.add(new Tuple2<Double, Vector>(3.0, Vectors.dense(7.0, 8.0, 9.0)));
	JavaRDD<Tuple2<Double, Vector>> javaRddTuple = sc.parallelize(list);

	JavaRDD<Row> javaRddRow = javaRddTuple.map(new DoubleVectorRow());
	List<StructField> fields = new ArrayList<StructField>();
	fields.add(DataTypes.createStructField(RDDConverterUtils.DF_ID_COLUMN, DataTypes.DoubleType, true));
	fields.add(DataTypes.createStructField("C1", new VectorUDT(), true));
	StructType schema = DataTypes.createStructType(fields);
	Dataset<Row> dataFrame = spark.createDataFrame(javaRddRow, schema);

	Script script = dml("print('sum: ' + sum(M));").in("M", dataFrame);
	setExpectedStdOut("sum: 45.0");
	ml.execute(script);
}
 
Author: apache, Project: systemml, Lines: 22, Source: MLContextTest.java
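
When no MatrixMetadata is passed, as in this example and the three that follow, the MLContext API has to infer the DataFrame format on its own, presumably by inspecting the schema for the ID column and for a VectorUDT column; these "no format specified" tests exercise exactly that inference path.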

Example 12: testDataFrameSumPYDMLVectorWithIDColumnNoFormatSpecified

import org.apache.spark.ml.linalg.VectorUDT; //import the required package/class
@Test
public void testDataFrameSumPYDMLVectorWithIDColumnNoFormatSpecified() {
	System.out.println("MLContextTest - DataFrame sum PYDML, vector with ID column, no format specified");

	List<Tuple2<Double, Vector>> list = new ArrayList<Tuple2<Double, Vector>>();
	list.add(new Tuple2<Double, Vector>(1.0, Vectors.dense(1.0, 2.0, 3.0)));
	list.add(new Tuple2<Double, Vector>(2.0, Vectors.dense(4.0, 5.0, 6.0)));
	list.add(new Tuple2<Double, Vector>(3.0, Vectors.dense(7.0, 8.0, 9.0)));
	JavaRDD<Tuple2<Double, Vector>> javaRddTuple = sc.parallelize(list);

	JavaRDD<Row> javaRddRow = javaRddTuple.map(new DoubleVectorRow());
	List<StructField> fields = new ArrayList<StructField>();
	fields.add(DataTypes.createStructField(RDDConverterUtils.DF_ID_COLUMN, DataTypes.DoubleType, true));
	fields.add(DataTypes.createStructField("C1", new VectorUDT(), true));
	StructType schema = DataTypes.createStructType(fields);
	Dataset<Row> dataFrame = spark.createDataFrame(javaRddRow, schema);

	Script script = pydml("print('sum: ' + sum(M))").in("M", dataFrame);
	setExpectedStdOut("sum: 45.0");
	ml.execute(script);
}
 
Author: apache, Project: systemml, Lines: 22, Source: MLContextTest.java

Example 13: testDataFrameSumDMLVectorWithNoIDColumnNoFormatSpecified

import org.apache.spark.ml.linalg.VectorUDT; //import the required package/class
@Test
public void testDataFrameSumDMLVectorWithNoIDColumnNoFormatSpecified() {
	System.out.println("MLContextTest - DataFrame sum DML, vector with no ID column, no format specified");

	List<Vector> list = new ArrayList<Vector>();
	list.add(Vectors.dense(1.0, 2.0, 3.0));
	list.add(Vectors.dense(4.0, 5.0, 6.0));
	list.add(Vectors.dense(7.0, 8.0, 9.0));
	JavaRDD<Vector> javaRddVector = sc.parallelize(list);

	JavaRDD<Row> javaRddRow = javaRddVector.map(new VectorRow());
	List<StructField> fields = new ArrayList<StructField>();
	fields.add(DataTypes.createStructField("C1", new VectorUDT(), true));
	StructType schema = DataTypes.createStructType(fields);
	Dataset<Row> dataFrame = spark.createDataFrame(javaRddRow, schema);

	Script script = dml("print('sum: ' + sum(M));").in("M", dataFrame);
	setExpectedStdOut("sum: 45.0");
	ml.execute(script);
}
 
Author: apache, Project: systemml, Lines: 21, Source: MLContextTest.java

Example 14: testDataFrameSumPYDMLVectorWithNoIDColumnNoFormatSpecified

import org.apache.spark.ml.linalg.VectorUDT; //import the required package/class
@Test
public void testDataFrameSumPYDMLVectorWithNoIDColumnNoFormatSpecified() {
	System.out.println("MLContextTest - DataFrame sum PYDML, vector with no ID column, no format specified");

	List<Vector> list = new ArrayList<Vector>();
	list.add(Vectors.dense(1.0, 2.0, 3.0));
	list.add(Vectors.dense(4.0, 5.0, 6.0));
	list.add(Vectors.dense(7.0, 8.0, 9.0));
	JavaRDD<Vector> javaRddVector = sc.parallelize(list);

	JavaRDD<Row> javaRddRow = javaRddVector.map(new VectorRow());
	List<StructField> fields = new ArrayList<StructField>();
	fields.add(DataTypes.createStructField("C1", new VectorUDT(), true));
	StructType schema = DataTypes.createStructType(fields);
	Dataset<Row> dataFrame = spark.createDataFrame(javaRddRow, schema);

	Script script = pydml("print('sum: ' + sum(M))").in("M", dataFrame);
	setExpectedStdOut("sum: 45.0");
	ml.execute(script);
}
 
Author: apache, Project: systemml, Lines: 21, Source: MLContextTest.java

Example 15: testMinMaxScaler

import org.apache.spark.ml.linalg.VectorUDT; //import the required package/class
@Test
public void testMinMaxScaler() {
    //prepare data
    JavaRDD<Row> jrdd = jsc.parallelize(Arrays.asList(
            RowFactory.create(1.0, Vectors.dense(data[0])),
            RowFactory.create(2.0, Vectors.dense(data[1])),
            RowFactory.create(3.0, Vectors.dense(data[2])),
            RowFactory.create(4.0, Vectors.dense(data[3]))
    ));

    StructType schema = new StructType(new StructField[]{
            new StructField("label", DataTypes.DoubleType, false, Metadata.empty()),
            new StructField("features", new VectorUDT(), false, Metadata.empty())
    });

    Dataset<Row> df = spark.createDataFrame(jrdd, schema);

    //train model in spark
    MinMaxScalerModel sparkModel = new MinMaxScaler()
            .setInputCol("features")
            .setOutputCol("scaled")
            .setMin(-5)
            .setMax(5)
            .fit(df);


    //Export model, import it back and get transformer
    byte[] exportedModel = ModelExporter.export(sparkModel);
    final Transformer transformer = ModelImporter.importAndGetTransformer(exportedModel);

    //compare predictions
    List<Row> sparkOutput = sparkModel.transform(df).orderBy("label").select("features", "scaled").collectAsList();
    assertCorrectness(sparkOutput, expected, transformer);
}
 
Author: flipkart-incubator, Project: spark-transformers, Lines: 35, Source: MinMaxScalerBridgeTest.java
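
For reference, Spark's MinMaxScaler rescales each feature linearly into [min, max] using per-feature column statistics: Rescaled(x) = (x - E_min) / (E_max - E_min) * (max - min) + min, so with setMin(-5) and setMax(5) as above, every feature of the transformed data lands in [-5, 5]. (Per the Spark documentation, a feature with E_max == E_min maps to the midpoint 0.5 * (min + max).)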


Note: The org.apache.spark.ml.linalg.VectorUDT class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors, and distribution and use are subject to each project's License. Please do not reproduce without permission.