This page collects typical usage examples of the Java class org.deeplearning4j.nn.conf.layers.RBM. If you are unsure what the RBM class does, or how and where to use it, the curated examples below may help.
The RBM class belongs to the org.deeplearning4j.nn.conf.layers package. Twelve code examples of the class are shown below, ordered by popularity.
Example 1: getConfiguration
import org.deeplearning4j.nn.conf.layers.RBM; // import of the required package/class
protected MultiLayerConfiguration getConfiguration()
{
    int hiddenLayerNodes = parameters.getHiddeLayerNodes()[0];
    final RBM hiddenLayer = new RBM.Builder(RBM.HiddenUnit.RECTIFIED, RBM.VisibleUnit.GAUSSIAN)
            .nIn(parameters.getInputSize()).nOut(hiddenLayerNodes).weightInit(WeightInit.XAVIER).k(1)
            .activation("relu").lossFunction(LossFunctions.LossFunction.RMSE_XENT).updater(Updater.ADAGRAD)
            .dropOut(0.5).build();
    final OutputLayer outputLayer = new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT).nIn(hiddenLayerNodes)
            .nOut(parameters.getOutputSize()).activation("softmax").build();
    return new NeuralNetConfiguration.Builder().seed(parameters.getSeed()).iterations(parameters.getIterations())
            .learningRate(parameters.getLearningRate()).optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT)
            .l2(2e-4).regularization(true).momentum(0.9).useDropConnect(true).list(2).layer(0, hiddenLayer)
            .layer(1, outputLayer).build();
}
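A minimal usage sketch for this configuration, called from within the owning class. The iterator name trainIter is hypothetical, standing in for any prepared DataSetIterator:

// Minimal sketch; `trainIter` is a hypothetical, pre-built DataSetIterator
MultiLayerNetwork network = new MultiLayerNetwork(getConfiguration());
network.init();
network.fit(trainIter); // trains the two-layer network defined above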
Example 2: getConfiguration
import org.deeplearning4j.nn.conf.layers.RBM; // import of the required package/class
private static MultiLayerConfiguration getConfiguration(DataFrame dataset) {
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .seed(seed)
            .constrainGradientToUnitNorm(true)
            .optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT)
            .list(4)
            .layer(0, new RBM.Builder(RBM.HiddenUnit.BINARY, RBM.VisibleUnit.BINARY)
                    .weightInit(WeightInit.XAVIER)
                    .nIn(rows * columns).nOut(600).build())
            .layer(1, new RBM.Builder(RBM.HiddenUnit.BINARY, RBM.VisibleUnit.BINARY)
                    .weightInit(WeightInit.XAVIER)
                    .nIn(600).nOut(250).build())
            .layer(2, new RBM.Builder(RBM.HiddenUnit.BINARY, RBM.VisibleUnit.BINARY)
                    .weightInit(WeightInit.XAVIER)
                    .nIn(250).nOut(200).build())
            .layer(3, new OutputLayer.Builder(LossFunctions.LossFunction.RMSE_XENT)
                    .weightInit(WeightInit.XAVIER)
                    .activation("softmax")
                    .nIn(200).nOut(AUTOMATIC).build())
            .pretrain(true).backprop(false)
            .build();
    return conf;
}
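Because this configuration sets pretrain(true) and backprop(false), fitting performs only unsupervised, layerwise contrastive-divergence pretraining of the three RBMs, with no supervised fine-tuning. AUTOMATIC is a constant defined by the surrounding project and is left as-is. A hedged usage sketch, with hypothetical dataset and trainIter inputs:

// Hedged sketch; `dataset` and `trainIter` are hypothetical inputs
MultiLayerNetwork net = new MultiLayerNetwork(getConfiguration(dataset));
net.init();
net.fit(trainIter); // layerwise RBM pretraining only, no backprop fine-tuning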
Example 3: main
import org.deeplearning4j.nn.conf.layers.RBM; // import of the required package/class
public static void main(String[] args) throws Exception {
    final int numRows = 28;
    final int numColumns = 28;
    int seed = 123;
    int numSamples = MnistDataFetcher.NUM_EXAMPLES;
    int batchSize = 1000;
    int iterations = 1;
    int listenerFreq = iterations/5; // note: integer division, so this is 0 here

    log.info("Load data....");
    DataSetIterator iter = new MnistDataSetIterator(batchSize, numSamples, true);

    log.info("Build model....");
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .seed(seed)
            .iterations(iterations)
            .optimizationAlgo(OptimizationAlgorithm.LINE_GRADIENT_DESCENT)
            .list(8)
            .layer(0, new RBM.Builder().nIn(numRows * numColumns).nOut(2000).lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
            .layer(1, new RBM.Builder().nIn(2000).nOut(1000).lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
            .layer(2, new RBM.Builder().nIn(1000).nOut(500).lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
            .layer(3, new RBM.Builder().nIn(500).nOut(30).lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
            .layer(4, new RBM.Builder().nIn(30).nOut(500).lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
            .layer(5, new RBM.Builder().nIn(500).nOut(1000).lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
            .layer(6, new RBM.Builder().nIn(1000).nOut(2000).lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
            .layer(7, new OutputLayer.Builder(LossFunctions.LossFunction.MSE).activation(Activation.SIGMOID).nIn(2000).nOut(numRows*numColumns).build())
            .pretrain(true).backprop(true)
            .build();
    MultiLayerNetwork model = new MultiLayerNetwork(conf);
    model.init();
    model.setListeners(new ScoreIterationListener(listenerFreq));

    log.info("Train model....");
    while (iter.hasNext()) {
        DataSet next = iter.next();
        // autoencoder training: the features are used as their own labels
        model.fit(new DataSet(next.getFeatureMatrix(), next.getFeatureMatrix()));
    }
}
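Continuing from the main method above, the trained autoencoder can be asked to reconstruct a digit. A hedged sketch using the standard MultiLayerNetwork feed-forward call:

// Hedged sketch: reconstruct one held-out MNIST digit with the trained autoencoder
DataSet sample = new MnistDataSetIterator(1, 1, false).next();
INDArray reconstruction = model.output(sample.getFeatureMatrix());
// `reconstruction` has shape [1, 784], comparable pixel-by-pixel to the input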
Example 4: getConfiguration
import org.deeplearning4j.nn.conf.layers.RBM; // import of the required package/class
@Override
protected MultiLayerConfiguration getConfiguration()
{
    final int[] hiddenLayerNodes = parameters.getHiddeLayerNodes();
    final int nLayers = hiddenLayerNodes.length;
    final ListBuilder list = new NeuralNetConfiguration.Builder().seed(parameters.getSeed())
            .iterations(parameters.getIterations()).optimizationAlgo(OptimizationAlgorithm.LINE_GRADIENT_DESCENT)
            .list(nLayers);
    for (int i = 0; i < nLayers; i++)
    {
        int nIn;
        if (i == 0)
        {
            nIn = parameters.getInputSize();
        }
        else
        {
            nIn = hiddenLayerNodes[i - 1];
        }
        if (i < nLayers - 1)
        {
            final RBM hiddenLayer = new RBM.Builder().nIn(nIn).nOut(hiddenLayerNodes[i])
                    .lossFunction(LossFunctions.LossFunction.RMSE_XENT).build();
            list.layer(i, hiddenLayer);
        }
        else
        {
            final OutputLayer outputLayer = new OutputLayer.Builder(LossFunctions.LossFunction.RMSE_XENT)
                    .nIn(nIn).nOut(parameters.getOutputSize()).build();
            list.layer(nLayers - 1, outputLayer);
        }
    }
    return list.pretrain(true).backprop(true).build();
}
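A worked trace of the loop, under hypothetical parameter values, makes the indexing easier to follow:

// Worked example (hypothetical values): inputSize = 784, hiddenLayerNodes = {500, 100}
// i = 0: RBM          nIn = 784 (inputSize)            nOut = 500 (hiddenLayerNodes[0])
// i = 1: OutputLayer  nIn = 500 (hiddenLayerNodes[0])  nOut = parameters.getOutputSize()
// The last index always becomes the output layer, so the final entry of
// hiddenLayerNodes (100 here) is never used as a layer width.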
Example 5: getModel
import org.deeplearning4j.nn.conf.layers.RBM; // import of the required package/class
public static MultiLayerNetwork getModel(int numInputs) {
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .seed(seed)
            .iterations(iterations)
            .gradientNormalization(GradientNormalization.ClipElementWiseAbsoluteValue)
            .gradientNormalizationThreshold(1.0)
            .regularization(true)
            .dropOut(Config.DROPOUT)
            .updater(Config.UPDATER)
            .adamMeanDecay(0.5)
            .adamVarDecay(0.5)
            .weightInit(WeightInit.XAVIER)
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .list()
            .layer(0, new RBM.Builder(RBM.HiddenUnit.BINARY, RBM.VisibleUnit.GAUSSIAN)
                    .nIn(numInputs).nOut(2750).dropOut(0.75)
                    .activation(Activation.RELU).build())
            .layer(1, new RBM.Builder(RBM.HiddenUnit.BINARY, RBM.VisibleUnit.BINARY)
                    .nIn(2750).nOut(2000)
                    .activation(Activation.RELU).build())
            .layer(2, new RBM.Builder(RBM.HiddenUnit.BINARY, RBM.VisibleUnit.BINARY)
                    .nIn(2000).nOut(1000)
                    .activation(Activation.RELU).build())
            .layer(3, new RBM.Builder(RBM.HiddenUnit.BINARY, RBM.VisibleUnit.BINARY)
                    .nIn(1000).nOut(200)
                    .activation(Activation.RELU).build())
            .layer(4, new OutputLayer.Builder(Config.LOSS_FUNCTION)
                    .nIn(200).nOut(Config.NUM_OUTPUTS).updater(Config.UPDATER)
                    .adamMeanDecay(0.6).adamVarDecay(0.7)
                    .build())
            .pretrain(true).backprop(true)
            .build();
    return new MultiLayerNetwork(conf);
}
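The Config constants here come from the surrounding project, not from DL4J. Purely illustrative, hypothetical values they might hold:

// Hypothetical, project-defined values (assumptions, not DL4J API):
// Config.DROPOUT       = 0.5
// Config.UPDATER       = Updater.ADAM   (consistent with the adamMeanDecay/adamVarDecay calls)
// Config.LOSS_FUNCTION = LossFunctions.LossFunction.MCXENT
// Config.NUM_OUTPUTS   = 2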
Example 6: getConfiguration
import org.deeplearning4j.nn.conf.layers.RBM; // import of the required package/class
private static MultiLayerConfiguration getConfiguration() {
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .seed(11L) // seed to lock in weight initialization for tuning
            .iterations(100) // # training iterations predict/classify & backprop
            .learningRate(1e-3f) // optimization step size
            .optimizationAlgo(OptimizationAlgorithm.LINE_GRADIENT_DESCENT) // backprop method (calculates the gradients)
            .momentum(0.9)
            .constrainGradientToUnitNorm(true)
            .useDropConnect(true)
            .list(2) // # NN layers (does not count input layer)
            .layer(0, new RBM.Builder(RBM.HiddenUnit.RECTIFIED, RBM.VisibleUnit.GAUSSIAN)
                    .nIn(4) // # input nodes
                    .nOut(3) // # fully connected hidden layer nodes; add to list if multiple layers
                    .weightInit(WeightInit.XAVIER)
                    .activation("relu")
                    .lossFunction(LossFunctions.LossFunction.RMSE_XENT)
                    .updater(Updater.ADAGRAD)
                    .k(1) // # contrastive divergence iterations
                    .dropOut(0.5)
                    .build()) // NN layer type
            .layer(1, new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                    .nIn(3) // # input nodes
                    .nOut(3) // # output nodes
                    .activation("softmax")
                    .weightInit(WeightInit.XAVIER)
                    .updater(Updater.ADAGRAD)
                    .dropOut(0.5)
                    .build()) // NN layer type
            .build();
    return conf;
}
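This 4-in, 3-out layout matches the Iris dataset (4 features, 3 classes). A hedged training sketch against that dataset:

// Hedged sketch: fit the configuration on Iris in one full batch of 150 samples
DataSetIterator irisIter = new IrisDataSetIterator(150, 150);
MultiLayerNetwork net = new MultiLayerNetwork(getConfiguration());
net.init();
net.fit(irisIter);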
Example 7: deepBeliefNetwork
import org.deeplearning4j.nn.conf.layers.RBM; // import of the required package/class
private static MultiLayerNetwork deepBeliefNetwork(int seed,
        int iterations, int numRows, int numColumns, int outputNum) {
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .seed(seed)
            .gradientNormalization(GradientNormalization.ClipElementWiseAbsoluteValue)
            .gradientNormalizationThreshold(1.0)
            .iterations(iterations)
            .momentum(0.5)
            .momentumAfter(Collections.singletonMap(3, 0.9))
            .optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT)
            .list(4)
            .layer(0, new RBM.Builder().nIn(numRows * numColumns).nOut(500)
                    .weightInit(WeightInit.XAVIER)
                    .lossFunction(LossFunction.RMSE_XENT)
                    .visibleUnit(RBM.VisibleUnit.BINARY)
                    .hiddenUnit(RBM.HiddenUnit.BINARY).build())
            .layer(1, new RBM.Builder().nIn(500).nOut(250)
                    .weightInit(WeightInit.XAVIER)
                    .lossFunction(LossFunction.RMSE_XENT)
                    .visibleUnit(RBM.VisibleUnit.BINARY)
                    .hiddenUnit(RBM.HiddenUnit.BINARY).build())
            .layer(2, new RBM.Builder().nIn(250).nOut(200)
                    .weightInit(WeightInit.XAVIER)
                    .lossFunction(LossFunction.RMSE_XENT)
                    .visibleUnit(RBM.VisibleUnit.BINARY)
                    .hiddenUnit(RBM.HiddenUnit.BINARY).build())
            .layer(3, new OutputLayer.Builder(LossFunction.NEGATIVELOGLIKELIHOOD)
                    .activation("softmax").nIn(200).nOut(outputNum)
                    .build())
            .pretrain(true).backprop(false)
            .build();
    MultiLayerNetwork model = new MultiLayerNetwork(conf);
    return model;
}
Author: PacktPublishing, Project: Machine-Learning-End-to-Endguide-for-Java-developers, Lines of code: 42, Source: NeuralNetworks.java
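A hedged sketch of pretraining and scoring this DBN on MNIST, following the manual-Evaluation pattern used in Example 12 below; the iterator arguments are illustrative:

// Hedged sketch: pretrain on MNIST, then score the softmax head manually
MultiLayerNetwork model = deepBeliefNetwork(123, 5, 28, 28, 10);
model.init();
model.fit(new MnistDataSetIterator(100, 1000, true)); // binarized inputs for binary visible units
DataSet test = new MnistDataSetIterator(500, 500, false).next();
Evaluation eval = new Evaluation(10);
eval.eval(test.getLabels(), model.output(test.getFeatureMatrix()));
System.out.println(eval.stats());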
Example 8: DeepAutoEncoderExample
import org.deeplearning4j.nn.conf.layers.RBM; // import of the required package/class
public DeepAutoEncoderExample() {
    try {
        int seed = 123;
        int numberOfIterations = 1;
        iterator = new MnistDataSetIterator(1000, MnistDataFetcher.NUM_EXAMPLES, true);
        MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
                .seed(seed)
                .iterations(numberOfIterations)
                .optimizationAlgo(OptimizationAlgorithm.LINE_GRADIENT_DESCENT)
                .list()
                .layer(0, new RBM.Builder().nIn(numberOfRows * numberOfColumns).nOut(1000)
                        .lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
                .layer(1, new RBM.Builder().nIn(1000).nOut(500)
                        .lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
                .layer(2, new RBM.Builder().nIn(500).nOut(250)
                        .lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
                .layer(3, new RBM.Builder().nIn(250).nOut(100)
                        .lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
                .layer(4, new RBM.Builder().nIn(100).nOut(30)
                        .lossFunction(LossFunctions.LossFunction.RMSE_XENT).build()) // encoding stops
                .layer(5, new RBM.Builder().nIn(30).nOut(100)
                        .lossFunction(LossFunctions.LossFunction.RMSE_XENT).build()) // decoding starts
                .layer(6, new RBM.Builder().nIn(100).nOut(250)
                        .lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
                .layer(7, new RBM.Builder().nIn(250).nOut(500)
                        .lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
                .layer(8, new RBM.Builder().nIn(500).nOut(1000)
                        .lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
                .layer(9, new OutputLayer.Builder(LossFunctions.LossFunction.RMSE_XENT)
                        .nIn(1000).nOut(numberOfRows * numberOfColumns).build())
                .pretrain(true).backprop(true)
                .build();
        model = new MultiLayerNetwork(conf);
        model.init();
        model.setListeners(Collections.singletonList(
                (IterationListener) new ScoreIterationListener()));
        while (iterator.hasNext()) {
            DataSet dataSet = iterator.next();
            model.fit(new DataSet(dataSet.getFeatureMatrix(),
                    dataSet.getFeatureMatrix()));
        }
        modelFile = new File("savedModel");
        ModelSerializer.writeModel(model, modelFile, true);
    } catch (IOException ex) {
        ex.printStackTrace();
    }
}
Author: PacktPublishing, Project: Machine-Learning-End-to-Endguide-for-Java-developers, Lines of code: 55, Source: DeepAutoEncoderExample.java
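The serialized model can later be reloaded with the matching ModelSerializer call:

// Hedged sketch: restore the autoencoder saved above
MultiLayerNetwork restored = ModelSerializer.restoreMultiLayerNetwork(new File("savedModel"));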
Example 9: main
import org.deeplearning4j.nn.conf.layers.RBM; // import of the required package/class
public static void main(String[] args) throws Exception {
    final int numRows = 28;
    final int numColumns = 28;
    int seed = 123;
    int numSamples = MnistDataFetcher.NUM_EXAMPLES;
    int batchSize = 1000;
    int iterations = 1;
    int listenerFreq = iterations/5;

    log.info("Load data....");
    DataSetIterator iter = new MnistDataSetIterator(batchSize, numSamples, true);

    log.info("Build model....");
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .seed(seed)
            .iterations(iterations)
            .optimizationAlgo(OptimizationAlgorithm.LINE_GRADIENT_DESCENT)
            .list(10)
            .layer(0, new RBM.Builder().nIn(numRows * numColumns).nOut(1000).lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
            .layer(1, new RBM.Builder().nIn(1000).nOut(500).lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
            .layer(2, new RBM.Builder().nIn(500).nOut(250).lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
            .layer(3, new RBM.Builder().nIn(250).nOut(100).lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
            .layer(4, new RBM.Builder().nIn(100).nOut(30).lossFunction(LossFunctions.LossFunction.RMSE_XENT).build()) // encoding stops
            .layer(5, new RBM.Builder().nIn(30).nOut(100).lossFunction(LossFunctions.LossFunction.RMSE_XENT).build()) // decoding starts
            .layer(6, new RBM.Builder().nIn(100).nOut(250).lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
            .layer(7, new RBM.Builder().nIn(250).nOut(500).lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
            .layer(8, new RBM.Builder().nIn(500).nOut(1000).lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
            .layer(9, new OutputLayer.Builder(LossFunctions.LossFunction.RMSE_XENT).nIn(1000).nOut(numRows*numColumns).build())
            .pretrain(true).backprop(true)
            .build();
    MultiLayerNetwork model = new MultiLayerNetwork(conf);
    model.init();
    model.setListeners(Arrays.asList((IterationListener) new ScoreIterationListener(listenerFreq)));

    log.info("Train model....");
    while (iter.hasNext()) {
        DataSet next = iter.next();
        model.fit(new DataSet(next.getFeatureMatrix(), next.getFeatureMatrix()));
    }
}
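Beyond reconstruction, the 30-unit bottleneck (layer 4's output) can serve as a compact code for each digit. A hedged sketch using MultiLayerNetwork's partial feed-forward:

// Hedged sketch: run layers 0..4 only to obtain the 30-dimensional encoding
DataSet sample = new MnistDataSetIterator(1, 1, false).next();
INDArray code = model.activateSelectedLayers(0, 4, sample.getFeatureMatrix());
// `code` has shape [1, 30]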
Example 10: main
import org.deeplearning4j.nn.conf.layers.RBM; // import of the required package/class
public static void main(String[] args) throws IOException {
    Nd4j.MAX_SLICES_TO_PRINT = -1;
    Nd4j.MAX_ELEMENTS_PER_SLICE = -1;
    Nd4j.ENFORCE_NUMERICAL_STABILITY = true;
    final int numRows = 4;
    final int numColumns = 1;
    int outputNum = 10;
    int numSamples = 150;
    int batchSize = 150;
    int iterations = 100;
    int seed = 123;
    int listenerFreq = iterations/2;

    log.info("Load data....");
    DataSetIterator iter = new IrisDataSetIterator(batchSize, numSamples);
    DataSet iris = iter.next();
    iris.normalizeZeroMeanZeroUnitVariance();

    log.info("Build model....");
    NeuralNetConfiguration conf = new NeuralNetConfiguration.Builder().regularization(true)
            .miniBatch(true)
            .layer(new RBM.Builder().l2(1e-1).l1(1e-3)
                    .nIn(numRows * numColumns)
                    .nOut(outputNum)
                    .activation("relu")
                    .weightInit(WeightInit.RELU)
                    .lossFunction(LossFunctions.LossFunction.RECONSTRUCTION_CROSSENTROPY).k(3)
                    .hiddenUnit(HiddenUnit.RECTIFIED).visibleUnit(VisibleUnit.GAUSSIAN)
                    .updater(Updater.ADAGRAD).gradientNormalization(GradientNormalization.ClipL2PerLayer)
                    .build())
            .seed(seed)
            .iterations(iterations)
            .learningRate(1e-3)
            .optimizationAlgo(OptimizationAlgorithm.LBFGS)
            .build();
    Layer model = LayerFactories.getFactory(conf.getLayer()).create(conf);
    model.setListeners(new ScoreIterationListener(listenerFreq));

    log.info("Evaluate weights....");
    INDArray w = model.getParam(DefaultParamInitializer.WEIGHT_KEY);
    log.info("Weights: " + w);

    log.info("Scaling the dataset");
    iris.scale();

    log.info("Train model....");
    for (int i = 0; i < 20; i++) {
        log.info("Epoch " + i + ":");
        model.fit(iris.getFeatureMatrix());
    }
}
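Since this example trains a single RBM Layer rather than a MultiLayerNetwork, its hidden representation can be read off directly. A hedged sketch against the old single-argument Layer.activate API of this DL4J era:

// Hedged sketch (old DL4J Layer API): rectified hidden activations for the scaled Iris features
INDArray hidden = model.activate(iris.getFeatureMatrix());
// `hidden` has shape [150, 10], one row of hidden-unit activations per flower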
Example 11: main
import org.deeplearning4j.nn.conf.layers.RBM; // import of the required package/class
public static void main(String[] args) throws Exception {
    final int numRows = 28;
    final int numColumns = 28;
    int seed = 123;
    int numSamples = MnistDataFetcher.NUM_EXAMPLES;
    int batchSize = 1000;
    int iterations = 1;
    int listenerFreq = iterations/5;

    log.info("Load data....");
    DataSetIterator iter = new MnistDataSetIterator(batchSize, numSamples, true);

    log.info("Build model....");
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .seed(seed)
            .iterations(iterations)
            .optimizationAlgo(OptimizationAlgorithm.LINE_GRADIENT_DESCENT)
            .list()
            .layer(0, new RBM.Builder().nIn(numRows * numColumns).nOut(1000).lossFunction(LossFunctions.LossFunction.KL_DIVERGENCE).build())
            .layer(1, new RBM.Builder().nIn(1000).nOut(500).lossFunction(LossFunctions.LossFunction.KL_DIVERGENCE).build())
            .layer(2, new RBM.Builder().nIn(500).nOut(250).lossFunction(LossFunctions.LossFunction.KL_DIVERGENCE).build())
            .layer(3, new RBM.Builder().nIn(250).nOut(100).lossFunction(LossFunctions.LossFunction.KL_DIVERGENCE).build())
            .layer(4, new RBM.Builder().nIn(100).nOut(30).lossFunction(LossFunctions.LossFunction.KL_DIVERGENCE).build())
            .layer(5, new RBM.Builder().nIn(30).nOut(100).lossFunction(LossFunctions.LossFunction.KL_DIVERGENCE).build())
            .layer(6, new RBM.Builder().nIn(100).nOut(250).lossFunction(LossFunctions.LossFunction.KL_DIVERGENCE).build())
            .layer(7, new RBM.Builder().nIn(250).nOut(500).lossFunction(LossFunctions.LossFunction.KL_DIVERGENCE).build())
            .layer(8, new RBM.Builder().nIn(500).nOut(1000).lossFunction(LossFunctions.LossFunction.KL_DIVERGENCE).build())
            .layer(9, new OutputLayer.Builder(LossFunctions.LossFunction.MSE).activation(Activation.SIGMOID).nIn(1000).nOut(numRows*numColumns).build())
            .pretrain(true).backprop(true)
            .build();
    MultiLayerNetwork model = new MultiLayerNetwork(conf);
    model.init();
    model.setListeners(new ScoreIterationListener(listenerFreq));

    log.info("Train model....");
    while (iter.hasNext()) {
        DataSet next = iter.next();
        model.fit(new DataSet(next.getFeatureMatrix(), next.getFeatureMatrix()));
    }
}
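To compare this KL-divergence variant against the RMSE_XENT autoencoders above, the model's score on a reconstruction pair gives a single number to track. A hedged sketch:

// Hedged sketch: average reconstruction score on one fresh batch
DataSet batch = new MnistDataSetIterator(100, 100, false).next();
double score = model.score(new DataSet(batch.getFeatureMatrix(), batch.getFeatureMatrix()));
log.info("Reconstruction score: " + score);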
Example 12: testFromSvmLight
import org.deeplearning4j.nn.conf.layers.RBM; // import of the required package/class
@Test
public void testFromSvmLight() throws Exception {
    JavaRDD<LabeledPoint> data = MLUtils.loadLibSVMFile(sc.sc(),
            new ClassPathResource("svmLight/iris_svmLight_0.txt").getTempFileFromArchive().getAbsolutePath())
            .toJavaRDD().map(new Function<LabeledPoint, LabeledPoint>() {
                @Override
                public LabeledPoint call(LabeledPoint v1) throws Exception {
                    return new LabeledPoint(v1.label(), Vectors.dense(v1.features().toArray()));
                }
            }).cache();
    DataSet d = new IrisDataSetIterator(150, 150).next();
    MultiLayerConfiguration conf = new NeuralNetConfiguration.Builder()
            .seed(123)
            .optimizationAlgo(OptimizationAlgorithm.LINE_GRADIENT_DESCENT)
            .iterations(100).miniBatch(true)
            .maxNumLineSearchIterations(10)
            .list()
            .layer(0, new RBM.Builder(RBM.HiddenUnit.RECTIFIED, RBM.VisibleUnit.GAUSSIAN)
                    .nIn(4).nOut(100)
                    .weightInit(WeightInit.XAVIER)
                    .activation("relu")
                    .lossFunction(LossFunctions.LossFunction.RMSE_XENT).build())
            .layer(1, new org.deeplearning4j.nn.conf.layers.OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                    .nIn(100).nOut(3)
                    .activation("softmax")
                    .weightInit(WeightInit.XAVIER)
                    .build())
            .backprop(false)
            .build();
    MultiLayerNetwork network = new MultiLayerNetwork(conf);
    network.init();
    System.out.println("Initializing network");
    SparkDl4jMultiLayer master = new SparkDl4jMultiLayer(sc, getBasicConf(),
            new ParameterAveragingTrainingMaster(true, numExecutors(), 1, 5, 1, 0));
    MultiLayerNetwork network2 = master.fitLabeledPoint(data);
    Evaluation evaluation = new Evaluation();
    evaluation.eval(d.getLabels(), network2.output(d.getFeatureMatrix()));
    System.out.println(evaluation.stats());
}
Author: PacktPublishing, Project: Deep-Learning-with-Hadoop, Lines of code: 42, Source: TestSparkMultiLayerParameterAveraging.java
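Note that the Spark training above actually uses getBasicConf() rather than the local conf, which is only built and initialized in-process. The six positional ParameterAveragingTrainingMaster arguments, read against the DL4J Spark API of this era (an assumption worth verifying against the exact version in use), are:

// new ParameterAveragingTrainingMaster(true, numExecutors(), 1, 5, 1, 0)
//   saveUpdater           = true
//   numWorkers            = numExecutors()
//   rddDataSetNumExamples = 1   (examples per RDD element; one per LabeledPoint)
//   batchSizePerWorker    = 5
//   averagingFrequency    = 1   (average parameters after every minibatch)
//   prefetchNumBatches    = 0   (no async prefetch)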