本文整理汇总了Java中org.nd4j.linalg.api.ndarray.INDArray.dup方法的典型用法代码示例。如果您正苦于以下问题:Java INDArray.dup方法的具体用法?Java INDArray.dup怎么用?Java INDArray.dup使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.nd4j.linalg.api.ndarray.INDArray
的用法示例。
在下文中一共展示了INDArray.dup方法的3个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: finetune
import org.nd4j.linalg.api.ndarray.INDArray; //导入方法依赖的package包/类
/**
 * Fine-tunes the whole network with one minibatch: forward through the
 * hidden layers, train the output layer, then backpropagate the gradient
 * down through the hidden layers.
 *
 * @param X             minibatch input
 * @param T             minibatch target labels
 * @param minibatchSize number of samples in the minibatch
 * @param learningRate  step size for the parameter updates
 */
public void finetune(INDArray X, INDArray T, int minibatchSize, double learningRate) {
    // Cache each layer's input so the backward pass can revisit it.
    List<INDArray> layerInputs = new ArrayList<>(nLayers + 1);
    layerInputs.add(X);

    INDArray Z = X.dup();
    INDArray dY;

    // Forward pass: the first layer consumes the raw input, later layers
    // consume the previous activation; record a copy of every activation.
    for (int layer = 0; layer < nLayers; layer++) {
        INDArray input = (layer == 0) ? X : Z;
        Z = hiddenLayers[layer].forward(input);
        layerInputs.add(Z.dup());
    }

    // Output layer does forward and backward in one call; dY is the
    // gradient it hands back to the topmost hidden layer.
    dY = outputLayer.train(Z, T, minibatchSize, learningRate);

    // Backward pass through the hidden layers, topmost first.
    INDArray Wprev;
    INDArray dZ = Z.dup();
    for (int layer = nLayers - 1; layer >= 0; layer--) {
        if (layer == nLayers - 1) {
            // Topmost hidden layer: gradient and weights come from the output layer.
            Wprev = outputLayer.getW();
        } else {
            // Deeper layers: propagate the gradient produced by the layer above.
            Wprev = hiddenLayers[layer + 1].getW();
            dY = dZ.dup();
        }
        dZ = hiddenLayers[layer].backward(layerInputs.get(layer), layerInputs.get(layer + 1),
                dY, Wprev, minibatchSize, learningRate);
    }
}
示例2: predict
import org.nd4j.linalg.api.ndarray.INDArray; //导入方法依赖的package包/类
/**
 * Runs a forward pass through all hidden layers and returns the output
 * layer's prediction for the given input.
 *
 * @param x input minibatch
 * @return prediction produced by the output layer
 */
public INDArray predict(INDArray x) {
    INDArray z = x.dup();
    for (int layer = 0; layer < nLayers; layer++) {
        // The first layer reads the raw input; every later layer reads a
        // copy of the previous activation.
        INDArray input = (layer == 0) ? x : z.dup();
        z = hiddenLayers[layer].forward(input);
    }
    return outputLayer.predict(z);
}
示例3: finetune
import org.nd4j.linalg.api.ndarray.INDArray; //导入方法依赖的package包/类
/**
 * Supervised fine-tuning step for one minibatch.
 *
 * <p>Forward-propagates the input while caching every layer's activation,
 * trains the output layer, then backpropagates the resulting gradient
 * through the hidden layers from top to bottom.
 *
 * @param X             minibatch input
 * @param T             minibatch target labels
 * @param minibatchSize number of samples in the minibatch
 * @param learningRate  step size for the parameter updates
 */
public void finetune(INDArray X, INDArray T, int minibatchSize, double learningRate) {
    List<INDArray> layerInputs = new ArrayList<>(nLayers + 1);
    layerInputs.add(X);

    INDArray Z = X.dup();

    // Forward pass, keeping a copy of each activation for backprop.
    for (int layer = 0; layer < nLayers; layer++) {
        INDArray input;
        if (layer == 0) {
            input = X;
        } else {
            input = Z;
        }
        Z = hiddenLayers[layer].forward(input);
        layerInputs.add(Z.dup());
    }

    // The output layer trains itself and returns the gradient for the
    // topmost hidden layer.
    INDArray dY = outputLayer.train(Z, T, minibatchSize, learningRate);

    // Backward pass through the hidden layers in reverse order.
    INDArray dZ = Z.dup();
    for (int layer = nLayers - 1; layer >= 0; layer--) {
        INDArray Wprev;
        if (layer == nLayers - 1) {
            Wprev = outputLayer.getW();
        } else {
            Wprev = hiddenLayers[layer + 1].getW();
            // Gradient flowing into this layer is the one computed by the
            // layer above on the previous iteration.
            dY = dZ.dup();
        }
        dZ = hiddenLayers[layer].backward(layerInputs.get(layer), layerInputs.get(layer + 1),
                dY, Wprev, minibatchSize, learningRate);
    }
}