本文整理匯總了Java中org.nd4j.linalg.api.ndarray.INDArray.dup方法的典型用法代碼示例。如果您正苦於以下問題:Java INDArray.dup方法的具體用法?Java INDArray.dup怎麽用?Java INDArray.dup使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類org.nd4j.linalg.api.ndarray.INDArray
的用法示例。
在下文中一共展示了INDArray.dup方法的3個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Java代碼示例。
示例1: finetune
import org.nd4j.linalg.api.ndarray.INDArray; //導入方法依賴的package包/類
/**
 * Fine-tunes the whole network on one minibatch: forward pass through all
 * hidden layers, train the output layer, then backpropagate through the
 * hidden layers in reverse order.
 *
 * @param X             minibatch input
 * @param T             minibatch target values
 * @param minibatchSize number of samples in the minibatch
 * @param learningRate  gradient-descent step size
 */
public void finetune(INDArray X, INDArray T, int minibatchSize, double learningRate) {
    // cache a copy of each layer's input so the backward pass can reuse it
    List<INDArray> layerInputs = new ArrayList<>(nLayers + 1);
    layerInputs.add(X);

    // forward pass through the hidden layers
    INDArray Z = X.dup();
    for (int layer = 0; layer < nLayers; layer++) {
        // the first hidden layer consumes the raw minibatch, later ones the previous activation
        INDArray input = (layer == 0) ? X : Z;
        Z = hiddenLayers[layer].forward(input);
        layerInputs.add(Z.dup());
    }

    // forward & backward through the output layer; dY seeds backpropagation
    INDArray dY = outputLayer.train(Z, T, minibatchSize, learningRate);

    // backward pass through the hidden layers
    INDArray dZ = Z.dup();
    for (int layer = nLayers - 1; layer >= 0; layer--) {
        INDArray Wprev;
        if (layer == nLayers - 1) {
            // topmost hidden layer receives the gradient straight from the output layer
            Wprev = outputLayer.getW();
        } else {
            Wprev = hiddenLayers[layer + 1].getW();
            dY = dZ.dup();  // gradient handed down from the layer above
        }
        dZ = hiddenLayers[layer].backward(layerInputs.get(layer), layerInputs.get(layer + 1),
                dY, Wprev, minibatchSize, learningRate);
    }
}
示例2: predict
import org.nd4j.linalg.api.ndarray.INDArray; //導入方法依賴的package包/類
/**
 * Runs a forward pass through every hidden layer and returns the output
 * layer's prediction for the given input.
 *
 * @param x input minibatch
 * @return the output layer's prediction
 */
public INDArray predict(INDArray x) {
    INDArray z = x.dup();
    for (int layer = 0; layer < nLayers; layer++) {
        // layer 0 reads the raw input; later layers read a copy of the previous activation
        INDArray input = (layer == 0) ? x : z.dup();
        z = hiddenLayers[layer].forward(input);
    }
    return outputLayer.predict(z);
}
示例3: finetune
import org.nd4j.linalg.api.ndarray.INDArray; //導入方法依賴的package包/類
/**
 * Fine-tunes the network with one minibatch: a forward sweep through the
 * hidden layers, supervised training of the output layer, then a backward
 * sweep propagating the gradient down the hidden layers.
 *
 * @param X             minibatch input
 * @param T             minibatch target values
 * @param minibatchSize number of samples in the minibatch
 * @param learningRate  gradient-descent step size
 */
public void finetune(INDArray X, INDArray T, int minibatchSize, double learningRate) {
    // record each layer's input (as a defensive copy) for the backward sweep
    List<INDArray> cachedInputs = new ArrayList<>(nLayers + 1);
    cachedInputs.add(X);

    INDArray activation = X.dup();
    for (int layer = 0; layer < nLayers; layer++) {
        // first hidden layer consumes the raw minibatch, later ones the prior activation
        activation = hiddenLayers[layer].forward(layer == 0 ? X : activation);
        cachedInputs.add(activation.dup());
    }

    // train the output layer; its returned gradient seeds backpropagation
    INDArray grad = outputLayer.train(activation, T, minibatchSize, learningRate);

    INDArray delta = activation.dup();
    for (int layer = nLayers - 1; layer >= 0; layer--) {
        INDArray upperW;
        if (layer == nLayers - 1) {
            // topmost hidden layer backpropagates against the output layer's weights
            upperW = outputLayer.getW();
        } else {
            upperW = hiddenLayers[layer + 1].getW();
            grad = delta.dup();  // gradient passed down from the layer above
        }
        delta = hiddenLayers[layer].backward(cachedInputs.get(layer), cachedInputs.get(layer + 1),
                grad, upperW, minibatchSize, learningRate);
    }
}