本文整理汇总了Java中edu.stanford.nlp.math.ArrayMath.softmax方法的典型用法代码示例。如果您正苦于以下问题:Java ArrayMath.softmax方法的具体用法?Java ArrayMath.softmax怎么用?Java ArrayMath.softmax使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类edu.stanford.nlp.math.ArrayMath
的用法示例。
在下文中一共展示了ArrayMath.softmax方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: computeCliquePotential
import edu.stanford.nlp.math.ArrayMath; //导入方法依赖的package包/类
@Override
public double computeCliquePotential(int cliqueSize, int labelIndex, int[] cliqueFeatures, double[] featureVal) {
  // Edge cliques (size > 1) get a plain linear potential over the active features.
  if (cliqueSize > 1) {
    double edgeScore = 0.0;
    for (int feature : cliqueFeatures) {
      edgeScore += linearWeights[feature][labelIndex];
    }
    return edgeScore;
  }
  // Node cliques go through the non-linear hidden layer.
  double[] hidden = hiddenLayerOutput(inputLayerWeights, cliqueFeatures, flags, featureVal);
  if (!flags.useOutputLayer) {
    // No output layer: the hidden unit for this label is the score directly.
    return hidden[labelIndex];
  }
  int outputLayerSize = inputLayerWeights.length / outputLayerWeights[0].length;
  // Tied output layer shares row 0 across labels; otherwise pick the label's row.
  double[] ws = flags.tieOutputLayer ? outputLayerWeights[0] : outputLayerWeights[labelIndex];
  if (flags.softmaxOutputLayer) {
    ws = ArrayMath.softmax(ws);
  }
  boolean sparse = flags.sparseOutputLayer || flags.tieOutputLayer;
  double score = 0.0;
  for (int i = 0; i < inputLayerWeights.length; i++) {
    if (sparse) {
      // Sparse/tied case: only hidden units assigned to this label contribute.
      if (i % outputLayerSize == labelIndex) {
        score += ws[i / outputLayerSize] * hidden[i];
      }
    } else {
      score += ws[i] * hidden[i];
    }
  }
  return score;
}
示例2: getCliquePotentialFunction
import edu.stanford.nlp.math.ArrayMath; //导入方法依赖的package包/类
public CliquePotentialFunction getCliquePotentialFunction(double[] x) {
  // Normalize the per-expert mixing weights with a softmax over the raw scales.
  double[] scales = ArrayMath.softmax(separateLopScales(x));
  // When backprop training is on, the expert weights themselves also live in x;
  // otherwise reuse the fixed pre-trained expert weights.
  double[][][] expertWeights =
      backpropTraining ? separateLopExpertWeights2D(x) : lopExpertWeights2D;
  double[][] combined = combineAndScaleLopWeights2D(numLopExpert, expertWeights, scales);
  return new LinearCliquePotentialFunction(combined);
}
示例3: computeCliquePotential
import edu.stanford.nlp.math.ArrayMath; //导入方法依赖的package包/类
@Override
public double computeCliquePotential(int cliqueSize, int labelIndex, int[] cliqueFeatures, double[] featureVal) {
  // Edge and node cliques use separate weight matrices for the two layers.
  boolean isEdge = cliqueSize > 1;
  double[][] inWs = isEdge ? inputLayerWeights4Edge : inputLayerWeights;
  double[][] outWs = isEdge ? outputLayerWeights4Edge : outputLayerWeights;
  double[] hidden = hiddenLayerOutput(inWs, cliqueFeatures, flags, featureVal, cliqueSize);
  if (!flags.useOutputLayer) {
    // No output layer: the hidden unit for this label is the score directly.
    return hidden[labelIndex];
  }
  int outputLayerSize = inWs.length / outWs[0].length;
  // Tied output layer shares row 0 across labels; otherwise pick the label's row.
  double[] ws = flags.tieOutputLayer ? outWs[0] : outWs[labelIndex];
  if (flags.softmaxOutputLayer) {
    ws = ArrayMath.softmax(ws);
  }
  boolean sparse = flags.sparseOutputLayer || flags.tieOutputLayer;
  double score = 0.0;
  for (int i = 0; i < inWs.length; i++) {
    if (sparse) {
      // Sparse/tied case: only hidden units assigned to this label contribute.
      if (i % outputLayerSize == labelIndex) {
        score += ws[i / outputLayerSize] * hidden[i];
      }
    } else {
      score += ws[i] * hidden[i];
    }
  }
  return score;
}
示例4: computeCliquePotential
import edu.stanford.nlp.math.ArrayMath; //导入方法依赖的package包/类
@Override
public double computeCliquePotential(int cliqueSize, int labelIndex, int[] cliqueFeatures, double[] featureVal) {
  // Edge and node cliques use separate weight matrices for the two layers.
  boolean isEdge = cliqueSize > 1;
  double[][] inWs = isEdge ? inputLayerWeights4Edge : inputLayerWeights;
  double[][] outWs = isEdge ? outputLayerWeights4Edge : outputLayerWeights;
  // Delegate hidden-layer computation to the shared static helper.
  double[] hidden = NonLinearCliquePotentialFunction.hiddenLayerOutput(inWs, cliqueFeatures, flags, featureVal);
  if (!flags.useOutputLayer) {
    // No output layer: the hidden unit for this label is the score directly.
    return hidden[labelIndex];
  }
  int outputLayerSize = inWs.length / outWs[0].length;
  // Tied output layer shares row 0 across labels; otherwise pick the label's row.
  double[] ws = flags.tieOutputLayer ? outWs[0] : outWs[labelIndex];
  if (flags.softmaxOutputLayer) {
    ws = ArrayMath.softmax(ws);
  }
  boolean sparse = flags.sparseOutputLayer || flags.tieOutputLayer;
  double score = 0.0;
  for (int i = 0; i < inWs.length; i++) {
    if (sparse) {
      // Sparse/tied case: only hidden units assigned to this label contribute.
      if (i % outputLayerSize == labelIndex) {
        score += ws[i / outputLayerSize] * hidden[i];
      }
    } else {
      score += ws[i] * hidden[i];
    }
  }
  return score;
}