本文整理汇总了Java中org.jblas.FloatMatrix类的典型用法代码示例。如果您正苦于以下问题:Java FloatMatrix类的具体用法?Java FloatMatrix怎么用?Java FloatMatrix使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
FloatMatrix类属于org.jblas包,在下文中一共展示了FloatMatrix类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: compute
import org.jblas.FloatMatrix; //导入依赖的package包/类
/**
 * Computes the output of the GRU layer for the given input, using the
 * layer's previous activation {@code this.output} as the recurrent state.
 */
@Override
public void compute(Layer input) {
    // Update gate: z = updateActivation(updateW * x + updateU * h_prev + b_z)
    FloatMatrix zPre = this.updateW.mmul(input.output)
            .addi(this.updateU.mmul(this.output))
            .addi(this.updateBias);
    this.updateGate = this.updateActivation.apply(zPre);

    // Reset gate: r = resetActivation(resetW * x + resetU * h_prev + b_r)
    FloatMatrix rPre = this.resetW.mmul(input.output)
            .addi(this.resetU.mmul(this.output))
            .addi(this.resetBias);
    this.resetGate = this.resetActivation.apply(rPre);

    // Candidate pre-activation: hiddenU * (r .* h_prev) + hiddenW * x + b_h
    FloatMatrix candidatePre = this.hiddenU.mmul(this.resetGate.mul(this.output))
            .addi(this.hiddenW.mmul(input.output))
            .addi(this.hiddenBias);

    // New state: h = (1 - z) .* activation(candidate) + z .* h_prev
    FloatMatrix blended = this.updateGate.mul(-1)
            .addi(1)
            .muli(this.activationFunction.apply(candidatePre));
    this.output = blended.add(this.updateGate.mul(this.output));
}
示例2: initialize
import org.jblas.FloatMatrix; //导入依赖的package包/类
/**
 * Initializes the GRU layer from raw weight/bias arrays and sets the
 * initial recurrent state and gate vectors to zero.
 *
 * @param hiddenW1 multiplies x_t when computing the candidate activation
 * @param hiddenU1 multiplies r_t.h_t-1 when computing the candidate activation
 * @param updateW1 multiplies x_t when computing the update gate
 * @param updateU1 multiplies h_t-1 when computing the update gate
 * @param resetW1 multiplies x_t when computing the reset gate
 * @param resetU1 multiplies h_t-1 when computing the reset gate
 * @param hiddenBias1 bias vector for the candidate activation
 * @param updateBias1 bias vector for the update gate
 * @param resetBias1 bias vector for the reset gate
 * @param activationFunction1 activation function for the candidate activation
 * @param updateActivation1 activation function for the update gate
 * @param resetActivation1 activation function for the reset gate
 */
public void initialize(float[][] hiddenW1, float[][] hiddenU1, float[][] updateW1, float[][] updateU1, float[][] resetW1, float[][] resetU1, float[] hiddenBias1, float[] updateBias1, float[] resetBias1, Activation activationFunction1, Activation updateActivation1, Activation resetActivation1) {
    // Candidate-activation weights and bias.
    this.hiddenW = new FloatMatrix(hiddenW1);
    this.hiddenU = new FloatMatrix(hiddenU1);
    this.hiddenBias = new FloatMatrix(hiddenBias1);
    // Update-gate weights and bias.
    this.updateW = new FloatMatrix(updateW1);
    this.updateU = new FloatMatrix(updateU1);
    this.updateBias = new FloatMatrix(updateBias1);
    // Reset-gate weights and bias.
    this.resetW = new FloatMatrix(resetW1);
    this.resetU = new FloatMatrix(resetU1);
    this.resetBias = new FloatMatrix(resetBias1);
    // Activation functions.
    this.activationFunction = activationFunction1;
    this.updateActivation = updateActivation1;
    this.resetActivation = resetActivation1;
    // Zero the recurrent state and both gate vectors.
    this.output = FloatMatrix.zeros(this.hiddenU.rows);
    this.updateGate = FloatMatrix.zeros(this.hiddenBias.length);
    this.resetGate = FloatMatrix.zeros(this.hiddenBias.length);
}
示例3: sumLatentVectors
import org.jblas.FloatMatrix; //导入依赖的package包/类
@Test
public void sumLatentVectors() {
    // First mapping: three vectors keyed 1..3.
    Map<Integer, FloatMatrix> latentA = new HashMap<>();
    latentA.put(1, new FloatMatrix(new float[]{1, 3, 4}));
    latentA.put(2, new FloatMatrix(new float[]{2, 2, 4}));
    latentA.put(3, new FloatMatrix(new float[]{1, 3, 4}));
    // Second mapping: only keys 1 and 2 — key 3 must be dropped from the result.
    Map<Integer, FloatMatrix> latentB = new HashMap<>();
    latentB.put(1, new FloatMatrix(new float[]{3, 3, 5}));
    latentB.put(2, new FloatMatrix(new float[]{2, 2, 4}));
    // Only keys present in both maps appear, with element-wise sums.
    Map<Integer, FloatMatrix> expected = new HashMap<>();
    expected.put(1, new FloatMatrix(new float[]{4, 6, 9}));
    expected.put(2, new FloatMatrix(new float[]{4, 4, 8}));
    Map<Integer, FloatMatrix> got = DatasetGenerator.sumLatentVectors(latentA, latentB);
    assertEquals(expected, got);
}
示例4: predict
import org.jblas.FloatMatrix; //导入依赖的package包/类
/**
 * Scores a sparse feature vector with the factorization-machine style model:
 * a linear term per feature plus pairwise interactions via latent-vector
 * dot products.
 *
 * @param sparseVector the (key, value) feature entries to score
 * @return the predicted score
 */
public float predict(SparseVector sparseVector) {
    float score = 0.0f;
    int size = sparseVector.size();
    for (int i = 0; i < size; ++i) {
        SparseVector.SparseEntry left = sparseVector.get(i);
        // Linear term for this feature.
        score += left.value * biases.get(left.key);
        FloatMatrix leftVector = latentVectors[left.key];
        // Quadratic terms: interact with every later feature.
        // XXX(od): do not include quadratic features
        // NOTE(review): the XXX note above says quadratic features should be
        // excluded, yet this loop does include them — confirm intent.
        for (int j = i + 1; j < size; ++j) {
            SparseVector.SparseEntry right = sparseVector.get(j);
            score += left.value * right.value * leftVector.dot(latentVectors[right.key]);
        }
    }
    return score;
}
示例5: processRNNList
import org.jblas.FloatMatrix; //导入依赖的package包/类
/**
 * Processes the RNN in a batch manner: feeds every buffered binned frame
 * through the (stateful) network and keeps the output produced by the
 * final frame, from which the label is derived.
 */
public void processRNNList() {
    if (this.binnedDataList.isEmpty()) {
        return;
    }
    // Placeholder for definite assignment; always overwritten below because
    // the list is known to be non-empty at this point.
    FloatMatrix lastOutput = FloatMatrix.zeros(this.getnChannels());
    for (float[] frame : this.binnedDataList) {
        lastOutput = this.rnnetwork.output(frame);
    }
    this.networkOutput = RNNfilterExpFeatures.DMToFloat(lastOutput);
    this.label = RNNfilterExpFeatures.indexOfMaxValue(this.networkOutput);
}
示例6: processRNN
import org.jblas.FloatMatrix; //导入依赖的package包/类
/**
 * Processes the RNN network for the continuous recording setting:
 * normalizes the current bin by its event count, runs a forward pass,
 * updates the predicted label, and then feeds additional bins through the
 * network for any time gap since the last completed bin.
 *
 * @param timeStamp - the timestamp of the present event
 */
public void processRNN(int timeStamp) {
// Normalize the accumulated bin by the number of events it received.
for (int i = 0; i < this.binnedData.length; i++) {
this.binnedData[i] /= this.binEventCount;
}
// Forward pass of the normalized bin through the RNN.
FloatMatrix tempOutput = this.rnnetwork.output(this.binnedData);
this.networkOutput = RNNfilterExpFeatures.DMToFloat(tempOutput);
if (chip.getCanvas().getDisplayMethod() instanceof RollingCochleaGramDisplayMethod) {
// Lazily register as a listener so we are notified of display changes.
if (!addedDisplayMethodPropertyChangeListener) {
chip.getCanvas().getDisplayMethod().getSupport().addPropertyChangeListener(this);
addedDisplayMethodPropertyChangeListener = true;
}
// save outputs for rendering
if (screenCleared) {
// reset memory
}
// save results
}
// Label is the index of the strongest network output.
this.label = RNNfilterExpFeatures.indexOfMaxValue(this.networkOutput);
this.lastBinCompleteTime += this.getBinTimeLength();
this.resetBins();
// if the present timeStamp is very far from the last time RNN was processed, that means an appropriate number
// of zero bins have to be sent to the network
// NOTE(review): this relies on resetBins() leaving this.binnedData zeroed — confirm.
while (timeStamp > (this.lastBinCompleteTime + this.getBinTimeLength())) {
tempOutput = this.rnnetwork.output(this.binnedData);
this.networkOutput = RNNfilterExpFeatures.DMToFloat(tempOutput);
this.rnnOutputList.add(this.networkOutput);
this.label = RNNfilterExpFeatures.indexOfMaxValue(this.networkOutput);
this.lastBinCompleteTime += this.getBinTimeLength();
}
}
示例7: DMToFloat
import org.jblas.FloatMatrix; //导入依赖的package包/类
/**
 * Copies a one-dimensional FloatMatrix (jblas) into a plain float array.
 *
 * @param floatMatrix a 1-dimensional float matrix; no check is made that
 * the input is indeed 1-dimensional
 * @return a float array holding the matrix entries in linear order
 */
public static float[] DMToFloat(FloatMatrix floatMatrix) {
    float[] values = new float[floatMatrix.length];
    // Copy entry by entry using jblas linear indexing.
    for (int i = values.length - 1; i >= 0; i--) {
        values[i] = floatMatrix.get(i);
    }
    return values;
}
示例8: output
import org.jblas.FloatMatrix; //导入依赖的package包/类
/**
 * Computes the output of the network for a single input frame by running
 * a forward pass through every layer in order.
 *
 * @param input - a one-dimensional float array holding the input frame
 * @return the activation of the last layer as a FloatMatrix (jblas)
 */
public FloatMatrix output(float[] input) {
    // First layer consumes the raw input directly.
    this.layers[0].computeFromInput(new FloatMatrix(input));
    // Each subsequent layer consumes the previous layer's activation.
    for (int layer = 1; layer < this.nLayers; layer++) {
        this.layers[layer].compute(this.layers[layer - 1]);
    }
    return this.layers[this.nLayers - 1].output;
}
示例9: apply
import org.jblas.FloatMatrix; //导入依赖的package包/类
/**
 * Element-wise logistic sigmoid: 1 / (1 + exp(-x)). The input matrix is
 * not modified.
 */
@Override
public FloatMatrix apply(FloatMatrix input) {
    // Build the denominator 1 + exp(-x) in place on a private copy.
    FloatMatrix denominator = new FloatMatrix().copy(input).muli(-1);
    MatrixFunctions.expi(denominator);
    denominator.addi(1);
    // Element-wise divide a matrix of ones by the denominator.
    return FloatMatrix.ones(denominator.rows, denominator.columns).divi(denominator);
}
示例10: sumLatentVectors
import org.jblas.FloatMatrix; //导入依赖的package包/类
/**
 * Sums up two latent vector mappings. The factorization machines model will
 * return a latent vector for each additional item feature. This function is
 * used to sum all of them in order to get a single embedding for each item.
 * Keys present in only one of the two mappings are omitted from the result.
 *
 * @param latentA A mapping from item id to a latent vector
 * @param latentB A mapping from item id to a latent vector
 * @return A mapping where corresponding item latent vectors have been summed up.
 */
public static Map<Integer, FloatMatrix> sumLatentVectors(Map<Integer, FloatMatrix> latentA, Map<Integer, FloatMatrix> latentB) {
    Map<Integer, FloatMatrix> summed = new HashMap<>();
    for (Map.Entry<Integer, FloatMatrix> entry : latentA.entrySet()) {
        FloatMatrix counterpart = latentB.get(entry.getKey());
        // Skip ids that have no counterpart in the second mapping.
        if (counterpart != null) {
            summed.put(entry.getKey(), entry.getValue().add(counterpart));
        }
    }
    return summed;
}
示例11: write
import org.jblas.FloatMatrix; //导入依赖的package包/类
/**
 * Writes a vector of biases to an output file, one formatted value per line.
 *
 * @param w1 One dimensional vector containing biases of a model.
 * @param writer A writer to write to; it is closed by this method.
 * @throws IOException if writing fails
 */
public static void write(FloatMatrix w1, Writer writer) throws IOException {
    // PrintWriter swallows IOExceptions internally; failures are surfaced
    // via checkError() below so the declared IOException is meaningful.
    PrintWriter printWriter = new PrintWriter(new BufferedWriter(writer), false);
    for (int i = 0; i < w1.length; ++i) {
        // Locale.ROOT pins the decimal separator to '.' regardless of the
        // platform default locale (e.g. avoids "0,1234" under de_DE), so
        // the output file stays machine-parseable everywhere.
        printWriter.println(String.format(java.util.Locale.ROOT, "%.4f ", w1.get(i)));
    }
    printWriter.close();
    if (printWriter.checkError()) {
        throw new IOException("failed to write bias vector");
    }
}
示例12: processRNN
import org.jblas.FloatMatrix; //导入依赖的package包/类
/**
 * Processes the RNN network for the continuous recording setting: converts
 * the current integer bin to floats, runs a (timed) forward pass, updates
 * the predicted label, and then feeds additional bins through the network
 * for any time gap since the last completed bin.
 *
 * @param timeStamp - the timestamp of the present event
 */
public void processRNN(int timeStamp) {
// Time the forward pass (logging currently disabled below).
long now = System.nanoTime();
FloatMatrix tempOutput = this.rnnetwork.output(RNNfilter.intToFloat(this.binnedData));
long dt = System.nanoTime() - now;
// log.log(Level.INFO, String.format("%d nanoseconds for one frame computation", dt));
this.networkOutput = RNNfilter.DMToFloat(tempOutput);
if (chip.getCanvas().getDisplayMethod() instanceof RollingCochleaGramDisplayMethod) {
// Lazily register as a listener so we are notified of display changes.
if (!addedDisplayMethodPropertyChangeListener) {
chip.getCanvas().getDisplayMethod().getSupport().addPropertyChangeListener(this);
addedDisplayMethodPropertyChangeListener = true;
}
// save outputs for rendering
if (screenCleared) {
// reset memory
}
// save results
}
// Label is the index of the strongest network output.
this.label = RNNfilter.indexOfMaxValue(this.networkOutput);
this.lastBinCompleteTime += this.getBinTimeLength();
this.resetBins();
// if the present timeStamp is very far from the last time RNN was processed, that means an appropriate number
// of zero bins have to be sent to the network
// NOTE(review): this relies on resetBins() leaving this.binnedData zeroed — confirm.
while (timeStamp > (this.lastBinCompleteTime + this.getBinTimeLength())) {
tempOutput = this.rnnetwork.output(RNNfilter.intToFloat(this.binnedData));
this.networkOutput = RNNfilter.DMToFloat(tempOutput);
this.rnnOutputList.add(this.networkOutput);
this.label = RNNfilter.indexOfMaxValue(this.networkOutput);
this.lastBinCompleteTime += this.getBinTimeLength();
}
}
示例13: initParameters
import org.jblas.FloatMatrix; //导入依赖的package包/类
/**
 * Init context, item and feature latent vectors with a normal distribution
 * and set biases to 0.
 *
 * @param numFeatures number of additional features
 * @param dimensions number of latent factors
 */
public void initParameters(int numFeatures, int dimensions) {
    logger.info("initializing parameters...");
    // All biases start at zero.
    biases = FloatMatrix.zeros(numFeatures);
    // Each latent vector is Gaussian, scaled down by the dimensionality.
    latentVectors = new FloatMatrix[numFeatures];
    for (int feature = 0; feature < numFeatures; ++feature) {
        latentVectors[feature] = gaussVector(dimensions).divi(dimensions);
    }
}
示例14: readLatentVectors
import org.jblas.FloatMatrix; //导入依赖的package包/类
@Test
public void readLatentVectors() throws IOException {
    // Two space-separated lines: "<id> <v1> <v2> <v3>".
    StringReader source = new StringReader("10 1.0 2.0 3.0\n20 2.0 3.0 4.0");
    Map<Integer, FloatMatrix> expected = new HashMap<>();
    expected.put(10, new FloatMatrix(new float[]{1, 2, 3}));
    expected.put(20, new FloatMatrix(new float[]{2, 3, 4}));
    Map<Integer, FloatMatrix> got = DatasetIO.readLatentVectors(source);
    assertEquals(expected, got);
}
示例15: resetLayer
import org.jblas.FloatMatrix; //导入依赖的package包/类
/**
 * Resets the layer's state: both gates, the memory cell and the output are
 * replaced with zero vectors sized from the corresponding recurrent
 * weight matrices.
 */
@Override
public void resetLayer() {
    // The four assignments are independent; order is irrelevant.
    this.inputGate = FloatMatrix.zeros(this.Ui.rows);
    this.forgetGate = FloatMatrix.zeros(this.Uf.rows);
    this.memoryCell = FloatMatrix.zeros(this.Uc.rows);
    this.output = FloatMatrix.zeros(this.Uo.rows);
}