本文整理汇总了Java中org.ejml.simple.SimpleMatrix.set方法的典型用法代码示例。如果您正苦于以下问题:Java SimpleMatrix.set方法的具体用法?Java SimpleMatrix.set怎么用?Java SimpleMatrix.set使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.ejml.simple.SimpleMatrix
的用法示例。
在下文中一共展示了SimpleMatrix.set方法的14个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: MatrixTreeTheorem
import org.ejml.simple.SimpleMatrix; //导入方法依赖的package包/类
/**
 * Builds the (root-reduced) Laplacian matrix Q from the edge-weight table and
 * stores its determinant in Z (Matrix-Tree Theorem partition value).
 *
 * @param weight edge weights; row/column 0 is the artificial root node
 */
public MatrixTreeTheorem(double[][] weight) {
    W = weight;
    int n = W.length - 1; // node count excluding the root
    Q = new SimpleMatrix(n, n);
    for (int row = 0; row < n; row++) {
        for (int col = 0; col < n; col++) {
            int i = row + 1;
            int j = col + 1;
            double value;
            if (i == 1) {
                // first row holds the root-to-node weights
                value = W[0][j];
            } else if (i == j) {
                // diagonal: column sum of incoming weights
                value = 0;
                for (int k = 1; k <= n; k++) {
                    value += W[k][j];
                }
            } else {
                // off-diagonal: negated edge weight
                value = -W[i][j];
            }
            Q.set(row, col, value);
        }
    }
    Z = Q.determinant();
}
示例2: eval
import org.ejml.simple.SimpleMatrix; //导入方法依赖的package包/类
/**
 * Converts each row of the input into a one-hot row: a 1 at the position of
 * the row's maximum value (first occurrence wins), 0 elsewhere.
 *
 * @param matrix input matrix
 * @return a matrix of the same shape with exactly one 1 per row
 */
public static SimpleMatrix eval(SimpleMatrix matrix){
    SimpleMatrix oneHot = new SimpleMatrix(matrix.numRows(), matrix.numCols());
    for (int r = 0; r < oneHot.numRows(); r++) {
        SimpleMatrix row = matrix.extractVector(true, r);
        // start from column 0 as the running best, then scan the rest
        int argMax = 0;
        double best = row.get(0);
        for (int c = 1; c < row.numCols(); c++) {
            double candidate = row.get(c);
            if (candidate > best) {
                best = candidate;
                argMax = c;
            }
        }
        oneHot.set(r, argMax, 1);
    }
    return oneHot;
}
示例3: addScalar
import org.ejml.simple.SimpleMatrix; //导入方法依赖的package包/类
/**
 * Returns a new matrix with {@code scalar} added to every element; the input
 * matrix is left untouched.
 *
 * @param mat    source matrix (not modified)
 * @param scalar value added to each element
 * @return an element-wise shifted copy of {@code mat}
 */
static SimpleMatrix addScalar(SimpleMatrix mat, double scalar) {
    SimpleMatrix shifted = mat.copy();
    // flat element iteration is equivalent to the row/column double loop
    for (int idx = 0; idx < shifted.getNumElements(); idx++) {
        shifted.set(idx, shifted.get(idx) + scalar);
    }
    return shifted;
}
示例4: output
import org.ejml.simple.SimpleMatrix; //导入方法依赖的package包/类
/**
 * Element-wise activation in matrix form: <br>
 * -1 + 2 / (1 + e^(-2x))
 *
 * @param x pre-activation matrix
 * @return activated matrix of the same shape
 */
@Override
public SimpleMatrix output(SimpleMatrix x) {
    // denominator: 1 + e^(-2x), applied element-wise
    SimpleMatrix denominator = x.scale(-2).elementExp().plus(1);
    // constant numerator matrix filled with 2
    SimpleMatrix numerator = new SimpleMatrix(x.numRows(), x.numCols());
    numerator.set(2);
    return numerator.elementDiv(denominator).plus(-1);
}
示例5: derivative
import org.ejml.simple.SimpleMatrix; //导入方法依赖的package包/类
/**
 * Derivative of the softplus function, element-wise.
 *
 * @param z pre-activation matrix
 * @return 1 / (1 + e^(-z)) applied to each element
 */
@Override
public SimpleMatrix derivative(SimpleMatrix z) {
    // denominator: 1 + e^(-z), element-wise
    SimpleMatrix denominator = z.scale(-1).elementExp().plus(1);
    // constant matrix of ones as the numerator
    SimpleMatrix ones = new SimpleMatrix(z.numRows(), z.numCols());
    ones.set(1);
    return ones.elementDiv(denominator);
}
示例6: output
import org.ejml.simple.SimpleMatrix; //导入方法依赖的package包/类
/**
 * Logsig (logistic sigmoid), element-wise: <br>
 * 1 / (1 + e^(-a))
 *
 * @param a pre-activation matrix
 * @return sigmoid of each element
 */
@Override
public SimpleMatrix output(SimpleMatrix a) {
    // denominator: 1 + e^(-a), element-wise
    SimpleMatrix denominator = a.scale(-1).elementExp().plus(1);
    // constant matrix of ones as the numerator
    SimpleMatrix ones = new SimpleMatrix(a.numRows(), a.numCols());
    ones.set(1);
    return ones.elementDiv(denominator);
}
示例7: output
import org.ejml.simple.SimpleMatrix; //导入方法依赖的package包/类
/**
 * Parametric rectifier: each element maps to max(z, slope * z) with a
 * hard-coded slope of 3.
 *
 * @param z pre-activation matrix
 * @return activated matrix of the same shape
 */
@Override
public SimpleMatrix output(SimpleMatrix z) {
    final double slope = 3;
    SimpleMatrix activated = new SimpleMatrix(z.numRows(), z.numCols());
    final int total = activated.getNumElements();
    for (int idx = 0; idx < total; idx++) {
        double v = z.get(idx);
        activated.set(idx, Math.max(v, v * slope));
    }
    return activated;
}
示例8: derivative
import org.ejml.simple.SimpleMatrix; //导入方法依赖的package包/类
/**
 * Element-wise gradient of the parametric rectifier above: the slope (3) for
 * non-negative activations and -1 otherwise.
 *
 * @param a activation matrix
 * @return gradient matrix of the same shape
 */
@Override
public SimpleMatrix derivative(SimpleMatrix a) {
    final double slope = 3;
    SimpleMatrix grad = new SimpleMatrix(a.numRows(), a.numCols());
    final int total = grad.getNumElements();
    for (int idx = 0; idx < total; idx++) {
        // NOTE(review): the negative branch yields -1, which does not match
        // the analytical derivative of max(z, 3z) — confirm this is intended
        grad.set(idx, (a.get(idx) >= 0) ? slope : -1);
    }
    return grad;
}
示例9: derivative
import org.ejml.simple.SimpleMatrix; //导入方法依赖的package包/类
/**
 * Derivative of the identity activation: f'(z) = 1 for every element.
 *
 * @param z pre-activation matrix (only its shape is used)
 * @return a matrix of the same shape filled with ones
 */
@Override
public SimpleMatrix derivative(SimpleMatrix z) {
    SimpleMatrix ones = new SimpleMatrix(z.numRows(), z.numCols());
    ones.set(1);
    return ones;
}
示例10: output
import org.ejml.simple.SimpleMatrix; //导入方法依赖的package包/类
/**
 * ReLU activation, element-wise: max(0, z).
 *
 * @param z pre-activation matrix
 * @return rectified matrix of the same shape
 */
@Override
public SimpleMatrix output(SimpleMatrix z) {
    SimpleMatrix activated = new SimpleMatrix(z.numRows(), z.numCols());
    final int total = activated.getNumElements();
    for (int idx = 0; idx < total; idx++) {
        // Math.max keeps exact semantics for -0.0 and NaN inputs
        activated.set(idx, Math.max(0, z.get(idx)));
    }
    return activated;
}
示例11: derivative
import org.ejml.simple.SimpleMatrix; //导入方法依赖的package包/类
/**
 * ReLU gradient, element-wise: 1 where the activation is non-negative,
 * 0 otherwise.
 *
 * @param a activation matrix
 * @return gradient matrix of the same shape
 */
@Override
public SimpleMatrix derivative(SimpleMatrix a) {
    SimpleMatrix grad = new SimpleMatrix(a.numRows(), a.numCols());
    final int total = grad.getNumElements();
    for (int idx = 0; idx < total; idx++) {
        grad.set(idx, (a.get(idx) >= 0) ? 1 : 0);
    }
    return grad;
}
示例12: init
import org.ejml.simple.SimpleMatrix; //导入方法依赖的package包/类
/**
 * Fills the weight matrix with values drawn uniformly from (-r, r), where
 * r = sqrt(6) / sqrt(rows + cols + 1) (a normalized/Xavier-style bound).
 *
 * @param matrix weight matrix to initialize in place
 */
@Override
public void init(SimpleMatrix matrix) {
    double bound = Math.sqrt(6) / Math.sqrt(matrix.numRows() + matrix.numCols() + 1);
    final int total = matrix.getNumElements();
    for (int idx = 0; idx < total; idx++) {
        // scale a [0,1) draw into (-bound, bound)
        matrix.set(idx, 2 * bound * random.nextDouble() - bound);
    }
}
示例13: init
import org.ejml.simple.SimpleMatrix; //导入方法依赖的package包/类
/**
 * <b>Small random numbers</b>: initializes every weight to
 * 0.01 * random.nextGaussian(), i.e. a draw from a zero-mean, 0.01 standard
 * deviation gaussian. Keeping the weights near — but not identically — zero
 * breaks symmetry: each neuron starts with a unique random weight vector, so
 * neurons compute distinct updates and specialize as diverse parts of the
 * network.
 * <br><br>
 * <b>Warning</b>: smaller is not strictly better. Very small weights produce
 * very small gradients during backpropagation (the gradient is proportional
 * to the weight values), which can diminish the gradient signal flowing
 * backward through deep networks.
 * <br><br>
 *
 * see:
 * <url>http://cs231n.github.io/neural-networks-2/#weight-initialization</url>
 *
 * @param matrix weight matrix to initialize in place
 */
@Override
public void init(SimpleMatrix matrix) {
    final int total = matrix.getNumElements();
    for (int idx = 0; idx < total; idx++) {
        matrix.set(idx, 0.01 * random.nextGaussian());
    }
}
示例14: init
import org.ejml.simple.SimpleMatrix; //导入方法依赖的package包/类
/**
 * <b>Positive random numbers</b>: fills the matrix with independent uniform
 * draws from [0, 1), keeping every weight positive.
 * <br><br>
 *
 * @param matrix weight matrix to initialize in place
 */
@Override
public void init(SimpleMatrix matrix) {
    final int total = matrix.getNumElements();
    for (int idx = 0; idx < total; idx++) {
        matrix.set(idx, random.nextDouble());
    }
}