This article collects typical usage examples of the C# method numl.Math.LinearAlgebra.Vector.ToBinary. If you have been wondering what Vector.ToBinary does, or how and when to use it, the curated code examples below should help. You can also explore the containing class, numl.Math.LinearAlgebra.Vector, in more detail.
Two code examples of Vector.ToBinary are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better C# code examples.
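Before the examples, here is a minimal sketch of what Vector.ToBinary appears to do, inferred from the two calls below: each element is passed through a predicate and replaced by a true value (assumed here to default to 1.0) or by the supplied falseValue. The demo scaffolding and the implicit double[]-to-Vector conversion are assumptions, not part of the article.

using numl.Math.LinearAlgebra;

public static class ToBinaryDemo
{
    public static void Main()
    {
        // raw multi-class labels (relies on numl's implicit double[] -> Vector conversion)
        Vector labels = new[] { 0d, 1d, 2d, 1d };

        // 1/0 encoding, as in the logistic regression example below
        Vector zeroOne = labels.ToBinary(f => f == 1d, falseValue: 0d);   // [0, 1, 0, 1]

        // +1/-1 encoding, as in the SVM example below
        Vector signed = labels.ToBinary(k => k == 1d, falseValue: -1.0); // [-1, 1, -1, 1]
    }
}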
Example 1: Generate
/// <summary>Generate Logistic Regression model based on a set of examples.</summary>
/// <param name="X">The Matrix to process.</param>
/// <param name="y">The Vector to process.</param>
/// <returns>Model.</returns>
public override IModel Generate(Matrix X, Vector y)
{
    X = IncreaseDimensions(X, this.PolynomialFeatures);
    this.Preprocess(X);

    // guarantee 1/0 based label vector
    y = y.ToBinary(f => f == 1d, falseValue: 0d);

    // add intercept term
    X = X.Insert(Vector.Ones(X.Rows), 0, VectorType.Col, false);

    Vector theta = Vector.Rand(X.Cols);

    // run gradient descent
    var optimizer = new numl.Math.Optimization.Optimizer(theta, this.MaxIterations, this.LearningRate)
    {
        CostFunction = new numl.Math.Functions.Cost.LogisticCostFunction()
        {
            X = X,
            Y = y,
            Lambda = this.Lambda,
            Regularizer = new numl.Math.Functions.Regularization.L2Regularizer(),
            LogisticFunction = this.LogisticFunction
        }
    };

    optimizer.Run();

    LogisticRegressionModel model = new LogisticRegressionModel()
    {
        Descriptor = this.Descriptor,
        NormalizeFeatures = base.NormalizeFeatures,
        FeatureNormalizer = base.FeatureNormalizer,
        FeatureProperties = base.FeatureProperties,
        Theta = optimizer.Properties.Theta,
        LogisticFunction = this.LogisticFunction,
        PolynomialFeatures = this.PolynomialFeatures
    };

    return model;
}
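A usage sketch for Example 1, not taken from the article: the generator type LogisticRegressionGenerator and its namespace are assumptions based on numl's conventions, the properties set are the ones the Generate method above reads, and the training data is invented for illustration.

using numl.Math.LinearAlgebra;
using numl.Supervised;
using numl.Supervised.Logistic; // assumed namespace for LogisticRegressionGenerator

public static class LogisticDemo
{
    public static void Main()
    {
        // toy data: two features per row, 1/0 labels (ToBinary above normalises them anyway)
        Matrix X = new[,] { { 0.2, 1.1 }, { 0.9, 0.4 }, { 1.5, 2.2 }, { 2.1, 1.8 } };
        Vector y = new[] { 0d, 0d, 1d, 1d };

        var generator = new LogisticRegressionGenerator
        {
            LearningRate = 0.01,
            MaxIterations = 500,
            Lambda = 1.0,
            PolynomialFeatures = 1
        };

        IModel model = generator.Generate(X, y);

        // typed as Vector so the Predict(Vector) overload is chosen
        Vector query = new[] { 1.0, 1.0 };
        double prediction = model.Predict(query);
    }
}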
Example 2: Generate
/// <summary>Generates a SVM model based on a set of examples.</summary>
/// <param name="X">The Matrix to process.</param>
/// <param name="y">The Vector to process.</param>
/// <returns>Model.</returns>
public override IModel Generate(Matrix X, Vector y)
{
    this.Preprocess(X);

    // expect truth = 1 and false = -1
    y = y.ToBinary(k => k == 1d, falseValue: -1.0);

    // initialise variables
    int m = X.Rows, n = X.Cols, i = -1, j = -1, changes = 0, iterations = 0;
    double lagLow = 0.0, lagHigh = 0.0, cost = 0.0, tempAI = 0d, tempAJ = 0d;

    Vector gradient = Vector.Zeros(m), alpha = Vector.Zeros(m);

    // precompute kernel matrix (using similarity function)
    Matrix K = this.KernelFunction.Compute(X);

    // synchronise SVM parameters with the working set selection function
    this.SelectionFunction.Bias = this.Bias;
    this.SelectionFunction.C = this.C;
    this.SelectionFunction.Epsilon = this.Epsilon;
    this.SelectionFunction.K = K;
    this.SelectionFunction.Y = y;

    bool finalise = false;

    this.SelectionFunction.Initialize(alpha, gradient);

    while (finalise == false && iterations < this.MaxIterations)
    {
        changes = 0;

        #region Training

        for (int p = 0; p < m; p++)
        {
            // get new working set selection using heuristic function
            Tuple<int, int> newPair = this.SelectionFunction.GetWorkingSet(i, j, gradient, alpha);

            // check for valid i, j pairs
            if (newPair.Item1 >= 0 && newPair.Item2 >= 0 && newPair.Item1 != newPair.Item2)
            {
                i = newPair.Item1; j = newPair.Item2;

                // compute new gradients
                gradient[i] = Bias + (alpha * y * K[i, VectorType.Col]).Sum() - y[i];

                if ((y[i] * gradient[i] < -this.Epsilon && alpha[i] < this.C) || (y[i] * gradient[i] > this.Epsilon && alpha[i] > 0))
                {
                    gradient[j] = Bias + (alpha * y * K[j, VectorType.Col]).Sum() - y[j];

                    // store temp working copies of alpha from both pairs (i, j)
                    tempAI = alpha[i]; tempAJ = alpha[j];

                    // update lower and upper bounds of lagrange multipliers
                    if (y[i] == y[j])
                    {
                        // pairs are in the same class; don't apply large margin
                        lagLow = System.Math.Max(0.0, alpha[j] + alpha[i] - this.C);
                        lagHigh = System.Math.Min(this.C, alpha[j] + alpha[i]);
                    }
                    else
                    {
                        // pairs are not in the same class; apply large margin
                        lagLow = System.Math.Max(0.0, alpha[j] - alpha[i]);
                        lagHigh = System.Math.Min(this.C, this.C + alpha[j] - alpha[i]);
                    }

                    // if lagrange constraints are not diverse then get a new working set
                    if (lagLow == lagHigh) continue;

                    // compute cost and skip if it is non-negative;
                    // cost should optimise large margin where fit line intercepts <= 0
                    cost = 2.0 * K[i, j] - K[i, i] - K[j, j];
                    if (cost >= 0.0) continue;
                    else
                    {
                        // update alpha of (j) w.r.t. the relative cost difference of the i-th and j-th gradient
                        alpha[j] = alpha[j] - (y[j] * (gradient[i] - gradient[j])) / cost;

                        // clip alpha with lagrange multipliers
                        alpha[j] = System.Math.Min(lagHigh, alpha[j]);
                        alpha[j] = System.Math.Max(lagLow, alpha[j]);

                        // check alpha tolerance factor
                        if (System.Math.Abs(alpha[j] - tempAJ) < this.Epsilon)
                        {
                            // we're optimising large margins so skip small ones
                            alpha[j] = tempAJ; continue;
                        }

                        // update alpha of i if we have a large margin w.r.t. alpha (j)
                        alpha[i] = alpha[i] + y[i] * y[j] * (tempAJ - alpha[j]);

                        // precompute i, j into feasible region for Bias
                        double yBeta = (alpha[i] - tempAI) * K[i, j] - y[j] * (alpha[j] - tempAJ);

                        // store temp beta with gradient for i, j pairs
                        double beta_i = this.Bias - gradient[i] - y[i] * yBeta * K[i, j];
                        double beta_j = this.Bias - gradient[j] - y[i] * yBeta * K[j, j];

                        // update new bias with constrained alpha limits (0 < alpha < C)
//......... remainder of the code omitted .........
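And a matching usage sketch for Example 2, again not taken from the (truncated) article: SVMGenerator, its namespace, and LinearKernel are assumptions based on numl's API conventions, and the data is invented. Note that ToBinary inside Generate converts the 1/0 labels to the +1/-1 form the SMO-style loop above expects.

using numl.Math.LinearAlgebra;
using numl.Supervised;
using numl.Supervised.SVM; // assumed namespace for SVMGenerator

public static class SvmDemo
{
    public static void Main()
    {
        // toy data: two well-separated clusters
        Matrix X = new[,] { { 0.1, 0.2 }, { 0.3, 0.1 }, { 4.0, 4.2 }, { 4.1, 3.9 } };
        Vector y = new[] { 0d, 0d, 1d, 1d };

        var generator = new SVMGenerator
        {
            C = 1.0,
            MaxIterations = 100,
            KernelFunction = new numl.Math.Kernels.LinearKernel() // assumed kernel type
        };

        IModel model = generator.Generate(X, y);

        // typed as Vector so the Predict(Vector) overload is chosen
        Vector query = new[] { 4.0, 4.0 };
        double label = model.Predict(query); // expect the "+1" side
    }
}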