This page collects typical usage examples of the C# method numl.Math.LinearAlgebra.Vector.Slice. If you have been wondering how exactly Vector.Slice works, how to call it, or what real uses of it look like, the curated code examples here may help. You can also explore further usage examples of its containing class, numl.Math.LinearAlgebra.Vector.
The following shows 11 code examples of Vector.Slice, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C# examples.
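Before the examples, here is a minimal sketch of the two Slice overloads that recur below: an index-range overload (both endpoints inclusive, as the call sites in Examples 1 and 2 suggest) and an index-set overload. The vector values are illustrative only.

using System.Collections.Generic;
using numl.Math.LinearAlgebra;

public static class SliceSketch
{
    public static void Run()
    {
        var v = new Vector(new double[] { 10, 20, 30, 40, 50 });
        // inclusive index range: elements at indices 1..3 -> [20, 30, 40]
        Vector range = v.Slice(1, 3);
        // explicit index set: elements at indices 0 and 4 -> [10, 50]
        Vector picked = v.Slice(new List<int> { 0, 4 });
    }
}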
Example 1: ComputeGradient
/// <summary>
/// Computes the gradient of the cost for the given Theta parameters over the training and label sets
/// </summary>
/// <param name="theta">Learning Theta parameters</param>
/// <returns>Gradient vector for the given parameters.</returns>
public override Vector ComputeGradient(Vector theta)
{
Matrix ThetaX = theta.Slice(0, (R.Rows * CollaborativeFeatures) - 1).Reshape(CollaborativeFeatures, VectorType.Col);
Matrix ThetaY = theta.Slice((R.Rows * CollaborativeFeatures), theta.Length - 1).Reshape(CollaborativeFeatures, VectorType.Col);
Matrix A = ((ThetaY * ThetaX.T).T - YReformed);
Matrix S = A.Each(R, (i, j) => i * j);
Matrix gradX = (S * ThetaY) + (Lambda * ThetaX);
Matrix gradTheta = (S.T * ThetaX) + (Lambda * ThetaY);
return Vector.Combine(gradX.Unshape(), gradTheta.Unshape());
}
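The Slice pattern worth noting here is the pack/unpack round trip: the optimizer works on a single flat theta vector, and Slice plus Reshape recover the two factor matrices from it, while Unshape and Vector.Combine flatten the gradient back on return. Below is a hedged sketch of the unpacking step in isolation, with illustrative dimensions (Reshape semantics are assumed from the call sites above):

using numl.Math.LinearAlgebra;

int features = 2;   // stands in for CollaborativeFeatures
int items = 4;      // stands in for R.Rows
int users = 3;      // illustrative
Vector theta = Vector.Zeros(items * features + users * features);
// the first items*features entries rebuild ThetaX (Slice bounds are inclusive);
// the remaining entries rebuild ThetaY
Matrix thetaX = theta.Slice(0, items * features - 1).Reshape(features, VectorType.Col);
Matrix thetaY = theta.Slice(items * features, theta.Length - 1).Reshape(features, VectorType.Col);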
Example 2: ComputeCost
/// <summary>
/// Compute the error cost of the given Theta parameter for the training and label sets
/// </summary>
/// <param name="theta">Learning Theta parameters</param>
/// <returns>Error cost for the given parameters.</returns>
public override double ComputeCost(Vector theta)
{
double j = 0.0;
Matrix ThetaX = theta.Slice(0, (R.Rows * CollaborativeFeatures) - 1).Reshape(CollaborativeFeatures, VectorType.Col);
Matrix ThetaY = theta.Slice((R.Rows * CollaborativeFeatures), theta.Length - 1).Reshape(CollaborativeFeatures, VectorType.Col);
j = (1.0 / 2.0) * ((ThetaY * ThetaX.T).T - YReformed).Each(i => System.Math.Pow(i, 2.0)).Each((v, r, c) => v * R[r, c]).Sum();
if (Lambda != 0)
{
j = j + ((Lambda / 2.0) * (ThetaY.Each(i => System.Math.Pow(i, 2.0)).Sum()) + (Lambda / 2.0 * ThetaX.Each(i => System.Math.Pow(i, 2.0)).Sum()));
}
return j;
}
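Written out, the cost above is the standard regularized collaborative-filtering objective, with R masking the sum to observed entries only (one conventional way to notate it; the symbols follow the code):

J(\Theta_X, \Theta_Y) = \frac{1}{2} \sum_{(r,c)\,:\,R_{rc}=1} \big( ((\Theta_Y \Theta_X^\top)^\top - Y)_{rc} \big)^2 + \frac{\lambda}{2} \sum_{i,k} (\Theta_X)_{ik}^2 + \frac{\lambda}{2} \sum_{j,k} (\Theta_Y)_{jk}^2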
Example 3: Test_Vector_Slicing_With_Indices
public void Test_Vector_Slicing_With_Indices(IEnumerable<double> source, IEnumerable<int> indices, IEnumerable<double> truth)
{
var x = new Vector(source);
var t = new Vector(truth);
var slice = x.Slice(indices);
Assert.AreEqual(t, slice);
}
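The test body is parameterized; a hypothetical invocation (values made up for illustration, not taken from the original test fixture) looks like:

Test_Vector_Slicing_With_Indices(
    source:  new[] { 1.0, 2.0, 3.0, 4.0 },
    indices: new[] { 0, 2 },
    truth:   new[] { 1.0, 3.0 });   // Slice picks the elements at indices 0 and 2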
Example 4: SegmentedConditional
/// <summary>
/// Calculates the segmented conditional impurity of y | x. When ranges (r) are
/// stipulated, X is broken up into |r| segments, so P(X = x_r) becomes a range
/// probability rather than a fixed probability. In essence, the average over
/// H(Y|X = x) becomes SUM_r [ p_r * H(Y|X = x_r) ]. The values used to do the
/// split are stored in the Splits member.
/// </summary>
/// <exception cref="InvalidOperationException">Thrown when the requested operation is invalid.</exception>
/// <param name="y">Target impurity.</param>
/// <param name="x">Conditioned impurity.</param>
/// <param name="ranges">The ranges over x to condition upon.</param>
/// <returns>Segmented conditional impurity measure.</returns>
public double SegmentedConditional(Vector y, Vector x, IEnumerable<Range> ranges)
{
if (x == null || y == null)
throw new InvalidOperationException("x or y does not exist!");
double p = 0, // probability of slice
h = 0, // impurity of y | x_i : ith slice
result = 0, // aggregated sum
count = x.Count(); // total items in list
Segments = ranges.OrderBy(r => r.Min).ToArray();
Discrete = false;
// for each range calculate
// conditional impurity and
// aggregate results
foreach (Range range in Segments)
{
// get slice
var s = x.Indices(d => d >= range.Min && d < range.Max);
// slice probability
p = (double)s.Count() / (double)count;
// impurity of (y | x_i)
h = Calculate(y.Slice(s));
// sum up
result += p * h;
}
return result;
}
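In display form, the loop computes the quantity from the summary, SUM_r [ p_r * H(Y|X = x_r) ] (the notation below is one conventional choice):

H(Y \mid X) = \sum_{r \in \text{ranges}} p_r \, H\big(Y \mid x \in [r_{\min}, r_{\max})\big), \qquad p_r = \frac{|\{\, i : r_{\min} \le x_i < r_{\max} \,\}|}{n}

where n is the length of x, and y.Slice(s) supplies the labels that fall into each range.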
Example 5: Conditional
/// <summary>
/// Calculates the conditional impurity of y | x. H(Y|X) is the average of
/// H(Y|X = x) over all values X may take.
/// </summary>
/// <param name="y">Target impurity.</param>
/// <param name="x">Conditioned impurity.</param>
/// <returns>Conditional impurity measure.</returns>
public double Conditional(Vector y, Vector x)
{
if (x == null || y == null)
throw new InvalidOperationException("x or y does not exist!");
double p = 0, // probability of slice
h = 0, // impurity of y | x_i : ith slice
result = 0, // aggregated sum
count = x.Count(); // total items in list
var values = x.Distinct().OrderBy(z => z); // distinct values to split on
Segments = values.Select(z => Range.Make(z, z)).ToArray();
Discrete = true;
// for each distinct value
// calculate conditional impurity
// and aggregate results
foreach (var i in values)
{
// get slice
var s = x.Indices(d => d == i);
// slice probability
p = (double)s.Count() / (double)count;
// impurity of (y | x_i)
h = Calculate(y.Slice(s));
// sum up
result += p * h;
}
return result;
}
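A hypothetical call site for Conditional, assuming a concrete Impurity subclass such as numl's Entropy (the class name, namespace, and sample values are assumptions for illustration):

using numl.Math.Information;     // assumed namespace for Entropy
using numl.Math.LinearAlgebra;

var x = new Vector(new double[] { 0, 0, 1, 1 });   // feature column
var y = new Vector(new double[] { 0, 1, 1, 1 });   // labels
var impurity = new Entropy();                      // assumed concrete subclass
double h = impurity.Conditional(y, x);             // averages H(y | x = v) over distinct values v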
Example 6: Generate
/// <summary>Generate model based on a set of examples.</summary>
/// <param name="X">The Matrix to process.</param>
/// <param name="y">The Vector to process.</param>
/// <returns>Model.</returns>
public override IModel Generate(Matrix X, Vector y)
{
this.Preprocess(X);
int N = y.Length;
Vector a = Vector.Zeros(N);
// compute kernel
Matrix K = Kernel.Compute(X);
int n = 1;
// hopefully enough to converge right? ;)
// need to be smarter about storing SPD kernels...
bool found_error = true;
while (n < 500 && found_error)
{
found_error = false;
for (int i = 0; i < N; i++)
{
// flag the pass as erroneous only on a miss, so earlier
// mistakes are not overwritten by later correct examples
if (y[i] * a.Dot(K[i]) <= 0)
{
found_error = true;
a[i] += y[i];
}
}
n++;
}
// anything that *matters*
// i.e. support vectors
var indices = a.Indices(d => d != 0);
// slice up examples to contain
// only support vectors
return new KernelPerceptronModel
{
Kernel = Kernel,
A = a.Slice(indices),
Y = y.Slice(indices),
X = X.Slice(indices),
Descriptor = this.Descriptor,
NormalizeFeatures = base.NormalizeFeatures,
FeatureNormalizer = base.FeatureNormalizer,
FeatureProperties = base.FeatureProperties
};
}
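The training loop above is the classic kernelized (dual) perceptron: example i counts as a mistake when its label disagrees with the kernel-weighted vote, and only then is its dual weight bumped:

y_i \sum_j a_j K(x_j, x_i) \le 0 \quad \Rightarrow \quad a_i \leftarrow a_i + y_i

Slicing A, Y, and X down to the indices where a_i != 0 then keeps exactly the support vectors, which is all the model needs at prediction time.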
Example 7: BuildTree
/// <summary>Builds a tree.</summary>
/// <param name="x">The Matrix to process.</param>
/// <param name="y">The Vector to process.</param>
/// <param name="depth">The depth.</param>
/// <param name="used">The used.</param>
/// <returns>A Node.</returns>
private Node BuildTree(Matrix x, Vector y, int depth, List<int> used, Tree tree)
{
if (depth < 0)
return BuildLeafNode(y.Mode());
var tuple = GetBestSplit(x, y, used);
var col = tuple.Item1;
var gain = tuple.Item2;
var measure = tuple.Item3;
// uh oh, need to return something?
// a weird node of some sort...
// but just in case...
if (col == -1)
return BuildLeafNode(y.Mode());
used.Add(col);
Node node = new Node
{
Column = col,
Gain = gain,
IsLeaf = false,
Name = Descriptor.ColumnAt(col)
};
// populate edges
List<Edge> edges = new List<Edge>(measure.Segments.Length);
for (int i = 0; i < measure.Segments.Length; i++)
{
// working set
var segment = measure.Segments[i];
var edge = new Edge()
{
ParentId = node.Id,
Discrete = measure.Discrete,
Min = segment.Min,
Max = segment.Max
};
IEnumerable<int> slice;
if (edge.Discrete)
{
// get discrete label
edge.Label = Descriptor.At(col).Convert(segment.Min).ToString();
// do value check for matrix slicing
slice = x.Indices(v => v[col] == segment.Min);
}
else
{
// get range label
edge.Label = string.Format("{0} <= x < {1}", segment.Min, segment.Max);
// do range check for matrix slicing
slice = x.Indices(v => v[col] >= segment.Min && v[col] < segment.Max);
}
// something to look at?
// if this number is 0 then this edge
// leads to a dead end - the edge will
// not be built
if (slice.Count() > 0)
{
Vector ySlice = y.Slice(slice);
// only one answer, set leaf
if (ySlice.Distinct().Count() == 1)
{
var child = BuildLeafNode(ySlice[0]);
tree.AddVertex(child);
edge.ChildId = child.Id;
}
// otherwise continue to build tree
else
{
var child = BuildTree(x.Slice(slice), ySlice, depth - 1, used, tree);
tree.AddVertex(child);
edge.ChildId = child.Id;
}
edges.Add(edge);
}
}
// problem, need to convert
// parent to terminal node
// with mode
if (edges.Count <= 1)
{
var val = y.Mode();
node.IsLeaf = true;
node.Value = val;
}
tree.AddVertex(node);
//......... part of the code omitted here .........
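Isolating the row-partitioning step that drives the recursion, as a hedged sketch (col, min, and max are illustrative; the calls mirror the continuous branch above):

// indices of the rows whose value in this column falls inside the edge's range
IEnumerable<int> rows = x.Indices(v => v[col] >= min && v[col] < max);
// matching slices keep features and labels row-aligned for the recursive call
Matrix xChild = x.Slice(rows);
Vector yChild = y.Slice(rows);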
Example 8: GenerateModel
private static LearningModel GenerateModel(IGenerator generator, Matrix x, Vector y, IEnumerable<object> examples, double trainingPct)
{
var descriptor = generator.Descriptor;
var total = examples.Count();
var trainingCount = (int)System.Math.Floor(total * trainingPct);
// hold out the remaining (1 - trainingPct) fraction for testing
var testingSlice = GetTestPoints(total - trainingCount, total).ToArray();
// use the trainingPct fraction for training
var trainingSlice = GetTrainingPoints(testingSlice, total).ToArray();
// training
var x_t = x.Slice(trainingSlice);
var y_t = y.Slice(trainingSlice);
// generate model
var model = generator.Generate(x_t, y_t);
model.Descriptor = descriptor;
// testing
object[] test = GetTestExamples(testingSlice, examples);
double accuracy = 0;
for (int j = 0; j < test.Length; j++)
{
// items under test
object o = test[j];
// get truth
var truth = Ject.Get(o, descriptor.Label.Name);
// if truth is a string, sanitize
if (descriptor.Label.Type == typeof(string))
truth = StringHelpers.Sanitize(truth.ToString());
// make prediction
var features = descriptor.Convert(o, false).ToVector();
var p = model.Predict(features);
var pred = descriptor.Label.Convert(p);
// assess accuracy
if (truth.Equals(pred))
accuracy += 1;
}
// get percentage correct
accuracy /= test.Length;
return new LearningModel { Generator = generator, Model = model, Accuracy = accuracy };
}
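GetTestPoints and GetTrainingPoints are not shown on this page; the sketch below assumes only that they produce disjoint index sets covering 0..total-1, and substitutes a naive contiguous split to show how the indices feed Matrix.Slice and Vector.Slice (a hypothetical stand-in, not the library's sampling logic; x and y come from the caller as above):

using System.Linq;

int total = 100;
double trainingPct = 0.8;
int trainingCount = (int)System.Math.Floor(total * trainingPct);
int[] testingSlice = Enumerable.Range(0, total - trainingCount).ToArray();              // naive stand-in
int[] trainingSlice = Enumerable.Range(total - trainingCount, trainingCount).ToArray(); // naive stand-in
var x_t = x.Slice(trainingSlice);   // training rows
var y_t = y.Slice(trainingSlice);   // matching labels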
Example 9: Generate
//......... part of the code omitted here .........
if (newPair.Item1 >= 0 && newPair.Item2 >= 0 && newPair.Item1 != newPair.Item2)
{
i = newPair.Item1; j = newPair.Item2;
// compute new gradients
gradient[i] = Bias + (alpha * y * K[i, VectorType.Col]).Sum() - y[i];
if ((y[i] * gradient[i] < -this.Epsilon && alpha[i] < this.C) || (y[i] * gradient[i] > this.Epsilon && alpha[i] > 0))
{
gradient[j] = Bias + (alpha * y * K[j, VectorType.Col]).Sum() - y[j];
// store temp working copies of alpha from both pairs (i, j)
tempAI = alpha[i]; tempAJ = alpha[j];
// update lower and upper bounds of lagrange multipliers
if (y[i] == y[j])
{
// pairs are same class don't apply large margin
lagLow = System.Math.Max(0.0, alpha[j] + alpha[i] - this.C);
lagHigh = System.Math.Min(this.C, alpha[j] + alpha[i]);
}
else
{
// pairs are not same class, apply large margin
lagLow = System.Math.Max(0.0, alpha[j] - alpha[i]);
lagHigh = System.Math.Min(this.C, this.C + alpha[j] - alpha[i]);
}
// if lagrange constraints are not diverse then get new working set
if (lagLow == lagHigh) continue;
// compute cost and if it's greater than 0 skip
// cost should optimise large margin where fit line intercepts <= 0
cost = 2.0 * K[i, j] - K[i, i] - K[j, j];
if (cost >= 0.0) continue;
else
{
// update alpha of (j) w.r.t to the relative cost difference of the i-th and j-th gradient
alpha[j] = alpha[j] - (y[j] * (gradient[i] - gradient[j])) / cost;
// clip alpha with lagrange multipliers
alpha[j] = System.Math.Min(lagHigh, alpha[j]);
alpha[j] = System.Math.Max(lagLow, alpha[j]);
// check alpha tolerance factor
if (System.Math.Abs(alpha[j] - tempAJ) < this.Epsilon)
{
// we're optimising large margins so skip small ones
alpha[j] = tempAJ; continue;
}
// update alpha of i if we have a large margin w.r.t to alpha (j)
alpha[i] = alpha[i] + y[i] * y[j] * (tempAJ - alpha[j]);
// precompute i, j into feasible region for Bias
double yBeta = (alpha[i] - tempAI) * K[i, j] - y[j] * (alpha[j] - tempAJ);
// store temp beta with gradient for i, j pairs
double beta_i = this.Bias - gradient[i] - y[i] * yBeta * K[i, j];
double beta_j = this.Bias - gradient[j] - y[i] * yBeta * K[j, j];
// update new bias with constrained alpha limits (0 < alpha < C)
if (0.0 < alpha[i] && alpha[i] < this.C) this.Bias = beta_i;
else if (0.0 < alpha[j] && alpha[j] < this.C) this.Bias = beta_j;
else this.Bias = (beta_i + beta_j) / 2.0;
changes++;
}
}
}
else if (newPair.Item1 == -1 || newPair.Item2 == -1)
{
// unable to find suitable sub problem (j) to optimise
finalise = true;
break;
}
}
if (changes == 0) iterations++;
else iterations = 0;
#endregion
}
// get only supporting parameters where alpha is positive
// i.e. because 0 < alpha < large margin
int[] fitness = (alpha > 0d).ToArray();
// return initialised model
return new SVMModel()
{
Descriptor = this.Descriptor,
FeatureNormalizer = base.FeatureNormalizer,
FeatureProperties = base.FeatureProperties,
Theta = ((alpha * y) * X).ToVector(),
Alpha = alpha.Slice(fitness),
Bias = this.Bias,
X = X.Slice(fitness, VectorType.Row),
Y = y.Slice(fitness),
KernelFunction = this.KernelFunction
};
}
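The inner update is the textbook SMO step. With working-set gradients g_i, g_j and the second derivative along the constraint direction (the cost variable, which must be negative), the code computes:

\eta = 2K_{ij} - K_{ii} - K_{jj} < 0, \qquad \alpha_j \leftarrow \mathrm{clip}\Big(\alpha_j - \frac{y_j (g_i - g_j)}{\eta},\; L,\; H\Big), \qquad \alpha_i \leftarrow \alpha_i + y_i y_j (\alpha_j^{\mathrm{old}} - \alpha_j)

where L and H are lagLow and lagHigh. Afterwards, slicing alpha, X, and y to the indices where alpha > 0 retains only the support vectors.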
Example 10: Generate
/// <summary>Generate model based on a set of examples.</summary>
/// <param name="x">The Matrix to process.</param>
/// <param name="y">The Vector to process.</param>
/// <returns>Model.</returns>
public override IModel Generate(Matrix x, Vector y)
{
var N = y.Length;
var a = Vector.Zeros(N);
// compute kernel
var K = this.Kernel.Compute(x);
var n = 1;
// hopefully enough to converge right? ;)
// need to be smarter about storing SPD kernels...
var found_error = true;
while (n < 500 && found_error)
{
found_error = false;
for (var i = 0; i < N; i++)
{
// flag the pass as erroneous only on a miss, so earlier
// mistakes are not overwritten by later correct examples
if (y[i] * a.Dot(K[i]) <= 0)
{
found_error = true;
a[i] += y[i];
}
}
n++;
}
// anything that *matters*
// i.e. support vectors
var indices = a.Indices(d => d != 0);
// slice up examples to contain
// only support vectors
return new KernelPerceptronModel
{
Kernel = this.Kernel, A = a.Slice(indices), Y = y.Slice(indices), X = x.Slice(indices)
};
}
Example 11: GenerateModel
/// <summary>Generates a model.</summary>
/// <param name="generator">Model generator used.</param>
/// <param name="x">The Matrix to process.</param>
/// <param name="y">The Vector to process.</param>
/// <param name="examples">Source data.</param>
/// <param name="trainingPct">The training pct.</param>
/// <param name="total">Number of Examples</param>
/// <returns>The model.</returns>
private static LearningModel GenerateModel(IGenerator generator, Matrix x, Vector y, IEnumerable<object> examples, double trainingPct, int total)
{
var descriptor = generator.Descriptor;
//var total = examples.Count();
var trainingCount = (int)System.Math.Floor(total * trainingPct);
// hold out the remaining (1 - trainingPct) fraction for testing
var testingSlice = GetTestPoints(total - trainingCount, total).ToArray();
// use the trainingPct fraction for training
var trainingSlice = GetTrainingPoints(testingSlice, total).ToArray();
// training
var x_t = x.Slice(trainingSlice);
var y_t = y.Slice(trainingSlice);
// generate model
var model = generator.Generate(x_t, y_t);
model.Descriptor = descriptor;
Score score = new Score();
if (testingSlice.Count() > 0)
{
// testing
object[] test = GetTestExamples(testingSlice, examples);
Vector y_pred = new Vector(test.Length);
Vector y_test = descriptor.ToExamples(test).Item2;
bool isBinary = y_test.IsBinary();
if (isBinary)
y_test = y_test.ToBinary(f => f == 1d, 1.0, 0.0);
for (int j = 0; j < test.Length; j++)
{
// items under test
object o = test[j];
// make prediction
var features = descriptor.Convert(o, false).ToVector();
// --- temp changes ---
double val = model.Predict(features);
var pred = descriptor.Label.Convert(val);
var truth = Ject.Get(o, descriptor.Label.Name);
if (truth.Equals(pred))
y_pred[j] = y_test[j];
else
y_pred[j] = (isBinary ? (y_test[j] >= 1d ? 0d : 1d) : val);
}
// score predictions
score = Score.ScorePredictions(y_pred, y_test);
}
return new LearningModel { Generator = generator, Model = model, Score = score };
}
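A minimal sketch of the scoring step in isolation (the vectors are illustrative; Score.ScorePredictions is taken from the call above, and its internal metrics are not assumed here):

using numl.Math.LinearAlgebra;

Vector y_test = new Vector(new double[] { 1, 0, 1, 1 });
Vector y_pred = new Vector(new double[] { 1, 0, 0, 1 });   // one miss at index 2
Score score = Score.ScorePredictions(y_pred, y_test);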