本文整理汇总了C#中IDataset.GetDoubleValues方法的典型用法代码示例。如果您正苦于以下问题:C# IDataset.GetDoubleValues方法的具体用法?C# IDataset.GetDoubleValues怎么用?C# IDataset.GetDoubleValues使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类IDataset
的用法示例。
在下文中一共展示了IDataset.GetDoubleValues方法的9个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。
示例1: Scaling
// Computes per-variable scaling bounds over the given rows: for every variable the
// minimum and maximum (ignoring NaN entries) are stored in scalingParameters.
public Scaling(IDataset ds, IEnumerable<string> variables, IEnumerable<int> rows) {
foreach (var name in variables) {
var finite = ds.GetDoubleValues(name, rows).Where(v => !double.IsNaN(v));
scalingParameters[name] = Tuple.Create(finite.Min(), finite.Max());
}
}
示例2: PrepareInputMatrix
// Builds a dense rows x variables matrix of double values read from the dataset.
// Rows are indexed in the order given by 'rows', columns in the order given by 'variables'.
public static double[,] PrepareInputMatrix(IDataset dataset, IEnumerable<string> variables, IEnumerable<int> rows) {
// Materialize both parameters exactly once: they may be lazily produced enumerables,
// and the original code re-enumerated 'variables' and 'rows' in the loop below, which
// is wasteful and can even yield different elements on the second pass.
List<string> variablesList = variables.ToList();
List<int> rowsList = rows.ToList();
double[,] matrix = new double[rowsList.Count, variablesList.Count];
int col = 0;
foreach (string column in variablesList) {
int row = 0;
foreach (var value in dataset.GetDoubleValues(column, rowsList)) {
matrix[row, col] = value;
row++;
}
col++;
}
return matrix;
}
示例3: GetSymbolicExpressionTreeValues
// Produces an n-step-ahead forecast of the target variable for each starting row.
// For every (row, horizon) pair the tree is evaluated 'horizon' times; each predicted
// value is written into targetVariableCache so later steps of the same forecast read
// it as if it were observed data. After yielding a forecast, the overwritten cache
// entries are restored from the dataset so the next forecast starts from real values.
public IEnumerable<IEnumerable<double>> GetSymbolicExpressionTreeValues(ISymbolicExpressionTree tree, IDataset dataset, IEnumerable<int> rows, IEnumerable<int> horizons) {
if (CheckExpressionsWithIntervalArithmetic.Value)
throw new NotSupportedException("Interval arithmetic is not yet supported in the symbolic data analysis interpreter.");
// (re)build the cached target column if it is missing or too small for this dataset
if (targetVariableCache == null || targetVariableCache.GetLength(0) < dataset.Rows)
targetVariableCache = dataset.GetDoubleValues(TargetVariable).ToArray();
if (invalidateCacheIndexes == null)
invalidateCacheIndexes = new List<int>(10);
string targetVariable = TargetVariable;
lock (EvaluatedSolutions) {
EvaluatedSolutions.Value++; // increment the evaluated solutions counter
}
var state = PrepareInterpreterState(tree, dataset, targetVariableCache, TargetVariable);
var rowsEnumerator = rows.GetEnumerator();
var horizonsEnumerator = horizons.GetEnumerator();
// produce a n-step forecast for all rows
// the non-short-circuiting '&' advances BOTH enumerators every iteration, so a
// length mismatch between rows and horizons is detectable by the check after the loop
while (rowsEnumerator.MoveNext() & horizonsEnumerator.MoveNext()) {
int row = rowsEnumerator.Current;
int horizon = horizonsEnumerator.Current;
double[] vProgs = new double[horizon];
for (int i = 0; i < horizon; i++) {
int localRow = i + row; // create a local variable for the ref parameter
vProgs[i] = Evaluate(dataset, ref localRow, state);
// feed the prediction back into the cache so step i+1 consumes it as input;
// remember the index so the true value can be restored below
targetVariableCache[localRow] = vProgs[i];
invalidateCacheIndexes.Add(localRow);
state.Reset();
}
yield return vProgs;
// roll back: restore the true target values for every row overwritten above
int j = 0;
foreach (var targetValue in dataset.GetDoubleValues(targetVariable, invalidateCacheIndexes)) {
targetVariableCache[invalidateCacheIndexes[j]] = targetValue;
j++;
}
invalidateCacheIndexes.Clear();
}
// if either enumerator still has elements the sequences had different lengths
if (rowsEnumerator.MoveNext() || horizonsEnumerator.MoveNext())
throw new ArgumentException("Number of elements in rows and horizon enumerations doesn't match.");
}
示例4: GetEstimatedClassValues
// uses sorting to return the values in the order of rows, instead of using nested for loops
// to avoid O(n²) runtime
// Classifies the value of 'Variable' for each requested row: NaN values map to
// MissingValuesClass; otherwise a value gets the class of the first split threshold
// that exceeds it, found via a single linear sweep over the sorted values.
public override IEnumerable<double> GetEstimatedClassValues(IDataset dataset, IEnumerable<int> rows) {
var values = dataset.GetDoubleValues(Variable, rows).ToArray();
var rowsArray = rows.ToArray();
var order = Enumerable.Range(0, rowsArray.Length).ToArray();
double[] estimated = new double[rowsArray.Length];
// sort rows, tracking the permutation in 'order', then sort values
// NOTE(review): 'values' is still in the caller's original row order while
// 'rowsArray' has just been re-sorted — double-check that the key/item pairing of
// this second Array.Sort is the intended alignment.
Array.Sort(rowsArray, order);
Array.Sort(values, rowsArray);
int curSplit = 0, curIndex = 0;
// NaNs sort to the front: assign the missing-values class to all of them first
while (curIndex < values.Length && Double.IsNaN(values[curIndex])) {
estimated[curIndex] = MissingValuesClass;
curIndex++;
}
// walk splits and sorted values in lock-step; every value below the current
// threshold receives that split's class
while (curSplit < Splits.Length) {
while (curIndex < values.Length && Splits[curSplit] > values[curIndex]) {
estimated[curIndex] = classes[curSplit];
curIndex++;
}
curSplit++;
}
// invert the earlier permutations so 'estimated' lines up with the caller's row order
Array.Sort(rowsArray, estimated);
Array.Sort(order, estimated);
return estimated;
}
示例5: CalculateModel
// Trains the Gaussian process model on the given rows: builds the (optionally scaled)
// input matrix, Cholesky-factorizes the covariance matrix, solves for the weight
// vector alpha, computes the negative log likelihood, and accumulates the gradients
// of the likelihood w.r.t. all hyperparameters (mean, covariance, noise).
private void CalculateModel(IDataset ds, IEnumerable<int> rows, bool scaleInputs = true) {
this.trainingDataset = (IDataset)ds.Clone();
this.trainingRows = rows.ToArray();
this.inputScaling = scaleInputs ? new Scaling(ds, allowedInputVariables, rows) : null;
x = GetData(ds, this.allowedInputVariables, this.trainingRows, this.inputScaling);
IEnumerable<double> y;
y = ds.GetDoubleValues(TargetVariable, rows);
int n = x.GetLength(0);
var columns = Enumerable.Range(0, x.GetLength(1)).ToArray();
// calculate cholesky decomposed (lower triangular) covariance matrix
var cov = covarianceFunction.GetParameterizedCovarianceFunction(covarianceParameter, columns);
this.l = CalculateL(x, cov, sqrSigmaNoise);
// calculate mean
var mean = meanFunction.GetParameterizedMeanFunction(meanParameter, columns);
double[] m = Enumerable.Range(0, x.GetLength(0))
.Select(r => mean.Mean(x, r))
.ToArray();
// calculate sum of diagonal elements for likelihood
double diagSum = Enumerable.Range(0, n).Select(i => Math.Log(l[i, i])).Sum();
// solve for alpha using the Cholesky factor; ym is the mean-centered target vector
double[] ym = y.Zip(m, (a, b) => a - b).ToArray();
int info;
alglib.densesolverreport denseSolveRep;
alglib.spdmatrixcholeskysolve(l, n, false, ym, out info, out denseSolveRep, out alpha);
for (int i = 0; i < alpha.Length; i++)
alpha[i] = alpha[i] / sqrSigmaNoise;
negativeLogLikelihood = 0.5 * Util.ScalarProd(ym, alpha) + diagSum + (n / 2.0) * Math.Log(2.0 * Math.PI * sqrSigmaNoise);
// derivatives
int nAllowedVariables = x.GetLength(1);
alglib.matinvreport matInvRep;
// invert the covariance via its Cholesky factor on a copy, then combine with the
// outer product of alpha — lCopy holds the matrix shared by all gradient terms
double[,] lCopy = new double[l.GetLength(0), l.GetLength(1)];
Array.Copy(l, lCopy, lCopy.Length);
alglib.spdmatrixcholeskyinverse(ref lCopy, n, false, out info, out matInvRep);
if (info != 1) throw new ArgumentException("Can't invert matrix to calculate gradients.");
for (int i = 0; i < n; i++) {
for (int j = 0; j <= i; j++)
lCopy[i, j] = lCopy[i, j] / sqrSigmaNoise - alpha[i] * alpha[j];
}
// gradient w.r.t. the noise hyperparameter (sum over the diagonal)
double noiseGradient = sqrSigmaNoise * Enumerable.Range(0, n).Select(i => lCopy[i, i]).Sum();
// gradients w.r.t. each mean-function parameter
double[] meanGradients = new double[meanFunction.GetNumberOfParameters(nAllowedVariables)];
for (int k = 0; k < meanGradients.Length; k++) {
var meanGrad = new double[alpha.Length];
for (int g = 0; g < meanGrad.Length; g++)
meanGrad[g] = mean.Gradient(x, g, k);
meanGradients[k] = -Util.ScalarProd(meanGrad, alpha);
}
// gradients w.r.t. each covariance-function parameter, accumulated over the strict
// lower triangle plus a half-weighted diagonal
double[] covGradients = new double[covarianceFunction.GetNumberOfParameters(nAllowedVariables)];
if (covGradients.Length > 0) {
for (int i = 0; i < n; i++) {
for (int j = 0; j < i; j++) {
var g = cov.CovarianceGradient(x, i, j);
for (int k = 0; k < covGradients.Length; k++) {
covGradients[k] += lCopy[i, j] * g[k];
}
}
var gDiag = cov.CovarianceGradient(x, i, i);
for (int k = 0; k < covGradients.Length; k++) {
// diag
covGradients[k] += 0.5 * lCopy[i, i] * gDiag[k];
}
}
}
// layout: [mean parameters..., covariance parameters..., noise]
hyperparameterGradients =
meanGradients
.Concat(covGradients)
.Concat(new double[] { noiseGradient }).ToArray();
}
示例6: Reduce
// Projects the input variables of the given rows through transformationMatrix and
// appends the target variable as the last column of the returned matrix.
public double[,] Reduce(IDataset dataset, IEnumerable<int> rows) {
var data = AlglibUtil.PrepareInputMatrix(dataset, allowedInputVariables, rows);
var targets = dataset.GetDoubleValues(targetVariable, rows).ToArray();
int reducedCols = transformationMatrix.GetLength(1);
var result = new double[data.GetLength(0), reducedCols + 1];
for (int i = 0; i < data.GetLength(0); i++) {
for (int j = 0; j < data.GetLength(1); j++) {
for (int x = 0; x < reducedCols; x++) {
result[i, x] += data[i, j] * transformationMatrix[j, x];
}
}
// assign the target once per row (the original redundantly re-assigned it once
// per input column inside the j-loop)
result[i, reducedCols] = targets[i];
}
return result;
}
示例7: GetScaledValues
// Maps the values of 'variable' at the given rows onto the range [0..1] using the
// previously stored min/max bounds; a (nearly) constant variable yields all zeros.
public IEnumerable<double> GetScaledValues(IDataset ds, string variable, IEnumerable<int> rows) {
var bounds = scalingParameters[variable];
double lo = bounds.Item1;
double hi = bounds.Item2;
if (lo.IsAlmost(hi)) return rows.Select(_ => 0.0); // degenerate range: emit zeros
return ds.GetDoubleValues(variable, rows).Select(v => (v - lo) / (hi - lo));
}
示例8: PCAReduce
// Runs a PCA over the given variables/rows and returns the instances projected onto
// the principal-component basis (instances x components).
private static double[,] PCAReduce(IDataset dataset, IEnumerable<int> rows, IEnumerable<string> variables) {
var instances = rows.ToArray();
var attributes = variables.ToArray();
// NOTE(review): the extra (always-zero) last column is handed to pcabuildbasis
// below, but nvars = attributes.Length means alglib ignores it — confirm intent.
var data = new double[instances.Length, attributes.Length + 1];
for (int j = 0; j < attributes.Length; j++) {
int i = 0;
var values = dataset.GetDoubleValues(attributes[j], instances);
foreach (var v in values) {
data[i++, j] = v;
}
}
int info;
double[] variances;
var matrix = new double[0, 0];
alglib.pcabuildbasis(data, instances.Length, attributes.Length, out info, out variances, out matrix);
// Project onto the basis. Reuse the already-loaded 'data' matrix instead of
// re-reading every cell from the dataset (the original issued one GetDoubleValue
// call per instance/attribute pair, duplicating the loads above).
var result = new double[instances.Length, matrix.GetLength(1)];
for (int r = 0; r < instances.Length; r++) {
for (int j = 0; j < matrix.GetLength(1); j++) {
double sum = 0.0;
for (int i = 0; i < attributes.Length; i++)
sum += data[r, i] * matrix[i, j];
result[r, j] = sum;
}
}
return result;
}
示例9: CheckVariablesForPossibleTargetVariables
// Returns every double variable usable as a classification target, i.e. one whose
// first inspected rows contain at most MaximumNumberOfClasses distinct values.
// Throws ArgumentException when no variable qualifies.
public static IEnumerable<string> CheckVariablesForPossibleTargetVariables(IDataset dataset) {
int maxSamples = Math.Min(InspectedRowsToDetermineTargets, dataset.Rows);
var validTargetVariables = dataset.DoubleVariables
.Where(v => dataset.GetDoubleValues(v)
.Take(maxSamples)
.Distinct()
.Count() <= MaximumNumberOfClasses)
.ToArray();
if (validTargetVariables.Length == 0)
throw new ArgumentException("Import of classification problem data was not successful, because no target variable was found." +
" A target variable must have at most " + MaximumNumberOfClasses + " distinct values to be applicable to classification.");
return validTargetVariables;
}