本文整理汇总了C#中MathNet.Numerics.LinearAlgebra.Double.SparseMatrix.EnumerateIndexed方法的典型用法代码示例。如果您正苦于以下问题:C# SparseMatrix.EnumerateIndexed方法的具体用法?C# SparseMatrix.EnumerateIndexed怎么用?C# SparseMatrix.EnumerateIndexed使用的例子?那么恭喜您,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类 MathNet.Numerics.LinearAlgebra.Double.SparseMatrix 的用法示例。
在下文中一共展示了SparseMatrix.EnumerateIndexed方法的1个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C#代码示例。
示例1: PredictRatings
/// <summary>
/// Ordinal Matrix Factorization.
/// </summary>
/// <param name="R_train">The matrix contains training ratings</param>
/// <param name="R_unknown">The matrix contains ones indicating unknown ratings</param>
/// <param name="R_scorer">This matrix contains ratings predicted by the scorer on
/// both the R_train and R_unknown sets</param>
/// <returns>The predicted ratings on R_unknown</returns>
#region PredictRatings
public static string PredictRatings(SparseMatrix R_train, SparseMatrix R_unknown,
SparseMatrix R_scorer, List<double> quantizer, out DataMatrix R_predicted,
out Dictionary<Tuple<int, int>, List<double>> OMFDistributionByUserItem)
{
StringBuilder log = new StringBuilder();
/************************************************************
* Parameterization and Initialization
************************************************************/
#region Parameterization and Initialization
// This matrix stores predictions
SparseMatrix R_predicted_out = (SparseMatrix)Matrix.Build.Sparse(R_unknown.RowCount, R_unknown.ColumnCount);
Dictionary<Tuple<int, int>, List<double>> OMFDistributionByUserItem_out =
new Dictionary<Tuple<int, int>, List<double>>(R_unknown.NonZerosCount);
// User specified parameters
double maxEpoch = Config.OMF.MaxEpoch;
double learnRate = Config.OMF.LearnRate;
double regularization = Config.OMF.Regularization;
int intervalCount = quantizer.Count;
int userCount = R_train.RowCount;
// Parameters for each user
Dictionary<int, ParametersOfUser> paramtersByUser = new Dictionary<int, ParametersOfUser>(R_train.RowCount);
// Compute initial values of t1 and betas
// that will be used for all users, Eq. 5
double t1_initial = (double)(quantizer[0] + quantizer[1]) / 2;
Vector<double> betas_initial = Vector.Build.Dense(quantizer.Count - 2);
for (int i = 1; i <= betas_initial.Count; i++)
{
double t_r = t1_initial;
double t_r_plus_1 = (quantizer[i] + quantizer[i + 1]) * 0.5f;
betas_initial[i - 1] = Math.Log(t_r_plus_1 - t_r); // natural base
t_r = t_r_plus_1;
}
// Initialize parameters (t1, betas) for each user
for (int indexOfUser = 0; indexOfUser < R_train.RowCount; indexOfUser++)
{
paramtersByUser[indexOfUser] = new ParametersOfUser(t1_initial, betas_initial);
}
#endregion
/************************************************************
* Learn parameters from training data R_train and R_score
************************************************************/
#region Learn parameters from training data R_train and R_score
// Learn parameters for each user, note that each user has his own model
Object lockMe = new Object();
Parallel.ForEach(R_train.EnumerateRowsIndexed(), row =>
{
int indexOfUser = row.Item1;
SparseVector ratingsOfUser = (SparseVector)row.Item2;
// Store this user's ratings from R_train and correpsonding ratings from scorer
List<double> ratingsFromScorer = new List<double>(ratingsOfUser.NonZerosCount);
List<double> ratingsFromRTrain = new List<double>(ratingsOfUser.NonZerosCount);
foreach (var element in ratingsOfUser.EnumerateIndexed(Zeros.AllowSkip))
{
int indexOfItem = element.Item1;
double rating = element.Item2;
// Ratings need to be added in the same order
ratingsFromScorer.Add(R_scorer[indexOfUser, indexOfItem]);
ratingsFromRTrain.Add(rating);
}
Debug.Assert(ratingsFromScorer.Count == ratingsOfUser.NonZerosCount);
Debug.Assert(ratingsFromRTrain.Count == ratingsOfUser.NonZerosCount);
// Parameters for the current user are estimated by
// maximizing the log likelihood (Eq. 21) using stochastic gradient ascent
// Eq. 22
double t1 = paramtersByUser[indexOfUser].t1;
Vector<double> betas = paramtersByUser[indexOfUser].betas;
for (int epoch = 0; epoch < maxEpoch; epoch++)
{
for (int i = 0; i < ratingsFromRTrain.Count; i++)
{
double ratingFromRTrain = ratingsFromRTrain[i];
double ratingFromScorer = ratingsFromScorer[i];
int r = quantizer.IndexOf(ratingFromRTrain); // r is the interval that the rating falls into
double probLE_r = ComputeProbLE(ratingFromScorer, r, t1, betas); // Eq. 9
double probLE_r_minus_1 = ComputeProbLE(ratingFromScorer, r - 1, t1, betas);
double probE_r = probLE_r - probLE_r_minus_1; // Eq. 10
// Compute derivatives/gradients
double derivativeOft1 = learnRate / probE_r * (probLE_r * (1 - probLE_r) * DerivativeOfBeta(r, 0, t1)
- probLE_r_minus_1 * (1 - probLE_r_minus_1) * DerivativeOfBeta(r - 1, 0, t1)
- Config.OMF.Regularization * t1);
//.........这里部分代码省略.........