This page collects representative code examples of the C# method HiddenMarkovClassifier.LogLikelihood. If you have been wondering what HiddenMarkovClassifier.LogLikelihood does and how to use it in practice, the curated examples below may help. You can also read further about the enclosing class, HiddenMarkovClassifier.
Three code examples of HiddenMarkovClassifier.LogLikelihood are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better C# examples.
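Before the full examples, here is a minimal sketch of the basic call pattern for the discrete-density classifier. It is not taken from the examples below: the training data is made up for illustration, and the exact constructor and LogLikelihood overloads may vary between Accord.NET versions.

using System;
using Accord.Statistics.Models.Markov;
using Accord.Statistics.Models.Markov.Learning;
using Accord.Statistics.Models.Markov.Topology;

class LogLikelihoodSketch
{
    static void Main()
    {
        // Made-up two-class training set of discrete symbol sequences
        int[][] sequences =
        {
            new[] { 0, 1, 2, 3 }, // class 0
            new[] { 3, 2, 1, 0 }, // class 1
        };
        int[] outputs = { 0, 1 };

        // One forward-topology HMM per class, over an alphabet of 4 symbols
        var classifier = new HiddenMarkovClassifier(2, new Forward(3), symbols: 4);

        // Baum-Welch training for each class model, as in Example 1 below
        var teacher = new HiddenMarkovClassifierLearning(classifier,
            i => new BaumWelchLearning(classifier.Models[i]) { Tolerance = 0.001 });
        teacher.Run(sequences, outputs);

        // Log-likelihood of one sequence under one class model...
        double one = classifier.LogLikelihood(sequences[0], 0);

        // ...and the aggregate log-likelihood of the whole labeled set,
        // the quantity Example 1 uses as its training error measure
        double all = classifier.LogLikelihood(sequences, outputs);

        Console.WriteLine("{0} {1}", one, all);
    }
}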
Example 1: btnTrain_Click
private void btnTrain_Click(object sender, EventArgs e)
{
    if (dataGridView1.Rows.Count == 0)
    {
        MessageBox.Show("Please load or insert some data first.");
        return;
    }

    int states = (int)numStates.Value;
    int iterations = (int)numIterations.Value;
    double tolerance = (double)numConvergence.Value;

    if (rbStopIterations.Checked) tolerance = 0.0;
    if (rbStopConvergence.Checked) iterations = 0;

    // Retrieve the training data from the data grid view
    int rows = dataGridView1.Rows.Count;
    int[] outputs = new int[rows];
    var sequences = new int[rows][];

    for (int i = 0; i < rows; i++)
    {
        outputs[i] = (int)dataGridView1.Rows[i].Cells["colLabel"].Value - 1;
        sequences[i] = GetFeatures((double[][])dataGridView1.Rows[i].Tag);
    }

    int classes = outputs.Distinct().Count();
    string[] labels = new string[classes];
    for (int i = 0; i < labels.Length; i++)
        labels[i] = (i + 1).ToString();

    // Create a sequence classifier with one model per class
    classifier = new HiddenMarkovClassifier(labels.Length,
        new Forward(states), symbols: 20, names: labels);

    // Create the learning algorithm for the sequence classifier
    var teacher = new HiddenMarkovClassifierLearning(classifier,

        // Train each model using the selected convergence criteria
        i => new BaumWelchLearning(classifier.Models[i])
        {
            Tolerance = tolerance,
            Iterations = iterations,
        }
    );

    // Create and use a rejection threshold model
    teacher.Rejection = cbRejection.Checked;
    teacher.Empirical = true;
    teacher.Smoothing = (double)numSmoothing.Value;

    // Run the learning algorithm
    teacher.Run(sequences, outputs);

    double error = classifier.LogLikelihood(sequences, outputs);

    int hits = 0;
    toolStripProgressBar1.Visible = true;
    toolStripProgressBar1.Value = 0;
    toolStripProgressBar1.Step = 1;
    toolStripProgressBar1.Maximum = dataGridView1.Rows.Count;

    for (int i = 0; i < rows; i++)
    {
        double likelihood;
        int index = classifier.Compute(sequences[i], out likelihood);

        DataGridViewRow row = dataGridView1.Rows[i];

        if (index == -1)
        {
            row.Cells["colClassification"].Value = String.Empty;
        }
        else
        {
            row.Cells["colClassification"].Value = classifier.Models[index].Tag;
        }

        int expected = (int)row.Cells["colLabel"].Value;

        if (expected == index + 1)
        {
            row.Cells[0].Style.BackColor = Color.LightGreen;
            row.Cells[1].Style.BackColor = Color.LightGreen;
            row.Cells[2].Style.BackColor = Color.LightGreen;
            hits++;
        }
        else
        {
            row.Cells[0].Style.BackColor = Color.White;
            row.Cells[1].Style.BackColor = Color.White;
            row.Cells[2].Style.BackColor = Color.White;
        }
//......... (the rest of this method has been omitted) .........
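A note on what Example 1 computes: classifier.LogLikelihood(sequences, outputs) scores the whole labeled set at once, while Compute returns the winning class index, or -1 when the rejection threshold model wins. The following sketch shows the per-sequence pattern in isolation; 'newSequence' is a hypothetical unseen sequence and 'classifier' is the trained model from the example:

// Sketch only: 'newSequence' is a hypothetical unseen symbol sequence
double likelihood;
int label = classifier.Compute(newSequence, out likelihood);

if (label == -1)
{
    // With teacher.Rejection = true, -1 means the threshold model
    // outscored every class model, so the sequence is rejected
}
else
{
    // Log-likelihood of the sequence under the winning class model
    double ll = classifier.LogLikelihood(newSequence, label);
}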
Example 2: check4
private static void check4(double[][][] words, HiddenMarkovClassifier<Independent> model,
    MarkovMultivariateFunction target, HiddenConditionalRandomField<double[]> hcrf)
{
    double actual;
    double expected;

    foreach (var x in words)
    {
        for (int c = 0; c < model.Classes; c++)
        {
            for (int i = 0; i < model[c].States; i++)
            {
                // Check initial state transitions
                double xa = model.Priors[c];
                double xb = Math.Exp(model[c].Probabilities[i]);
                double xc = model[c].Emissions[i].ProbabilityDensityFunction(x[0]);
                expected = xa * xb * xc;
                actual = Math.Exp(target.Factors[c].Compute(-1, i, x, 0, c));
                Assert.IsTrue(expected.IsRelativelyEqual(actual, 1e-10));
                Assert.IsFalse(double.IsNaN(actual));
            }

            for (int t = 1; t < x.Length; t++)
            {
                // Check normal state transitions
                for (int i = 0; i < model[c].States; i++)
                {
                    for (int j = 0; j < model[c].States; j++)
                    {
                        double xb = Math.Exp(model[c].Transitions[i, j]);
                        double xc = model[c].Emissions[j].ProbabilityDensityFunction(x[t]);
                        expected = xb * xc;
                        actual = Math.Exp(target.Factors[c].Compute(i, j, x, t, c));
                        Assert.IsTrue(expected.IsRelativelyEqual(actual, 1e-10));
                        Assert.IsFalse(double.IsNaN(actual));
                    }
                }
            }

            actual = Math.Exp(model.LogLikelihood(x, c));
            expected = Math.Exp(hcrf.LogLikelihood(x, c));
            Assert.AreEqual(expected, actual, 1e-10);
            Assert.IsFalse(double.IsNaN(actual));

            actual = model.Compute(x);
            expected = hcrf.Compute(x);
            Assert.AreEqual(expected, actual);
            Assert.IsFalse(double.IsNaN(actual));
        }
    }
}
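Example 2 verifies, factor by factor, that the MarkovMultivariateFunction built from the classifier reproduces the original models' potentials: at the first observation, class prior times initial-state probability times emission density; at later steps, transition probability times emission density. The conversion under test is the one performed in Example 3; here is a short sketch of it in isolation, reusing model, x, and c from the signature above:

// Wrap the trained Markov classifier as a potential function,
// then build an equivalent hidden conditional random field from it
var function = new MarkovMultivariateFunction(model);
var hcrf = new HiddenConditionalRandomField<double[]>(function);

// Before any further HCRF training, both formulations should
// assign the same class-conditional score to a sequence
double fromHmm = model.LogLikelihood(x, c);
double fromHcrf = hcrf.LogLikelihood(x, c);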
Example 3: SimpleGestureRecognitionTest
//......... (the beginning of this method has been omitted) .........
    var hmm = new HiddenMarkovClassifier<Independent<NormalDistribution>>
    (
        classes: numberOfWords,
        topology: new Forward(numberOfStates), // word classifiers should use a forward topology
        initial: initial
    );

    // Create a new learning algorithm to train the sequence classifier
    var teacher = new HiddenMarkovClassifierLearning<Independent<NormalDistribution>>(hmm,

        // Train each model until the log-likelihood changes less than 0.001
        modelIndex => new BaumWelchLearning<Independent<NormalDistribution>>(hmm.Models[modelIndex])
        {
            Tolerance = 0.001,
            Iterations = 100,

            // This is necessary so the code doesn't blow up when it realizes
            // there is only one sample per word class. But it could also be
            // needed in normal situations as well.
            //
            FittingOptions = new IndependentOptions()
            {
                InnerOption = new NormalOptions() { Regularization = 1e-5 }
            }
        }
    );

    // Finally, we can run the learning algorithm!
    double logLikelihood = teacher.Run(words, labels);

    // At this point, the classifier should be successfully
    // able to distinguish between our three word classes:
    //
    int tc1 = hmm.Compute(hello);
    int tc2 = hmm.Compute(car);
    int tc3 = hmm.Compute(wardrobe);

    Assert.AreEqual(0, tc1);
    Assert.AreEqual(1, tc2);
    Assert.AreEqual(2, tc3);

    // Now, we can use the Markov classifier to initialize an HCRF
    var function = new MarkovMultivariateFunction(hmm);
    var hcrf = new HiddenConditionalRandomField<double[]>(function);

    // We can check that both are equivalent, although they have
    // formulations that can be learned with different methods
    //
    for (int i = 0; i < words.Length; i++)
    {
        // Should be the same
        int expected = hmm.Compute(words[i]);
        int actual = hcrf.Compute(words[i]);

        // Should be the same
        double h0 = hmm.LogLikelihood(words[i], 0);
        double c0 = hcrf.LogLikelihood(words[i], 0);

        double h1 = hmm.LogLikelihood(words[i], 1);
        double c1 = hcrf.LogLikelihood(words[i], 1);

        double h2 = hmm.LogLikelihood(words[i], 2);
        double c2 = hcrf.LogLikelihood(words[i], 2);

        Assert.AreEqual(expected, actual);
        Assert.AreEqual(h0, c0, 1e-10);
        Assert.IsTrue(h1.IsRelativelyEqual(c1, 1e-10));
        Assert.IsTrue(h2.IsRelativelyEqual(c2, 1e-10));

        Assert.IsFalse(double.IsNaN(c0));
        Assert.IsFalse(double.IsNaN(c1));
        Assert.IsFalse(double.IsNaN(c2));
    }

    // Now we can learn the HCRF using one of the best learning
    // algorithms available, Resilient Backpropagation learning:

    // Create a learning algorithm
    var rprop = new HiddenResilientGradientLearning<double[]>(hcrf)
    {
        Iterations = 50,
        Tolerance = 1e-5
    };

    // Run the algorithm and learn the models
    double error = rprop.Run(words, labels);

    // At this point, the HCRF should be successfully
    // able to distinguish between our three word classes:
    //
    int hc1 = hcrf.Compute(hello);
    int hc2 = hcrf.Compute(car);
    int hc3 = hcrf.Compute(wardrobe);

    Assert.AreEqual(0, hc1);
    Assert.AreEqual(1, hc2);
    Assert.AreEqual(2, hc3);
}
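Once rprop.Run has moved the HCRF weights away from the values seeded from the Markov classifier, the earlier numerical equivalence no longer has to hold, but LogLikelihood can still be queried per class. A small sketch, reusing names from the example:

// Sketch: inspect the trained HCRF's per-class scores for one gesture
for (int c = 0; c < numberOfWords; c++)
    Console.WriteLine("class {0}: {1:F4}", c, hcrf.LogLikelihood(hello, c));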