This article collects typical usage examples of the CvMat.Dispose method in C#: what CvMat.Dispose does, how to call it, and what it looks like in real code. The curated samples below may help, and they are also a reasonable starting point for exploring the CvMat class itself.
Six code examples of CvMat.Dispose are shown below, sorted by popularity by default.
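Before the numbered examples, here is a minimal sketch of the two usual disposal patterns for CvMat, which wraps a native OpenCV matrix and implements IDisposable (the sizes and element type below are arbitrary placeholders):

// Explicit disposal: release the native buffer as soon as the matrix is no longer needed.
CvMat mat = new CvMat(3, 3, MatrixType.F64C1);
Cv.Set(mat, CvScalar.ScalarAll(0));
// ... use mat ...
mat.Dispose();

// Scoped disposal: a using block calls Dispose automatically, even if an exception is thrown.
using (CvMat tmp = new CvMat(3, 3, MatrixType.F64C1))
{
    // ... use tmp ...
}

Examples 2 and 3 below use the explicit pattern for long-lived matrices and the using pattern for temporaries; Examples 4 and 5 show small helpers built around Dispose.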
Example 1: MushroomCreateDTree
/// <summary>
/// Creates and trains a decision tree classifier on the mushroom data set.
/// </summary>
/// <param name="data">Training samples, one mushroom per row.</param>
/// <param name="missing">Mask of missing values in <paramref name="data"/>.</param>
/// <param name="responses">Class label for each sample ('p' marks a poisonous mushroom).</param>
/// <param name="pWeight">Prior weight given to the poisonous class.</param>
/// <returns>The trained <see cref="CvDTree"/>.</returns>
private CvDTree MushroomCreateDTree(CvMat data, CvMat missing, CvMat responses, float pWeight)
{
    float[] priors = { 1, pWeight };
    CvMat varType = new CvMat(data.Cols + 1, 1, MatrixType.U8C1);
    Cv.Set(varType, CvScalar.ScalarAll(CvStatModel.CV_VAR_CATEGORICAL)); // all the variables are categorical

    CvDTree dtree = new CvDTree();
    CvDTreeParams p = new CvDTreeParams(8, // max depth
        10,     // min sample count
        0,      // regression accuracy: N/A here
        true,   // compute surrogate split, as we have missing data
        15,     // max number of categories (use sub-optimal algorithm for larger numbers)
        10,     // the number of cross-validation folds
        true,   // use 1SE rule => smaller tree
        true,   // throw away the pruned tree branches
        priors  // the array of priors; the bigger pWeight, the more attention
                // is paid to the poisonous mushrooms
                // (a mushroom will be judged poisonous with bigger chance)
    );
    dtree.Train(data, DTreeDataLayout.RowSample, responses, null, null, varType, missing, p);

    // compute hit-rate on the training database, demonstrates predict usage.
    int hr1 = 0, hr2 = 0, pTotal = 0;
    for (int i = 0; i < data.Rows; i++)
    {
        CvMat sample, mask;
        Cv.GetRow(data, out sample, i);
        Cv.GetRow(missing, out mask, i);
        double r = dtree.Predict(sample, mask).Value;
        bool d = Math.Abs(r - responses.DataArraySingle[i]) >= float.Epsilon;
        if (d)
        {
            if (r != 'p')
                hr1++;
            else
                hr2++;
        }
        //Console.WriteLine(responses.DataArraySingle[i]);
        pTotal += (responses.DataArraySingle[i] == (float)'p') ? 1 : 0;
    }
    Console.WriteLine("Results on the training database");
    Console.WriteLine("\tPoisonous mushrooms mis-predicted: {0} ({1}%)", hr1, (double)hr1 * 100 / pTotal);
    Console.WriteLine("\tFalse-alarms: {0} ({1}%)", hr2, (double)hr2 * 100 / (data.Rows - pTotal));
    varType.Dispose();
    return dtree;
}
Example 2: BuildMlpClassifier
//......... part of the method is omitted here .........
    // Create or load MLP classifier
    if (filenameToLoad != null)
    {
        // load classifier from the specified file
        mlp.Load(filenameToLoad);
        ntrainSamples = 0;
        if (mlp.GetLayerCount() == 0)
        {
            Console.WriteLine("Could not read the classifier {0}", filenameToLoad);
            return;
        }
        Console.WriteLine("The classifier {0} is loaded.", filenameToLoad);
    }
    else
    {
        // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
        //
        // MLP does not support categorical variables explicitly.
        // So, instead of the output class label, we will use
        // a binary vector of <class_count> components for training and,
        // therefore, MLP will give us a vector of "probabilities" at the
        // prediction stage
        //
        // !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
        using (CvMat newResponses = new CvMat(ntrainSamples, ClassCount, MatrixType.F32C1))
        {
            // 1. unroll the responses
            Console.WriteLine("Unrolling the responses...");
            unsafe
            {
                for (int i = 0; i < ntrainSamples; i++)
                {
                    int clsLabel = Cv.Round(responses.DataArraySingle[i]) - 'A';
                    float* bitVec = (float*)(newResponses.DataByte + i * newResponses.Step);
                    for (int j = 0; j < ClassCount; j++)
                    {
                        bitVec[j] = 0.0f;
                    }
                    bitVec[clsLabel] = 1.0f;
                }
            }
            Cv.GetRows(data, out trainData, 0, ntrainSamples);

            // 2. train classifier
            int[] layerSizesData = { data.Cols, 100, 100, ClassCount };
            layerSizes = new CvMat(1, layerSizesData.Length, MatrixType.S32C1, layerSizesData);
            mlp.Create(layerSizes);
            Console.Write("Training the classifier (may take a few minutes)...");
            mlp.Train(
                trainData, newResponses, null, null,
                new CvANN_MLP_TrainParams(new CvTermCriteria(300, 0.01), MLPTrainingMethod.RPROP, 0.01)
            );
        }
        Console.WriteLine();
    }

    mlpResponse = new CvMat(1, ClassCount, MatrixType.F32C1);

    // compute prediction error on train and test data
    for (int i = 0; i < nsamplesAll; i++)
    {
        int bestClass;
        CvMat sample;
        CvPoint minLoc, maxLoc;
        Cv.GetRow(data, out sample, i);
        mlp.Predict(sample, mlpResponse);
        mlpResponse.MinMaxLoc(out minLoc, out maxLoc, null);
        bestClass = maxLoc.X + 'A';
        int r = (Math.Abs((double)bestClass - responses.DataArraySingle[i]) < float.Epsilon) ? 1 : 0;
        if (i < ntrainSamples)
            trainHr += r;
        else
            testHr += r;
    }
    testHr /= (double)(nsamplesAll - ntrainSamples);
    trainHr /= (double)ntrainSamples;
    Console.WriteLine("Recognition rate: train = {0:F1}%, test = {1:F1}%", trainHr * 100.0, testHr * 100.0);

    // Save classifier to file if needed
    if (filenameToSave != null)
    {
        mlp.Save(filenameToSave);
    }
    Console.Read();

    mlpResponse.Dispose();
    data.Dispose();
    responses.Dispose();
    if (layerSizes != null) layerSizes.Dispose();
    mlp.Dispose();
}
Example 3: BuildBoostClassifier
//......... part of the method is omitted here .........
        using (CvMat newResponses = new CvMat(ntrainSamples * ClassCount, 1, MatrixType.S32C1))
        {
            // 1. unroll the database type mask
            Console.WriteLine("Unrolling the database...");
            for (int i = 0; i < ntrainSamples; i++)
            {
                unsafe
                {
                    float* dataRow = (float*)(data.DataByte + data.Step * i);
                    for (int j = 0; j < ClassCount; j++)
                    {
                        float* newDataRow = (float*)(newData.DataByte + newData.Step * (i * ClassCount + j));
                        for (int k = 0; k < varCount; k++)
                        {
                            newDataRow[k] = dataRow[k];
                        }
                        newDataRow[varCount] = (float)j;
                        newResponses.DataInt32[i * ClassCount + j] = (responses.DataSingle[i] == j + 'A') ? 1 : 0;
                    }
                }
            }

            // 2. create type mask
            varType = new CvMat(varCount + 2, 1, MatrixType.U8C1);
            varType.Set(CvScalar.ScalarAll(CvStatModel.CV_VAR_ORDERED));
            // the last indicator variable, as well
            // as the new (binary) response are categorical
            varType.SetReal1D(varCount, CvStatModel.CV_VAR_CATEGORICAL);
            varType.SetReal1D(varCount + 1, CvStatModel.CV_VAR_CATEGORICAL);

            // 3. train classifier
            Console.Write("Training the classifier (may take a few minutes)...");
            boost.Train(
                newData, DTreeDataLayout.RowSample, newResponses, null, null, varType, null,
                new CvBoostParams(CvBoost.REAL, 100, 0.95, 5, false, null)
            );
        }
        Console.WriteLine();
    }

    tempSample = new CvMat(1, varCount + 1, MatrixType.F32C1);
    weakResponses = new CvMat(1, boost.GetWeakPredictors().Total, MatrixType.F32C1);

    // compute prediction error on train and test data
    for (int i = 0; i < nsamplesAall; i++)
    {
        int bestClass = 0;
        double maxSum = double.MinValue;
        double r;
        CvMat sample;
        Cv.GetRow(data, out sample, i);
        for (int k = 0; k < varCount; k++)
        {
            tempSample.DataArraySingle[k] = sample.DataArraySingle[k];
        }
        for (int j = 0; j < ClassCount; j++)
        {
            tempSample.DataArraySingle[varCount] = (float)j;
            boost.Predict(tempSample, null, weakResponses);
            double sum = weakResponses.Sum().Val0;
            if (maxSum < sum)
            {
                maxSum = sum;
                bestClass = j + 'A';
            }
        }
        r = (Math.Abs(bestClass - responses.DataArraySingle[i]) < float.Epsilon) ? 1 : 0;
        if (i < ntrainSamples)
            trainHr += r;
        else
            testHr += r;
    }
    testHr /= (double)(nsamplesAall - ntrainSamples);
    trainHr /= (double)ntrainSamples;
    Console.WriteLine("Recognition rate: train = {0:F1}%, test = {1:F1}%", trainHr * 100.0, testHr * 100.0);
    Console.WriteLine("Number of trees: {0}", boost.GetWeakPredictors().Total);

    // Save classifier to file if needed
    if (filenameToSave != null)
    {
        boost.Save(filenameToSave);
    }
    Console.Read();

    tempSample.Dispose();
    weakResponses.Dispose();
    if (varType != null) varType.Dispose();
    data.Dispose();
    responses.Dispose();
    boost.Dispose();
}
Example 4: DisposeToNull
/// <summary>
/// Disposes the object if it has been allocated.
/// </summary>
/// <param name="obj">The matrix to dispose; set to null on return.</param>
public static void DisposeToNull(ref CvMat obj)
{
    if (obj != null)
    {
        obj.Dispose();
        obj = null;
    }
}
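A short usage sketch, assuming a caller that keeps a reusable working buffer (the variable name and size are illustrative):

CvMat buffer = new CvMat(480, 640, MatrixType.U8C1); // hypothetical working buffer
// ... use buffer ...
DisposeToNull(ref buffer);  // disposes the matrix and clears the reference
DisposeToNull(ref buffer);  // calling again is safe: a null reference is simply ignored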
Example 5: InitCvMat
/// <summary>
/// Allocates a new matrix if none has been allocated yet, or if the existing one has a different format.
/// </summary>
/// <param name="dest">Destination matrix; reallocated only when necessary.</param>
/// <param name="rows">Required number of rows.</param>
/// <param name="cols">Required number of columns.</param>
/// <param name="type">Required element type.</param>
public static void InitCvMat(ref CvMat dest, int rows, int cols, MatrixType type)
{
    if (dest == null || dest.Cols != cols || dest.Rows != rows || dest.ElemType != type)
    {
        if (dest != null)
        {
            dest.Dispose();
        }
        dest = new CvMat(rows, cols, type);
    }
}
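A sketch of the intended call pattern, assuming a cached destination matrix that is reused across calls (the dimensions and element type are placeholders):

CvMat dest = null;                               // hypothetical cached destination
InitCvMat(ref dest, 480, 640, MatrixType.U8C1);  // first call: allocates a new matrix
InitCvMat(ref dest, 480, 640, MatrixType.U8C1);  // same shape and type: the existing matrix is kept
InitCvMat(ref dest, 240, 320, MatrixType.U8C1);  // shape changed: old matrix is disposed, a new one allocated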
Example 6: Transform
private void Transform(double[] srcPoints)
{
    const int POINT_COUNT = 8;
    System.Diagnostics.Debug.Assert(srcPoints.Length == POINT_COUNT);

    double leftOffset = (srcGrid.Width - imgRaw.Source.Width) / 2;
    double topOffset = (srcGrid.Height - imgRaw.Source.Height) / 2;

    CvMat srcPointsMat = new CvMat(4, 2, MatrixType.F64C1, srcPoints);
    CvMat dstPointsMat = new CvMat(4, 2, MatrixType.F64C1,
        new double[POINT_COUNT] {
            dstGrid.Width * 1 / 4, dstGrid.Height * 1 / 4, dstGrid.Width * 3 / 4, dstGrid.Height * 1 / 4,
            dstGrid.Width * 3 / 4, dstGrid.Height * 3 / 4, dstGrid.Width * 1 / 4, dstGrid.Height * 3 / 4 });
    CvMat viewerHomographyMatrix = new CvMat(3, 3, MatrixType.F64C1, new double[9]);
    Cv.FindHomography(srcPointsMat, dstPointsMat, viewerHomographyMatrix);

    CV.Mat src = WriteableBitmapConverter.ToMat((WriteableBitmap)imgRaw.Source);
    CV.Mat dst = new CV.Mat((int)srcGrid.Height, (int)srcGrid.Width, src.Type());
    Cv.WarpPerspective(src.ToCvMat(), dst.ToCvMat(), viewerHomographyMatrix);
    imgTransformed.Source = WriteableBitmapConverter.ToWriteableBitmap(dst);

    srcPointsMat.Dispose();
    dstPointsMat.Dispose();
    viewerHomographyMatrix.Dispose(); // release the homography matrix along with the other temporaries
    src.Dispose();
    dst.Dispose();
}