当前位置: 首页>>代码示例>>Java>>正文


Java MatrixOps.setAll方法代码示例

本文整理汇总了Java中cc.mallet.types.MatrixOps.setAll方法的典型用法代码示例。如果您正苦于以下问题:Java MatrixOps.setAll方法的具体用法?Java MatrixOps.setAll怎么用?Java MatrixOps.setAll使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在cc.mallet.types.MatrixOps的用法示例。


在下文中一共展示了MatrixOps.setAll方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: getValueGradient

import cc.mallet.types.MatrixOps; //导入方法依赖的package包/类
/**
 * Fills {@code buffer} with the combined value gradient, summed over all
 * component optimizables in {@code opts}.  The summed gradient is cached in
 * {@code cachedGradient} and is only recomputed when the CRF's weights have
 * changed since the last computation (detected via weightsValueChangeStamp).
 *
 * NOTE(review): the original snippet referenced a field "cachedGradie";
 * corrected to "cachedGradient" to match the cache field used by the
 * equivalent sibling implementation of this method.
 *
 * @param buffer destination array; overwritten with the cached gradient.
 */
public void getValueGradient (double [] buffer)
{
	// PriorGradient is -parameter/gaussianPriorVariance
	// Gradient is (constraint - expectation + PriorGradient)
	// == -(expectation - constraint - PriorGradient).
	// Gradient points "up-hill", i.e. in the direction of higher value
	if (cachedGradientWeightsStamp != crf.weightsValueChangeStamp) {
		getValue (); // This will fill in the this.expectation, updating it if necessary
		MatrixOps.setAll(cachedGradient, 0);
		double[] b2 = new double[buffer.length];
		for (int i = 0; i < opts.length; i++) {
			MatrixOps.setAll(b2, 0); // clear scratch before each component writes into it
			opts[i].getValueGradient(b2);
			MatrixOps.plusEquals(cachedGradient, b2); // cachedGradient += component gradient
		}
		cachedGradientWeightsStamp = crf.weightsValueChangeStamp;
	}
	System.arraycopy(cachedGradient, 0, buffer, 0, cachedGradient.length);
}
 
开发者ID:kostagiolasn,项目名称:NucleosomePatternClassifier,代码行数:20,代码来源:CRFTrainerByValueGradients.java

示例2: getValueGradient

import cc.mallet.types.MatrixOps; //导入方法依赖的package包/类
/**
 * Copies the combined value gradient (summed over {@code optimizables})
 * into {@code buffer}.  The sum lives in {@code cachedGradient} and is
 * refreshed only when the CRF's weight-change stamp differs from the one
 * recorded at the last refresh.
 *
 * @param buffer destination array; overwritten with the cached gradient.
 */
public void getValueGradient (double [] buffer) {
	boolean stale = cachedGradientWeightsStamp != crf.weightsValueChangeStamp;
	if (stale) {
		getValue (); // refreshes expectations as a side effect if needed
		MatrixOps.setAll(cachedGradient, 0);
		double[] scratch = new double[buffer.length];
		int count = optimizables.length;
		for (int k = 0; k < count; k++) {
			MatrixOps.setAll(scratch, 0); // zero the scratch for this component
			optimizables[k].getValueGradient(scratch);
			MatrixOps.plusEquals(cachedGradient, scratch);
		}
		cachedGradientWeightsStamp = crf.weightsValueChangeStamp;
	}
	System.arraycopy(cachedGradient, 0, buffer, 0, cachedGradient.length);
}
 
开发者ID:kostagiolasn,项目名称:NucleosomePatternClassifier,代码行数:15,代码来源:CRFOptimizableByGradientValues.java

示例3: OrthantWiseLimitedMemoryBFGS

import cc.mallet.types.MatrixOps; //导入方法依赖的package包/类
/**
 * Builds an orthant-wise L-BFGS optimizer (OWL-QN) around the given
 * gradient-value function with an L1 regularization weight.
 *
 * Initialization order matters: parameters are fetched from the function
 * first, then the initial value (via evalL1) and initial gradient (via
 * evalGradient) are computed from those parameters.
 *
 * @param function the objective to optimize, queried for parameter count,
 *                 current parameters, value, and gradient.
 * @param l1wt     weight of the L1 penalty term added to the objective.
 */
public OrthantWiseLimitedMemoryBFGS(Optimizable.ByGradientValue function,
		double l1wt) {
	this.optimizable = function;
	this.l1Weight = l1wt;
	// Short class name of the wrapped optimizable, used for labeling/logging.
	String parts[] = optimizable.getClass().getName().split("\\.");
	this.optName = parts[parts.length - 1];

	// initialize optimizer state
	iterations = 0;
	// s/y/rhos hold the L-BFGS history (parameter deltas, gradient deltas,
	// and 1/(y·s) scalars); alphas is scratch for the two-loop recursion,
	// sized by the history length m.
	s = new LinkedList<double[]>();
	y = new LinkedList<double[]>();
	rhos = new LinkedList<Double>();
	alphas = new double[m];
	MatrixOps.setAll(alphas, 0.0);
	yDotY = 0;

	int numParameters = optimizable.getNumParameters();

	// get initial parameters
	parameters = new double[numParameters];
	optimizable.getParameters(parameters);

	// get initial value
	// (evalL1 presumably includes the L1 penalty in the reported value —
	// NOTE(review): confirm against evalL1's definition elsewhere in file)
	value = evalL1();

	// get initial gradient
	grad = new double[numParameters];
	evalGradient();

	// initialize direction
	direction = new double[numParameters];
	steepestDescentDirection = new double[numParameters];

	// initialize backups
	oldParameters = new double[numParameters];
	oldGrad = new double[numParameters];
}
 
开发者ID:kostagiolasn,项目名称:NucleosomePatternClassifier,代码行数:38,代码来源:OrthantWiseLimitedMemoryBFGS.java

示例4: getValueGradient

import cc.mallet.types.MatrixOps; //导入方法依赖的package包/类
/**
 * Accumulates the value gradient of every wrapped optimizable into
 * {@code buffer}.  Note that {@code buffer} is NOT cleared first: each
 * component's gradient is added on top of whatever the buffer already holds.
 *
 * @param buffer accumulator for the summed gradient.
 */
public void getValueGradient (double[] buffer) {
	double[] partial = new double[buffer.length];
	for (Optimizable.ByGradientValue component : optimizables) {
		// Reset the scratch array so each component reports a clean gradient.
		MatrixOps.setAll(partial, 0);
		component.getValueGradient(partial);
		MatrixOps.plusEquals(buffer, partial); // buffer += partial
	}
}
 
开发者ID:kostagiolasn,项目名称:NucleosomePatternClassifier,代码行数:9,代码来源:OptimizableCollection.java

示例5: getValueGradient

import cc.mallet.types.MatrixOps; //导入方法依赖的package包/类
/**
 * Sums the value gradients of all optimizables in this collection into
 * {@code buffer}.  The buffer is used as an accumulator and is not zeroed
 * here; callers wanting only this collection's gradient must clear it first.
 *
 * @param buffer accumulator array, length equal to the parameter count.
 */
public void getValueGradient (double[] buffer) {
	final int width = buffer.length;
	double[] scratch = new double[width];
	for (ByGradientValue member : optimizables) {
		MatrixOps.setAll(scratch, 0); // zero scratch before querying this member
		member.getValueGradient(scratch);
		MatrixOps.plusEquals(buffer, scratch);
	}
}
 
开发者ID:shalomeir,项目名称:tctm,代码行数:9,代码来源:OptimizableCollection.java

示例6: getValue

import cc.mallet.types.MatrixOps; //导入方法依赖的package包/类
/**
 * Returns the (cached) objective value: the label log-likelihood of the
 * training data minus the parameter prior, negated so that the optimizer
 * maximizes it.  As a side effect, stores per-class expectation terms into
 * {@code cachedGradient} (marked stale) for later gradient computation.
 *
 * Early exit: if any instance produces an infinite value, the method logs a
 * warning and returns immediately, skipping the remaining instances and the
 * prior term.
 *
 * @return the cached objective value (higher is better).
 */
public double getValue ()
	{
		if (cachedValueStale) {
			numGetValueCalls++;
			cachedValue = 0;
			// We'll store the expectation values in "cachedGradient" for now
			cachedGradientStale = true;
			MatrixOps.setAll (cachedGradient, 0.0);
			// Incorporate likelihood of data
			double[] scores = new double[trainingList.getTargetAlphabet().size()];
			double value = 0.0;
			Iterator<Instance> iter = trainingList.iterator();
			int ii=0;
			while (iter.hasNext()) {
				ii++;
				Instance instance = iter.next();
				double instanceWeight = trainingList.getInstanceWeight(instance);
				Labeling labeling = instance.getLabeling ();
				// Unlabeled instances contribute nothing to the likelihood.
				if (labeling == null)
					continue;
				//System.out.println("L Now "+inputAlphabet.size()+" regular features.");

				this.theClassifier.getClassificationScores (instance, scores);
				FeatureVector fv = (FeatureVector) instance.getData ();
				int li = labeling.getBestIndex();
				// Weighted negative log-probability of the true label.
				value = - (instanceWeight * Math.log (scores[li]));
				if(Double.isNaN(value)) {
					logger.fine ("MaxEntTrainer: Instance " + instance.getName() +
							"has NaN value. log(scores)= " + Math.log(scores[li]) +
							" scores = " + scores[li] + 
							" has instance weight = " + instanceWeight);

				}
				if (Double.isInfinite(value)) {
					// Abort accumulation: undo this instance's contribution and
					// return early (the commented-out `continue` shows the
					// alternative skip-this-instance policy was considered).
					logger.warning ("Instance "+instance.getSource() + " has infinite value; skipping value and gradient");
					cachedValue -= value;
					cachedValueStale = false;
					return -value;
//					continue;
				}
				cachedValue += value;
				// Accumulate model expectations (negated) per class into
				// cachedGradient; skipping zero scores avoids useless work.
				for (int si = 0; si < scores.length; si++) {
					if (scores[si] == 0) continue;
					assert (!Double.isInfinite(scores[si]));
					MatrixOps.rowPlusEquals (cachedGradient, numFeatures,
							si, fv, -instanceWeight * scores[si]);
					// Default (bias) feature gets the same weighted score.
					cachedGradient[numFeatures*si + defaultFeatureIndex] += (-instanceWeight * scores[si]);
				}
			}
			//logger.info ("-Expectations:"); cachedGradient.print();

			// Incorporate prior on parameters
			double prior = 0;
			if (usingHyperbolicPrior) {
				// Hyperbolic prior: (slope/sharpness) * log cosh(sharpness * w).
				for (int li = 0; li < numLabels; li++)
					for (int fi = 0; fi < numFeatures; fi++)
						prior += (hyperbolicPriorSlope / hyperbolicPriorSharpness
								* Math.log (Maths.cosh (hyperbolicPriorSharpness * parameters[li *numFeatures + fi])));
			}
			else if (usingGaussianPrior) {
				// Gaussian prior: w^2 / (2 * variance) per parameter.
				for (int li = 0; li < numLabels; li++)
					for (int fi = 0; fi < numFeatures; fi++) {
						double param = parameters[li*numFeatures + fi];
						prior += param * param / (2 * gaussianPriorVariance);
					}
			}

			// Negate (value so far is a negative log-likelihood plus prior)
			// so the optimizer maximizes log-likelihood minus prior.
			double oValue = cachedValue;
			cachedValue += prior;
			cachedValue *= -1.0; // MAXIMIZE, NOT MINIMIZE
			cachedValueStale = false;
			progressLogger.info ("Value (labelProb="+oValue+" prior="+prior+") loglikelihood = "+cachedValue);
		}
		return cachedValue;
	}
 
开发者ID:kostagiolasn,项目名称:NucleosomePatternClassifier,代码行数:76,代码来源:MaxEntOptimizableByLabelLikelihood.java


注:本文中的cc.mallet.types.MatrixOps.setAll方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。