

Java DifferentiableFunction Class Code Examples

This article collects typical usage examples of the Java class tberg.murphy.opt.DifferentiableFunction. If you have been wondering what the DifferentiableFunction class does, how to use it, or where to find real examples of it, the curated code examples below may help.


The DifferentiableFunction class belongs to the tberg.murphy.opt package. Three code examples of the class are shown below, sorted by popularity by default.
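
To make the interface's contract concrete before the examples: a DifferentiableFunction implements a single method, calculate(double[] x), which returns a Pair of the function value and its gradient at x. The following minimal sketch implements a simple quadratic and checks the analytic gradient with EmpiricalGradientTester, mirroring the call pattern of Examples 2 and 3 below; the import paths for Pair and EmpiricalGradientTester, and the class name QuadraticExample, are assumptions about the murphy repository layout rather than confirmed API.

import tberg.murphy.opt.DifferentiableFunction;
import tberg.murphy.opt.EmpiricalGradientTester; // assumed import path
import tberg.murphy.util.Pair;                   // assumed import path

public class QuadraticExample {
	public static void main(String[] args) {
		// f(x) = sum_i x_i^2, with analytic gradient 2x
		DifferentiableFunction quadratic = new DifferentiableFunction() {
			public Pair<Double, double[]> calculate(double[] x) {
				double val = 0.0;
				double[] grad = new double[x.length];
				for (int i = 0; i < x.length; ++i) {
					val += x[i] * x[i];
					grad[i] = 2.0 * x[i];
				}
				return Pair.makePair(val, grad);
			}
		};
		// Compare the analytic gradient against finite differences,
		// with the same tolerances used in Examples 2 and 3 below.
		EmpiricalGradientTester.test(quadratic, new double[] {1.0, -2.0, 3.0}, 1e-2, 1.0, 1e-7);
	}
}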

Example 1: train

import tberg.murphy.opt.DifferentiableFunction; // import the required package/class
public CounterInterface<Integer> train(CounterInterface<Integer> initWeights, final LossAugmentedLinearModel<D> model, List<D> data, int iters) {
	double[] denseInitWeights = dense(initWeights, numFeatures);
	List<DifferentiableFunction> objs = new ArrayList<DifferentiableFunction>();

	// Build one objective per mini-batch; the online minimizer cycles through them.
	int numBatches = (int) Math.ceil(data.size() / (double) batchSize);
	for (int b=0; b<numBatches; ++b) {
		final List<D> batch = data.subList(b*batchSize, Math.min(data.size(), (b+1)*batchSize));
		objs.add(new DifferentiableFunction() {
			public Pair<Double, double[]> calculate(double[] x) {
				CounterInterface<Integer> weights = sparse(x);
				model.setWeights(weights);
				// Loss-augmented decoding: each bundle carries the gold features,
				// the loss-augmented best guess, and the loss of that guess.
				List<UpdateBundle> ubBatch = model.getLossAugmentedUpdateBundleBatch(batch, 1.0);
				double valBatch = 0.0;
				CounterInterface<Integer> deltaBatch = new IntCounter();
				for (UpdateBundle ub : ubBatch) {
					CounterInterface<Integer> delta = new IntCounter();
					delta.incrementAll(ub.gold, -1.0);
					delta.incrementAll(ub.guess, 1.0);
					// Structured hinge term: loss + w . (f(guess) - f(gold)).
					double val = ub.loss + delta.dotProduct(weights);
					// Only margin violations contribute to the value and subgradient.
					if (val > 0.0) {
						valBatch += val;
						deltaBatch.incrementAll(delta);
					}
				}
				return Pair.makePair(valBatch, dense(deltaBatch, numFeatures));
			}
		});
	}

	// AdaGrad with either L1 or L2 regularization, chosen by configuration.
	OnlineMinimizer minimizer = (L1reg ? new AdaGradL1Minimizer(stepSize, delta, C, iters) : new AdaGradL2Minimizer(stepSize, delta, C, iters));
	return sparse(minimizer.minimize(objs, denseInitWeights, true, null));
}
 
Developer: tberg12, Project: murphy, Lines of code: 33, Source file: PrimalSubgradientSVMLearner.java
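
Example 1 builds one DifferentiableFunction per mini-batch, each returning the structured hinge value max(0, loss + w . (f(guess) - f(gold))) summed over violated examples, with subgradient f(guess) - f(gold); an AdaGrad online minimizer then cycles through these batch objectives. Here is a minimal sketch of that minimizer API on a toy objective, using the constructor and minimize signatures exactly as they appear above. The class name MinimizerSketch, the hyperparameter values, and the reading of the last two minimize arguments (a verbosity flag and an optional callback) are assumptions, as are the import paths for Pair and the minimizer classes.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import tberg.murphy.opt.AdaGradL2Minimizer; // assumed import path
import tberg.murphy.opt.DifferentiableFunction;
import tberg.murphy.opt.OnlineMinimizer;   // assumed import path
import tberg.murphy.util.Pair;              // assumed import path

public class MinimizerSketch {
	public static void main(String[] args) {
		// A single toy objective: f(x) = (x0 - 3)^2 + (x1 + 1)^2
		List<DifferentiableFunction> objs = new ArrayList<DifferentiableFunction>();
		objs.add(new DifferentiableFunction() {
			public Pair<Double, double[]> calculate(double[] x) {
				double val = (x[0] - 3.0) * (x[0] - 3.0) + (x[1] + 1.0) * (x[1] + 1.0);
				return Pair.makePair(val, new double[] {2.0 * (x[0] - 3.0), 2.0 * (x[1] + 1.0)});
			}
		});
		// Constructor arguments follow Example 1: step size, AdaGrad delta,
		// regularization strength, and iteration count (the values are guesses;
		// regularization is set to 0 so the optimum stays at [3, -1]).
		OnlineMinimizer minimizer = new AdaGradL2Minimizer(0.1, 1e-2, 0.0, 100);
		double[] argmin = minimizer.minimize(objs, new double[] {0.0, 0.0}, true, null);
		System.out.println(Arrays.toString(argmin)); // should approach [3.0, -1.0]
	}
}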

Example 2: gradientTester

import tberg.murphy.opt.DifferentiableFunction; // import the required package/class
public static void gradientTester(float[][] Wtrans0, float[][] Htrans0, float[][] Xtrans0, float silenceEps) {
	// Optionally filter out near-silent frames of X (and the corresponding rows of Htrans0).
	int[] loud = null;
	if (silenceEps > 0.0) {
		Pair<float[][], int[]> filterSilence = filterSilence(Xtrans0, silenceEps);
		Xtrans0 = filterSilence.getFirst();
		loud = filterSilence.getSecond();
		Htrans0 = filterSilence(Htrans0, loud);
	}

	// W : n x r
	// H : r x m
	// X : n x m

	final int n = Xtrans0[0].length;
	final int m = Xtrans0.length;
	final int r = Wtrans0.length;

	System.out.println("Gradient Test GPU: ");

	final Matrix X = Matrix.build(a.transpose(Xtrans0));
	float[] flattenedWinit = Matrix.build(a.transpose(Wtrans0)).toArray();
	float[] flattenedHinit = Matrix.build(a.transpose(Htrans0)).toArray();

	DifferentiableFunction obj = new DifferentiableFunction() {
		public Pair<Double, double[]> calculate(double[] xDouble) {
			float[] x = a.toFloat(xDouble);
			Matrix W = Matrix.build(n, r, Arrays.copyOfRange(x, 0, n * r));
			Matrix H = Matrix.build(r, m, Arrays.copyOfRange(x, n * r, n * r + r * m));
			// Matrix Htrans = H.transpose();
			// Matrix Wtrans = W.transpose();
			Matrix WH = W.mmul(H);
			// Alternative log-ratio objective, left disabled:
			// Matrix logX = X.log();
			// Matrix logWH = WH.log();
			// float c = 1.0f;
			// Matrix logXsubLogWHdivWH = logX.sub(logWH).add(Matrix.ones(n, m).mul(c)).div(WH);

			// Beta-divergence objective with beta = 0.5. Elementwise gradient w.r.t. WH is
			// G = WH^(beta-1) - X .* WH^(beta-2); the chain rule gives gW = G * H^T, gH = W^T * G.
			float beta = 0.5f;
			Matrix WHbetaSub1 = WH.pow(beta - 1.0f);
			Matrix WHbetaSub2 = WH.pow(beta - 2.0f);
			Matrix gH = W.transpose().mmul(WHbetaSub1.sub(X.mul(WHbetaSub2)));
			Matrix gW = (WHbetaSub1.sub(X.mul(WHbetaSub2))).mmul(H.transpose());
			double val = (1.0f / (beta * (beta - 1.0f))) * (X.pow(beta).norm1()
					+ (beta - 1.0f) * WH.pow(beta).norm1() - beta * (X.mul(WH.pow(beta - 1.0f)).norm1()));

			// Alternative gradients and objectives (log-ratio and squared Frobenius), left disabled:
			// Matrix gW = logXsubLogWHdivWH.mmul(Htrans).mul(-2.0f);
			// Matrix gH = Wtrans.mmul(logXsubLogWHdivWH).mul(-2.0f);
			// double val = Math.pow(logX.sub(logWH).add(Matrix.ones(n, m).mul(c)).norm2(), 2.0);
			// Matrix gW = (X.mmul(Htrans)).comb(-2.0f, 2.0f, (WH).mmul(Htrans));
			// Matrix gH = (Wtrans.mmul(X)).comb(-2.0f, 2.0f, Wtrans.mmul(WH));
			// double val = Math.pow(X.distance2(WH), 2.0);
			Pair<Double, double[]> result = Pair.makePair(val, a.toDouble(a.append(gW.toArray(), gH.toArray())));
			CublasUtil.freeAllBut(X); // release GPU buffers allocated during this call, keeping X resident
			return result;
		}
	};

	EmpiricalGradientTester.test(obj, a.toDouble(a.append(flattenedWinit, flattenedHinit)), 1e-2, 1.0, 1e-7);
}
 
Developer: tberg12, Project: klavier, Lines of code: 64, Source file: NMFUtil.java
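
Examples 2 and 3 minimize the beta-divergence between the spectrogram X and its factorization WH with beta = 0.5 (Example 3 is the same routine from NMFUtilOpenCL.java, identical except that it frees GPU buffers through JOCLBlasUtil instead of CublasUtil). Per entry, the objective is d_beta(x, y) = (x^beta + (beta-1)*y^beta - beta*x*y^(beta-1)) / (beta*(beta-1)), with derivative d/dy = y^(beta-1) - x*y^(beta-2); the matrix code applies this elementwise at y = WH and pushes it through the factorization via gW = G * H^T and gH = W^T * G. The following self-contained scalar snippet (no murphy dependencies; BetaDivergenceCheck is just an illustrative name) verifies the derivative formula by finite differences:

public class BetaDivergenceCheck {
	// Scalar beta-divergence d_beta(x, y), matching the matrix objective above.
	static double betaDiv(double x, double y, double beta) {
		return (Math.pow(x, beta) + (beta - 1.0) * Math.pow(y, beta)
				- beta * x * Math.pow(y, beta - 1.0)) / (beta * (beta - 1.0));
	}

	// Analytic derivative with respect to y: y^(beta-1) - x * y^(beta-2).
	static double betaDivGrad(double x, double y, double beta) {
		return Math.pow(y, beta - 1.0) - x * Math.pow(y, beta - 2.0);
	}

	public static void main(String[] args) {
		double x = 2.0, y = 1.5, beta = 0.5, eps = 1e-6;
		// Central finite difference should agree with the analytic derivative.
		double numeric = (betaDiv(x, y + eps, beta) - betaDiv(x, y - eps, beta)) / (2.0 * eps);
		System.out.println("analytic: " + betaDivGrad(x, y, beta) + ", numeric: " + numeric);
	}
}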

Example 3: gradientTester

import tberg.murphy.opt.DifferentiableFunction; // import the required package/class
public static void gradientTester(float[][] Wtrans0, float[][] Htrans0, float[][] Xtrans0, float silenceEps) {
	// Optionally filter out near-silent frames of X (and the corresponding rows of Htrans0).
	int[] loud = null;
	if (silenceEps > 0.0) {
		Pair<float[][], int[]> filterSilence = filterSilence(Xtrans0, silenceEps);
		Xtrans0 = filterSilence.getFirst();
		loud = filterSilence.getSecond();
		Htrans0 = filterSilence(Htrans0, loud);
	}

	// W : n x r
	// H : r x m
	// X : n x m

	final int n = Xtrans0[0].length;
	final int m = Xtrans0.length;
	final int r = Wtrans0.length;

	System.out.println("Gradient Test GPU: ");

	final Matrix X = Matrix.build(a.transpose(Xtrans0));
	float[] flattenedWinit = Matrix.build(a.transpose(Wtrans0)).toArray();
	float[] flattenedHinit = Matrix.build(a.transpose(Htrans0)).toArray();

	DifferentiableFunction obj = new DifferentiableFunction() {
		public Pair<Double, double[]> calculate(double[] xDouble) {
			float[] x = a.toFloat(xDouble);
			Matrix W = Matrix.build(n, r, Arrays.copyOfRange(x, 0, n * r));
			Matrix H = Matrix.build(r, m, Arrays.copyOfRange(x, n * r, n * r + r * m));
			// Matrix Htrans = H.transpose();
			// Matrix Wtrans = W.transpose();
			Matrix WH = W.mmul(H);
			// Alternative log-ratio objective, left disabled:
			// Matrix logX = X.log();
			// Matrix logWH = WH.log();
			// float c = 1.0f;
			// Matrix logXsubLogWHdivWH = logX.sub(logWH).add(Matrix.ones(n, m).mul(c)).div(WH);

			// Beta-divergence objective with beta = 0.5. Elementwise gradient w.r.t. WH is
			// G = WH^(beta-1) - X .* WH^(beta-2); the chain rule gives gW = G * H^T, gH = W^T * G.
			float beta = 0.5f;
			Matrix WHbetaSub1 = WH.pow(beta - 1.0f);
			Matrix WHbetaSub2 = WH.pow(beta - 2.0f);
			Matrix gH = W.transpose().mmul(WHbetaSub1.sub(X.mul(WHbetaSub2)));
			Matrix gW = (WHbetaSub1.sub(X.mul(WHbetaSub2))).mmul(H.transpose());
			double val = (1.0f / (beta * (beta - 1.0f))) * (X.pow(beta).norm1()
					+ (beta - 1.0f) * WH.pow(beta).norm1() - beta * (X.mul(WH.pow(beta - 1.0f)).norm1()));

			// Alternative gradients and objectives (log-ratio and squared Frobenius), left disabled:
			// Matrix gW = logXsubLogWHdivWH.mmul(Htrans).mul(-2.0f);
			// Matrix gH = Wtrans.mmul(logXsubLogWHdivWH).mul(-2.0f);
			// double val = Math.pow(logX.sub(logWH).add(Matrix.ones(n, m).mul(c)).norm2(), 2.0);
			// Matrix gW = (X.mmul(Htrans)).comb(-2.0f, 2.0f, (WH).mmul(Htrans));
			// Matrix gH = (Wtrans.mmul(X)).comb(-2.0f, 2.0f, Wtrans.mmul(WH));
			// double val = Math.pow(X.distance2(WH), 2.0);
			Pair<Double, double[]> result = Pair.makePair(val, a.toDouble(a.append(gW.toArray(), gH.toArray())));
			JOCLBlasUtil.freeAllBut(X); // release GPU buffers allocated during this call, keeping X resident
			return result;
		}
	};

	EmpiricalGradientTester.test(obj, a.toDouble(a.append(flattenedWinit, flattenedHinit)), 1e-2, 1.0, 1e-7);
}
 
Developer: tberg12, Project: klavier, Lines of code: 64, Source file: NMFUtilOpenCL.java


Note: The tberg.murphy.opt.DifferentiableFunction class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are selected from open-source projects contributed by their authors, and copyright remains with the original authors; please consult the corresponding project's License before distributing or using the code. Do not reproduce without permission.