This article collects and summarizes typical usage examples of the cc.mallet.optimize.LimitedMemoryBFGS class in Java. If you are wondering what exactly LimitedMemoryBFGS does, how to use it, or what calling code looks like in practice, the curated examples below should help.
The LimitedMemoryBFGS class belongs to the cc.mallet.optimize package. Fifteen code examples of the class are shown below, sorted by popularity by default.
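All fifteen examples follow the same MALLET pattern: implement the cc.mallet.optimize.Optimizable.ByGradientValue interface (note that MALLET optimizers maximize getValue()), pass the objective to the LimitedMemoryBFGS constructor, call optimize(), and read the result back via getParameter(int). The toy objective below is a minimal, self-contained sketch of that pattern, not taken from any of the projects excerpted here; it maximizes a negated quadratic, so the optimum is the target point (1, -2).

import cc.mallet.optimize.LimitedMemoryBFGS;
import cc.mallet.optimize.Optimizable;

public class QuadraticExample implements Optimizable.ByGradientValue {
    private final double[] params = new double[] { 0.0, 0.0 };
    private final double[] target = new double[] { 1.0, -2.0 };

    // MALLET maximizes this, so return the NEGATED squared distance to the target
    public double getValue() {
        double value = 0.0;
        for (int i = 0; i < params.length; i++) {
            double d = params[i] - target[i];
            value -= d * d;
        }
        return value;
    }

    // gradient of -(x_i - t_i)^2 with respect to x_i is -2 (x_i - t_i)
    public void getValueGradient(double[] buffer) {
        for (int i = 0; i < params.length; i++) {
            buffer[i] = -2.0 * (params[i] - target[i]);
        }
    }

    // parameter accessors required by the Optimizable interface
    public int getNumParameters() { return params.length; }
    public double getParameter(int i) { return params[i]; }
    public void getParameters(double[] buffer) { System.arraycopy(params, 0, buffer, 0, params.length); }
    public void setParameter(int i, double value) { params[i] = value; }
    public void setParameters(double[] newParams) { System.arraycopy(newParams, 0, params, 0, params.length); }

    public static void main(String[] args) {
        QuadraticExample objective = new QuadraticExample();
        LimitedMemoryBFGS lbfgs = new LimitedMemoryBFGS(objective);
        boolean converged = false;
        try {
            converged = lbfgs.optimize();
        } catch (Exception e) {
            // L-BFGS throws when the line search cannot improve further;
            // the parameters reached so far remain set on the objective.
        }
        System.out.println("converged: " + converged);
        System.out.println("x0 = " + objective.getParameter(0) + ", x1 = " + objective.getParameter(1));
    }
}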
Example 1: optimize
import cc.mallet.optimize.LimitedMemoryBFGS; // import the required package/class
protected void optimize()
{
    RTMFunction optimizable = new RTMFunction(this);
    LimitedMemoryBFGS lbfgs = new LimitedMemoryBFGS(optimizable);
    try
    {
        lbfgs.optimize();
    }
    catch (Exception e)
    {
        // L-BFGS may throw if the line search cannot make progress;
        // leave eta unchanged in that case
        return;
    }
    // copy the optimized per-topic coefficients back
    for (int topic = 0; topic < param.numTopics; topic++)
    {
        eta[topic] = optimizable.getParameter(topic);
    }
}
Example 2: optimize
import cc.mallet.optimize.LimitedMemoryBFGS; // import the required package/class
protected void optimize()
{
    LexWSBBSLDAFunction optimizable = new LexWSBBSLDAFunction(this);
    LimitedMemoryBFGS lbfgs = new LimitedMemoryBFGS(optimizable);
    try
    {
        lbfgs.optimize();
    }
    catch (Exception e)
    {
        return;
    }
    for (int topic = 0; topic < param.numTopics; topic++)
    {
        eta[topic] = optimizable.getParameter(topic);
    }
    // lexical (per-word) coefficients are packed after the topic coefficients
    for (int vocab = 0; vocab < param.numVocab; vocab++)
    {
        tau[vocab] = optimizable.getParameter(vocab + param.numTopics);
    }
}
Example 3: optimize
import cc.mallet.optimize.LimitedMemoryBFGS; // import the required package/class
protected void optimize()
{
    LexWSBMedLDAFunction optimizable = new LexWSBMedLDAFunction(this);
    LimitedMemoryBFGS lbfgs = new LimitedMemoryBFGS(optimizable);
    try
    {
        lbfgs.optimize();
    }
    catch (Exception e)
    {
        return;
    }
    for (int topic = 0; topic < param.numTopics; topic++)
    {
        eta[topic] = optimizable.getParameter(topic);
    }
    for (int vocab = 0; vocab < param.numVocab; vocab++)
    {
        tau[vocab] = optimizable.getParameter(vocab + param.numTopics);
    }
}
Example 4: optimize
import cc.mallet.optimize.LimitedMemoryBFGS; // import the required package/class
protected void optimize()
{
    BSLDAFunction optimizable = new BSLDAFunction(this);
    LimitedMemoryBFGS lbfgs = new LimitedMemoryBFGS(optimizable);
    try
    {
        lbfgs.optimize();
    }
    catch (Exception e)
    {
        return;
    }
    for (int topic = 0; topic < param.numTopics; topic++)
    {
        eta[topic] = optimizable.getParameter(topic);
    }
}
Example 5: optimize
import cc.mallet.optimize.LimitedMemoryBFGS; // import the required package/class
protected void optimize()
{
    SLDAFunction optimizable = new SLDAFunction(this);
    LimitedMemoryBFGS lbfgs = new LimitedMemoryBFGS(optimizable);
    try
    {
        lbfgs.optimize();
    }
    catch (Exception e)
    {
        return;
    }
    for (int topic = 0; topic < param.numTopics; topic++)
    {
        eta[topic] = optimizable.getParameter(topic);
    }
}
Example 6: train
import cc.mallet.optimize.LimitedMemoryBFGS; // import the required package/class
public MCMaxEnt train (InstanceList trainingSet)
{
    logger.fine ("trainingSet.size() = " + trainingSet.size());
    mt = new MaximizableTrainer (trainingSet, (MCMaxEnt) initialClassifier);
    Optimizer maximizer = new LimitedMemoryBFGS(mt);
    // CPAL - change the tolerance for large vocab experiments
    ((LimitedMemoryBFGS) maximizer).setTolerance(.00001); // std is .0001;
    maximizer.optimize (); // XXX given the loop below, this seems wrong.
    logger.info("MCMaxEnt ngetValueCalls:" + getValueCalls() + "\nMCMaxEnt ngetValueGradientCalls:" + getValueGradientCalls());
    // boolean converged;
    //
    // for (int i = 0; i < numIterations; i++) {
    //     converged = maximizer.maximize (mt, 1);
    //     if (converged)
    //         break;
    //     else if (evaluator != null)
    //         if (!evaluator.evaluate (mt.getClassifier(), converged, i, mt.getValue(),
    //                 trainingSet, validationSet, testSet))
    //             break;
    // }
    // TestMaximizable.testValueAndGradient (mt);
    progressLogger.info("\n"); // progress messages are on one line; move on.
    return mt.getClassifier ();
}
Example 7: train
import cc.mallet.optimize.LimitedMemoryBFGS; // import the required package/class
public void train(SparseVector[] designMatrix, double[] responses, int K) {
    if (verbose) {
        System.out.println("Training ...");
        System.out.println("--- # instances: " + designMatrix.length + ". " + responses.length);
        System.out.println("--- # features: " + designMatrix[0].getDimension() + ". " + K);
    }
    RidgeLinearRegressionOptimizable optimizable = new RidgeLinearRegressionOptimizable(
            responses, new double[K], designMatrix, rho, mu, sigma);
    LimitedMemoryBFGS optimizer = new LimitedMemoryBFGS(optimizable);
    try {
        optimizer.optimize();
    } catch (Exception ex) {
        ex.printStackTrace();
    }
    // read back the K optimized regression weights
    this.weights = new double[K];
    for (int kk = 0; kk < K; kk++) {
        this.weights[kk] = optimizable.getParameter(kk);
    }
}
Example 8: optimize
import cc.mallet.optimize.LimitedMemoryBFGS; // import the required package/class
protected void optimize()
{
    LexWSBMedRTMFunction optimizable = new LexWSBMedRTMFunction(this);
    LimitedMemoryBFGS lbfgs = new LimitedMemoryBFGS(optimizable);
    try
    {
        lbfgs.optimize();
    }
    catch (Exception e)
    {
        return;
    }
    for (int topic = 0; topic < param.numTopics; topic++)
    {
        eta[topic] = optimizable.getParameter(topic);
    }
    for (int vocab = 0; vocab < param.numVocab; vocab++)
    {
        tau[vocab] = optimizable.getParameter(vocab + param.numTopics);
    }
    if (param.blockFeat && wsbm != null)
    {
        // block-pair coefficients are packed after the topic and vocab ones
        for (int b1 = 0; b1 < param.numBlocks; b1++)
        {
            for (int b2 = 0; b2 < param.numBlocks; b2++)
            {
                int pos = b1 * param.numBlocks + b2 + param.numTopics + param.numVocab;
                rho[b1][b2] = optimizable.getParameter(pos);
            }
        }
    }
}
Example 9: optimize
import cc.mallet.optimize.LimitedMemoryBFGS; // import the required package/class
protected void optimize()
{
    LexWSBRTMFunction optimizable = new LexWSBRTMFunction(this);
    LimitedMemoryBFGS lbfgs = new LimitedMemoryBFGS(optimizable);
    try
    {
        lbfgs.optimize();
    }
    catch (Exception e)
    {
        return;
    }
    for (int topic = 0; topic < param.numTopics; topic++)
    {
        eta[topic] = optimizable.getParameter(topic);
    }
    for (int vocab = 0; vocab < param.numVocab; vocab++)
    {
        tau[vocab] = optimizable.getParameter(vocab + param.numTopics);
    }
    if (param.blockFeat && wsbm != null)
    {
        for (int b1 = 0; b1 < param.numBlocks; b1++)
        {
            for (int b2 = 0; b2 < param.numBlocks; b2++)
            {
                int pos = b1 * param.numBlocks + b2 + param.numTopics + param.numVocab;
                rho[b1][b2] = optimizable.getParameter(pos);
            }
        }
    }
}
Example 10: updateVs
import cc.mallet.optimize.LimitedMemoryBFGS; // import the required package/class
private long updateVs() {
    long sTime = System.currentTimeMillis();
    // debug
    printLabelLogLikelihood();
    Objective obj = new Objective(v);
    LimitedMemoryBFGS optimizer = new LimitedMemoryBFGS(obj);
    boolean converged = false;
    try {
        converged = optimizer.optimize();
    } catch (Exception ex) {
        ex.printStackTrace();
    }
    if (verbose) {
        logln("--- converged: " + converged);
    }
    // copy the optimized L x K label-weight matrix back into v
    int count = 0;
    for (int l = 0; l < L; l++) {
        for (int k = 0; k < K; k++) {
            v[l][k] = obj.getParameter(count++);
        }
    }
    updateScores();
    // debug
    printLabelLogLikelihood();
    return System.currentTimeMillis() - sTime;
}
Example 11: optimizeTopicRegressionParametersLBFGS
import cc.mallet.optimize.LimitedMemoryBFGS; // import the required package/class
private void optimizeTopicRegressionParametersLBFGS() {
    designMatrix = new double[D][K];
    for (int d = 0; d < D; d++) {
        designMatrix[d] = docTopics[d].getEmpiricalDistribution();
    }
    GaussianIndLinearRegObjective optimizable = new GaussianIndLinearRegObjective(
            regParams, designMatrix, responses,
            hyperparams.get(RHO),
            hyperparams.get(MU),
            hyperparams.get(SIGMA));
    LimitedMemoryBFGS optimizer = new LimitedMemoryBFGS(optimizable);
    boolean converged = false;
    try {
        converged = optimizer.optimize();
    } catch (Exception ex) {
        // This exception may be thrown if L-BFGS
        // cannot step in the current direction.
        // This condition does not necessarily mean that
        // the optimizer has failed, but it doesn't want
        // to claim to have succeeded...
        // do nothing
        ex.printStackTrace();
    }
    if (verbose) {
        logln("--- converged? " + converged);
    }
    // update regression parameters
    for (int kk = 0; kk < K; kk++) {
        regParams[kk] = optimizable.getParameter(kk);
    }
    // update current predictions
    updatePredictionValues();
}
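Several of these examples wrap optimizer.optimize() in a broad catch (Exception ex). The condition described in the comment above is surfaced by MALLET as cc.mallet.optimize.OptimizationException, a runtime exception thrown when the line search cannot step in the current direction. Below is a hypothetical helper (a sketch, not part of any project excerpted here) that tolerates only that condition:

import cc.mallet.optimize.LimitedMemoryBFGS;
import cc.mallet.optimize.OptimizationException;

// Hypothetical helper: run L-BFGS, tolerating only the "cannot step" condition.
private static boolean optimizeQuietly(LimitedMemoryBFGS optimizer) {
    try {
        return optimizer.optimize();
    } catch (OptimizationException ex) {
        // The line search could not step in the current direction; the
        // parameters found so far remain set on the objective, so report
        // "not converged" rather than rethrowing.
        return false;
    }
}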
Example 12: createMaxer
import cc.mallet.optimize.LimitedMemoryBFGS; // import the required package/class
private Optimizer createMaxer (Optimizable.ByGradientValue macrf)
{
    if (maxer == null) {
        // default to L-BFGS when no optimizer has been configured
        return new LimitedMemoryBFGS (macrf);
    } else return maxer;
}
Example 13: main
import cc.mallet.optimize.LimitedMemoryBFGS; // import the required package/class
public static void main(String[] args) {
    Random rand = new Random(11235813);
    double sigma = 1.0;
    double rho = 100;
    double mean = 0.0;
    int D = 10000;
    int V = 5;
    // sample the true regression parameters from a Gaussian
    double[] trueParams = new double[V];
    for (int v = 0; v < V; v++) {
        trueParams[v] = SamplerUtils.getGaussian(mean, sigma);
    }
    // random design matrix
    double[][] designMatrix = new double[D][V];
    for (int d = 0; d < D; d++) {
        for (int v = 0; v < V; v++) {
            double w = rand.nextFloat();
            designMatrix[d][v] = w;
        }
    }
    // generate response
    double[] responseVector = new double[D];
    for (int d = 0; d < D; d++) {
        for (int v = 0; v < V; v++) {
            responseVector[d] += designMatrix[d][v] * trueParams[v];
        }
    }
    // add Gaussian noise (spread controlled by rho)
    for (int d = 0; d < D; d++) {
        responseVector[d] = SamplerUtils.getGaussian(responseVector[d], rho);
    }
    double[] initParams = new double[V];
    for (int v = 0; v < V; v++) {
        initParams[v] = SamplerUtils.getGaussian(mean, sigma);
    }
    System.out.println("I params: " + MiscUtils.arrayToString(initParams));
    GaussianIndLinearRegObjective obj =
            new GaussianIndLinearRegObjective(initParams,
                    designMatrix, responseVector, rho, mean, sigma);
    LimitedMemoryBFGS opt = new LimitedMemoryBFGS(obj);
    boolean converged = false;
    try {
        converged = opt.optimize();
    } catch (Exception e) {
        e.printStackTrace();
    }
    System.out.println("converged: " + converged);
    System.out.println("T params: " + MiscUtils.arrayToString(trueParams));
    // recovered (learned) parameters
    double[] ps = new double[V];
    for (int v = 0; v < V; v++) {
        ps[v] = obj.getParameter(v);
    }
    System.out.println("L params: " + MiscUtils.arrayToString(ps));
}
Example 14: optimizeTopicRegressionParametersLBFGS
import cc.mallet.optimize.LimitedMemoryBFGS; // import the required package/class
/**
 * Optimize topic regression parameters using L-BFGS.
 */
private void optimizeTopicRegressionParametersLBFGS() {
    if (verbose) {
        logln("--- Optimizing topic regression parameters ...");
    }
    ArrayList<SNode> flattenTree = flattenTreeWithoutRoot();
    int numNodes = flattenTree.size();
    double[] curParams = new double[numNodes];
    for (int ii = 0; ii < curParams.length; ii++) {
        curParams[ii] = flattenTree.get(ii).getRegressionParameter();
    }
    // per-node Gaussian priors, determined by each node's level in the tree
    double[] nodeSigmas = new double[numNodes];
    double[] nodeMeans = new double[numNodes];
    HashMap<SNode, Integer> nodeIndices = new HashMap<SNode, Integer>();
    for (int i = 0; i < flattenTree.size(); i++) {
        SNode node = flattenTree.get(i);
        nodeIndices.put(node, i);
        nodeSigmas[i] = sigmas[node.getLevel()];
        nodeMeans[i] = mus[node.getLevel()];
    }
    // design matrix
    double[][] designMatrix = new double[D][numNodes];
    for (int d = 0; d < D; d++) {
        for (int s = 0; s < words[d].length; s++) {
            SNode[] path = getPathFromNode(c[d][s].getContent());
            for (int l = 1; l < L; l++) {
                int nodeIdx = nodeIndices.get(path[l]);
                int count = sentLevelCounts[d][s][l];
                designMatrix[d][nodeIdx] += count;
            }
        }
        for (int i = 0; i < numNodes; i++) {
            designMatrix[d][i] /= docTokenCounts[d];
        }
    }
    // adjusted response vector
    double[] responseVector = new double[D];
    for (int d = 0; d < D; d++) {
        responseVector[d] = responses[d] - docLexicalWeights[d] / docTokenCounts[d];
    }
    // optimize using L-BFGS
    GaussianIndLinearRegObjective optimizable = new GaussianIndLinearRegObjective(
            curParams, designMatrix, responseVector,
            hyperparams.get(RHO),
            nodeMeans,
            nodeSigmas);
    LimitedMemoryBFGS optimizer = new LimitedMemoryBFGS(optimizable);
    boolean converged = false;
    try {
        converged = optimizer.optimize();
    } catch (Exception ex) {
        // This exception may be thrown if L-BFGS
        // cannot step in the current direction.
        // This condition does not necessarily mean that
        // the optimizer has failed, but it doesn't want
        // to claim to have succeeded...
        // do nothing
        ex.printStackTrace();
    }
    if (verbose) {
        logln("--- converged? " + converged);
    }
    // update regression parameters
    for (int i = 0; i < numNodes; i++) {
        flattenTree.get(i).setRegressionParameter(optimizable.getParameter(i));
    }
    this.updateDocumentTopicWeights();
}
Example 15: optimize
import cc.mallet.optimize.LimitedMemoryBFGS; // import the required package/class
private void optimize(SNode root) {
    ArrayList<SNode> flattenSubtree = flattenTree(root);
    int numNodes = flattenSubtree.size();
    double[] regParams = new double[numNodes];
    double[] priorMeans = new double[numNodes];
    double[] priorStdvs = new double[numNodes];
    for (int i = 0; i < numNodes; i++) {
        SNode node = flattenSubtree.get(i);
        regParams[i] = node.getRegressionParameter();
        priorMeans[i] = mus[node.getLevel()];
        priorStdvs[i] = sigmas[node.getLevel()];
    }
    double[] adjustedResponses = new double[D];
    double[][] designMatrix = new double[D][numNodes];
    for (int d = 0; d < D; d++) {
        adjustedResponses[d] = responses[d];
        for (int s = 0; s < words[d].length; s++) {
            SNode[] path = getPathFromNode(c[d][s].getContent());
            for (int l = 0; l < L; l++) {
                int count = sentLevelCounts[d][s].getCount(l);
                int nodeIdx = flattenSubtree.indexOf(path[l]);
                if (nodeIdx == -1) {
                    // node outside the subtree being optimized: subtract its
                    // fixed contribution from the response instead
                    adjustedResponses[d] -= path[l].getRegressionParameter() * count / docTokenCounts[d];
                } else {
                    designMatrix[d][nodeIdx] += count;
                }
            }
        }
    }
    for (int d = 0; d < D; d++) {
        for (int i = 0; i < numNodes; i++) {
            designMatrix[d][i] /= docTokenCounts[d];
        }
    }
    this.optimizable = new GaussianIndLinearRegObjective(
            regParams, designMatrix, adjustedResponses,
            hyperparams.get(RHO),
            priorMeans, priorStdvs);
    this.optimizer = new LimitedMemoryBFGS(optimizable);
    boolean converged = false;
    try {
        converged = optimizer.optimize();
    } catch (Exception ex) {
        // This exception may be thrown if L-BFGS
        // cannot step in the current direction.
        // This condition does not necessarily mean that
        // the optimizer has failed, but it doesn't want
        // to claim to have succeeded...
        // do nothing
    }
    // convergence may also fail, e.g., if the number of observations
    // is less than or equal to the number of parameters
    if (converged) {
        numConverged++;
    }
    // update regression parameters
    for (int i = 0; i < flattenSubtree.size(); i++) {
        flattenSubtree.get(i).setRegressionParameter(optimizable.getParameter(i));
    }
}