本文整理汇总了Java中cc.mallet.fst.Transducer.Incrementor方法的典型用法代码示例。如果您正苦于以下问题:Java Transducer.Incrementor方法的具体用法?Java Transducer.Incrementor怎么用?Java Transducer.Incrementor使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类cc.mallet.fst.Transducer
的用法示例。
在下文中一共展示了Transducer.Incrementor方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: EntropyLattice
import cc.mallet.fst.Transducer; //导入方法依赖的package包/类
/**
 * Runs constrained forward-backward. <p>
 *
 * If <tt>incrementor</tt> is null then expectations are not updated by
 * these computations. <p>
 *
 * The contribution of entropy to the expectations is multiplied by the
 * scaling factor.
 */
public EntropyLattice(FeatureVectorSequence fvs, double[][] gammas,
                      double[][][] xis, Transducer transducer,
                      Transducer.Incrementor incrementor,
                      double scalingFactor) {
    this.transducer = transducer;
    inputLength = fvs.size();
    latticeLength = inputLength + 1;
    numStates = transducer.numStates();
    nodes = new LatticeNode[latticeLength][numStates];

    // The forward pass yields the entropy; the backward pass must agree
    // (up to numerical tolerance) or the lattice is inconsistent.
    entropy = this.forwardLattice(gammas, xis);
    double entropyFromBackwardPass = this.backwardLattice(gammas, xis);
    assert(Maths.almostEquals(entropy, entropyFromBackwardPass))
        : entropy + " " + entropyFromBackwardPass;

    // Fold the entropy contribution into the expectations, if requested.
    if (incrementor != null) {
        this.updateCounts(fvs, gammas, xis, scalingFactor, incrementor);
    }
}
示例2: gatherConstraints
import cc.mallet.fst.Transducer; //导入方法依赖的package包/类
protected void gatherConstraints() {
    // Gather constraints by running forward-backward restricted to the
    // *provided output label sequence*: only paths that agree with the
    // labels contribute counts.
    assert (constraints.structureMatches(crf.parameters));
    // Reset accumulated constraints before refilling them.
    constraints.zero();
    for (Instance instance : sourceInstances) {
        FeatureVectorSequence input = (FeatureVectorSequence) instance.getData();
        FeatureSequence output = (FeatureSequence) instance.getTarget();
        double weight = sourceInstances.getInstanceWeight(instance);
        // Use the cheaper unweighted incrementor when the instance weight
        // is exactly 1.0.
        Transducer.Incrementor inc;
        if (weight == 1.0) {
            inc = constraints.new Incrementor();
        } else {
            inc = constraints.new WeightedIncrementor(weight);
        }
        // Constructing the lattice performs the constrained
        // forward-backward pass and drives the incrementor.
        new SumLatticeDefault(this.crf, input, output, inc);
    }
}
示例3: getSourceExpectations
import cc.mallet.fst.Transducer; //导入方法依赖的package包/类
protected void getSourceExpectations() {
    // Accumulate expectations over the source instances by running
    // forward-backward with each instance's observed output sequence.
    assert (sourceExpectations.structureMatches(crf.parameters));
    sourceExpectations.zero();
    for (Instance instance : sourceInstances) {
        FeatureVectorSequence input = (FeatureVectorSequence) instance.getData();
        FeatureSequence output = (FeatureSequence) instance.getTarget();
        double weight = sourceInstances.getInstanceWeight(instance);
        // Weighted incrementor only when the instance weight differs from 1.
        Transducer.Incrementor inc;
        if (weight == 1.0) {
            inc = sourceExpectations.new Incrementor();
        } else {
            inc = sourceExpectations.new WeightedIncrementor(weight);
        }
        new SumLatticeDefault(this.crf, input, output, inc);
    }
    // Normalize by the number of source instances.
    double factor = 1.0 / sourceInstances.size();
    for (int k = 0; k < sourceExpectations.weights.length; k++) {
        log(sourceExpectations.weights[k], factor);
    }
}
示例4: getTargetExpectations
import cc.mallet.fst.Transducer; //导入方法依赖的package包/类
protected void getTargetExpectations() {
    // Accumulate *unconstrained* expectations over the target instances:
    // the output sequence passed to the lattice is null, so all label
    // paths contribute.
    assert (targetExpectations.structureMatches(crf.parameters));
    // Reset expectations to zero before we fill them again.
    targetExpectations.zero();
    for (Instance instance : targetInstances) {
        FeatureVectorSequence input = (FeatureVectorSequence) instance.getData();
        double weight = targetInstances.getInstanceWeight(instance);
        Transducer.Incrementor inc;
        if (weight == 1.0) {
            inc = targetExpectations.new Incrementor();
        } else {
            inc = targetExpectations.new WeightedIncrementor(weight);
        }
        // null output => marginalize over all label sequences.
        new SumLatticeDefault(this.crf, input, null, inc);
    }
    // Normalize by the number of target instances.
    double factor = 1.0 / targetInstances.size();
    for (int k = 0; k < targetExpectations.weights.length; k++) {
        log(targetExpectations.weights[k], factor);
    }
}
示例5: updateCounts
import cc.mallet.fst.Transducer; //导入方法依赖的package包/类
/**
 * Updates the expectations due to the entropy. <p>
 *
 * Walks every possible transition (a -&gt; b) at every input position,
 * and for each one adds its (scaled) contribution to the covariance term
 * of the entropy gradient via the supplied incrementor.
 *
 * @param fvs           input feature-vector sequence the lattice was built over
 * @param gammas        log state marginals (unused directly here; kept for symmetry
 *                      with the forward/backward passes)
 * @param xis           log transition marginals, indexed [position][source][dest]
 * @param scalingFactor multiplier applied to each contribution before incrementing
 * @param incrementor   sink receiving the per-transition expectation updates
 */
private void updateCounts(FeatureVectorSequence fvs, double[][] gammas,
double[][][] xis, double scalingFactor, Transducer.Incrementor incrementor) {
for (int ip = 0; ip < inputLength; ++ip) {
for (int a = 0 ; a < numStates; ++a) {
// Skip states never reached at this position (no lattice node built).
if (nodes[ip][a] == null) {
continue;
}
Transducer.State sourceState = transducer.getState(a);
Transducer.TransitionIterator iter = sourceState.transitionIterator(fvs, ip, null, ip);
while (iter.hasNext()) {
int b = iter.next().getIndex();
double xi = xis[ip][a][b];
// Impossible transitions contribute nothing.
if (xi == Transducer.IMPOSSIBLE_WEIGHT) {
continue;
}
// xi is a log-probability; exponentiate to get p(y_i, y_{i+1}).
double xiProb = Math.exp(xi);
// This is obtained after substituting and re-arranging the equation
// at the end of the third page of the paper into the equation of
// d/d_theta -H(Y|x) at the end of the second page.
// \sum_(y_i,y_{i+1})
// f_k(y_i,y_{i+1},x) p(y_i, y_{i+1}) *
// (log p(y_i,y_{i+1}) + H^a(Y_{1..(i-1)},y_i) +
// H^b(Y_{(i+2)..T}|y_{i+1}))
double constrEntropy = xiProb * (xi + nodes[ip][a].alpha + nodes[ip+1][b].beta);
// NOTE(review): the message wording is confusing — the check is that the
// contribution to -H is non-positive (<= 0), i.e. entropy itself is
// non-negative.
assert(constrEntropy <= 0) : "Negative entropy should be negative! " + constrEntropy;
// full covariance, (note: it could be positive *or* negative)
double covContribution = constrEntropy - xiProb * entropy;
assert(!Double.isNaN(covContribution))
: "xi: " + xi + ", nodes[" + ip + "][" + a + "].alpha: " +
nodes[ip][a].alpha + ", nodes[" + (ip+1) + "][" + b +
"].beta: " + nodes[ip+1][b].beta;
incrementor.incrementTransition(iter, covContribution * scalingFactor);
}
}
}
}
示例6: SumLatticeKL
import cc.mallet.fst.Transducer; //导入方法依赖的package包/类
/**
 * Builds the KL sum lattice: accumulates the total weight of the model's
 * initial, transition, and final weights under the supplied probabilities
 * (initProbs, xis, finalProbs), optionally caching transition dot products
 * and feeding counts to an incrementor.
 */
public SumLatticeKL(Transducer trans, Sequence input,
        double[] initProbs, double[] finalProbs, double[][][] xis,
        double[][][] cachedDots,
        Transducer.Incrementor incrementor) {
    assert (xis != null) : "Need transition probabilities";

    this.t = trans;
    this.input = input;
    this.xis = xis;
    latticeLength = input.size() + 1;
    int numStates = t.numStates();
    totalWeight = 0;

    // Initial states. (When initProbs is null the original loop was a
    // no-op, so the whole pass can be skipped.)
    if (initProbs != null) {
        for (int s = 0; s < numStates; s++) {
            double w0 = t.getState(s).getInitialWeight();
            if (w0 == Transducer.IMPOSSIBLE_WEIGHT)
                continue;
            totalWeight += initProbs[s] * w0;
            if (incrementor != null)
                incrementor.incrementInitialState(t.getState(s), initProbs[s]);
        }
    }

    // Transitions: weight each model transition by q's probability.
    for (int pos = 0; pos < latticeLength - 1; pos++) {
        for (int src = 0; src < numStates; src++) {
            State source = t.getState(src);
            TransitionIterator it = source.transitionIterator(input, pos);
            while (it.hasNext()) {
                int dest = it.next().getIndex();
                double weight = it.getWeight();
                double p = xis[pos][src][dest];
                totalWeight += p * weight;
                if (cachedDots != null) {
                    cachedDots[pos][src][dest] = weight;
                }
                if (incrementor != null) {
                    // this is used to gather "constraints",
                    // so only probabilities under q are used
                    incrementor.incrementTransition(it, p);
                }
            }
        }
    }

    // Final states, mirroring the initial-state pass.
    if (finalProbs != null) {
        for (int s = 0; s < numStates; s++) {
            double wF = t.getState(s).getFinalWeight();
            if (wF == Transducer.IMPOSSIBLE_WEIGHT)
                continue;
            totalWeight += finalProbs[s] * wF;
            if (incrementor != null)
                incrementor.incrementFinalState(t.getState(s), finalProbs[s]);
        }
    }

    assert (totalWeight > Transducer.IMPOSSIBLE_WEIGHT) : "Total weight="
            + totalWeight;
}