This page collects typical usage examples of the Java method edu.stanford.nlp.util.Function.apply. If you are wondering what Function.apply does, how to call it, or where to find it used in context, the hand-picked code samples below should help. You can also explore further usage examples of the enclosing class, edu.stanford.nlp.util.Function.
The sections below show 13 code examples of Function.apply, sorted by popularity by default. Upvoting the examples you like or find useful helps the system recommend better Java code samples.
Example 1: main
import edu.stanford.nlp.util.Function; // import the package/class the method depends on
/**
 * This method just tests the functionality of the included transformers.
 */
public static void main(String[] args) {
  //TreeFactory tf = new LabeledScoredTreeFactory();
  Tree stringyTree = null;
  try {
    stringyTree = (new PennTreeReader(new StringReader("(S (VP (VBZ Try) (NP (DT this))) (. .))"), new LabeledScoredTreeFactory(new StringLabelFactory()))).readTree();
  } catch (IOException e) {
    // do nothing: the hard-coded tree string should always parse
  }
  System.out.println(stringyTree);
  Function<Tree, Tree> a = getLabeledTreeToCategoryWordTagTreeFunction();
  Tree adaptyTree = a.apply(stringyTree);
  System.out.println(adaptyTree);
  adaptyTree.percolateHeads(new CollinsHeadFinder());
  System.out.println(adaptyTree);
  Function<Tree, Tree> b = getLabeledTreeToStringLabeledTreeFunction();
  Tree stringLabelTree = b.apply(adaptyTree);
  System.out.println(stringLabelTree);
}
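Since edu.stanford.nlp.util.Function predates java.util.function, transforms like the ones above are written as classes with a single apply method. Below is a minimal sketch of a hand-rolled Function<Tree, Tree>; the class name and the deep-copy transform are purely illustrative and not part of the example above.

import edu.stanford.nlp.trees.Tree;
import edu.stanford.nlp.util.Function;

public class DeepCopyTreeFunction implements Function<Tree, Tree> {
  // apply is the single method of the old edu.stanford.nlp.util.Function interface
  public Tree apply(Tree in) {
    // illustrative transform: return an independent deep copy of the input tree
    return in == null ? null : in.deepCopy();
  }
  // usage (inside main above): Tree copy = new DeepCopyTreeFunction().apply(stringyTree);
}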
Example 2: main
import edu.stanford.nlp.util.Function; // import the package/class the method depends on
/**
 * This method just tests the functionality of the included transformers.
 */
public static void main(String[] args) {
  //TreeFactory tf = new LabeledScoredTreeFactory();
  Tree stringyTree = null;
  try {
    stringyTree = (new PennTreeReader(new StringReader("(S (VP (VBZ Try) (NP (DT this))) (. .))"), new LabeledScoredTreeFactory(new StringLabelFactory()))).readTree();
  } catch (IOException e) {
    // do nothing
  }
  System.out.println(stringyTree);
  Function<Tree, Tree> a = getLabeledTreeToCategoryWordTagTreeFunction();
  Tree adaptyTree = a.apply(stringyTree);
  System.out.println(adaptyTree);
  adaptyTree.percolateHeads(new CollinsHeadFinder());
  System.out.println(adaptyTree);
  Function<Tree, Tree> b = getLabeledTreeToStringLabeledTreeFunction();
  Tree stringLabelTree = b.apply(adaptyTree);
  System.out.println(stringLabelTree);
}
Example 3: getSingleAnnotationExtractor
import edu.stanford.nlp.util.Function; // import the package/class the method depends on
private static SingleAnnotationExtractor getSingleAnnotationExtractor(final Function<CoreMap, Temporal> temporalFunc)
{
  SingleAnnotationExtractor extractFunc = new SingleAnnotationExtractor();
  extractFunc.valueExtractor = new Function<CoreMap, Value>() {
    public Value apply(CoreMap in) {
      Temporal t = temporalFunc.apply(in);
      return new Expressions.PrimitiveValue("Temporal", t);
    }
  };
  extractFunc.tokensAnnotationField = CoreAnnotations.NumerizedTokensAnnotation.class;
  extractFunc.resultAnnotationField = Collections.singletonList((Class) TimeExpression.Annotation.class);
  extractFunc.resultNestedAnnotationField = TimeExpression.ChildrenAnnotation.class;
  extractFunc.resultAnnotationExtractor = TimeExpressionConverter;
  extractFunc.tokensAggregators = CoreMapAttributeAggregator.DEFAULT_NUMERIC_TOKENS_AGGREGATORS;
  return extractFunc;
}
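The valueExtractor above is an adapter: one Function wraps another, deferring to temporalFunc.apply and then repackaging the result. Stripped of the SUTime-specific types, the same pattern might look like the sketch below; the String/Integer types and the lengthOf name are invented for illustration only.

import edu.stanford.nlp.util.Function;

public class FunctionAdapterSketch {
  // wrap an inner Function: normalize the input first, then measure the result
  public static Function<String, Integer> lengthOf(final Function<String, String> normalize) {
    return new Function<String, Integer>() {
      public Integer apply(String in) {
        return normalize.apply(in).length();
      }
    };
  }
}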
Example 4: tallyTreeIterator
import edu.stanford.nlp.util.Function; // import the package/class the method depends on
protected void tallyTreeIterator(Iterator<Tree> treeIterator, Function<Tree, Tree> f) {
  while (treeIterator.hasNext()) {
    Tree tree = treeIterator.next();
    try {
      tree = f.apply(tree);
    } catch (Exception e) {
      if (Test.verbose) {
        e.printStackTrace();
      }
    }
    tallyTree(tree);
  }
}
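The f argument is whatever Tree-to-Tree transform the caller wants applied before tallying. A hedged sketch of one way a caller might supply it, assuming they already have an edu.stanford.nlp.trees.TreeTransformer on hand; the wrapper class name is made up for illustration.

import edu.stanford.nlp.trees.Tree;
import edu.stanford.nlp.trees.TreeTransformer;
import edu.stanford.nlp.util.Function;

public class TreeTransformerFunction implements Function<Tree, Tree> {
  private final TreeTransformer transformer;

  public TreeTransformerFunction(TreeTransformer transformer) {
    this.transformer = transformer;
  }

  public Tree apply(Tree t) {
    // delegate to the wrapped TreeTransformer; tallyTreeIterator calls apply on each tree
    return transformer.transformTree(t);
  }
  // usage: tallyTreeIterator(trees, new TreeTransformerFunction(myTransformer));
}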
Example 5: lexicalize
import edu.stanford.nlp.util.Function; // import the package/class the method depends on
/**
 * Returns a lexicalized Tree whose Labels are CategoryWordTag
 * instances, all corresponding to the input tree.
 */
public static Tree lexicalize(Tree t, HeadFinder hf) {
  Function<Tree,Tree> a = TreeFunctions.getLabeledTreeToCategoryWordTagTreeFunction();
  Tree t1 = a.apply(t);
  t1.percolateHeads(hf);
  return t1;
}
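A possible end-to-end usage sketch, reusing the PennTreeReader setup from Example 1 and performing the same two steps as lexicalize inline (relabel with the CategoryWordTag function, then percolate heads). The sample sentence and class name are illustrative, and the imports assume the standard edu.stanford.nlp.trees / edu.stanford.nlp.ling package layout.

import java.io.StringReader;
import edu.stanford.nlp.ling.StringLabelFactory;
import edu.stanford.nlp.trees.CollinsHeadFinder;
import edu.stanford.nlp.trees.LabeledScoredTreeFactory;
import edu.stanford.nlp.trees.PennTreeReader;
import edu.stanford.nlp.trees.Tree;
import edu.stanford.nlp.trees.TreeFunctions;
import edu.stanford.nlp.util.Function;

public class LexicalizeSketch {
  public static void main(String[] args) throws Exception {
    // parse a small bracketed tree, as in Example 1
    Tree t = new PennTreeReader(
        new StringReader("(S (NP (DT The) (NN cat)) (VP (VBZ sleeps)) (. .))"),
        new LabeledScoredTreeFactory(new StringLabelFactory())).readTree();
    // same steps as the lexicalize method above: relabel, then percolate heads
    Function<Tree, Tree> toCwt = TreeFunctions.getLabeledTreeToCategoryWordTagTreeFunction();
    Tree lexicalized = toCwt.apply(t);
    lexicalized.percolateHeads(new CollinsHeadFinder());
    System.out.println(lexicalized);
  }
}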
Example 6: tallyTreeIterator
import edu.stanford.nlp.util.Function; // import the package/class the method depends on
protected void tallyTreeIterator(Iterator<Tree> treeIterator,
                                 Function<Tree, Tree> f, double weight) {
  while (treeIterator.hasNext()) {
    Tree tree = treeIterator.next();
    try {
      tree = f.apply(tree);
    } catch (Exception e) {
      if (op.testOptions.verbose) {
        e.printStackTrace();
      }
    }
    tallyTree(tree, weight);
  }
}
Example 7: lexicalize
import edu.stanford.nlp.util.Function; // import the package/class the method depends on
/**
 * Returns a lexicalized Tree whose Labels are CategoryWordTag
 * instances, all corresponding to the input tree.
 */
public static Tree lexicalize(Tree t, HeadFinder hf) {
  Function<Tree,Tree> a =
      TreeFunctions.getLabeledTreeToCategoryWordTagTreeFunction();
  Tree t1 = a.apply(t);
  t1.percolateHeads(hf);
  return t1;
}
Example 8: updateDiagMinErr
import edu.stanford.nlp.util.Function; // import the package/class the method depends on
private void updateDiagMinErr(double[] diag, double[] s, double[] y) {
  double low = 0.0;
  double high = 0.0;
  for (int i = 0; i < s.length; i++) {
    double tmp = s[i] * (y[i] - diag[i]);
    high += tmp * tmp;
  }
  say("M");
  double alpha = Math.sqrt(ArrayMath.norm(y) / ArrayMath.norm(s)) * Math.sqrt(50.0 / (50.0 + k));
  alpha = alpha * Math.sqrt(ArrayMath.average(diag));
  say(" alpha " + nf.format(alpha));
  high = Math.sqrt(high) / (2 * alpha);
  Function<Double,Double> func = new lagrange(s, y, diag, alpha);
  double lamStar;
  if (func.apply(low) > 0) {
    lamStar = getRoot(func, low, high);
  } else {
    lamStar = 0.0;
    say(" * ");
  }
  for (int i = 0; i < s.length; i++) {
    diag[i] = (Math.abs(y[i] * s[i]) + 2 * lamStar * diag[i]) / (s[i] * s[i] + 1e-8 + 2 * lamStar);
    //diag[i] = (y[i]*s[i] + 2*lamStar*diag[i])/(s[i]*s[i] + 2*lamStar);
    if (diag[i] <= 1.0 / aMax) {
      diag[i] = 1.0 / gain;
    }
  }
}
Example 9: getRoot
import edu.stanford.nlp.util.Function; // import the package/class the method depends on
private double getRoot(Function<Double,Double> func, double lower, double upper) {
  double mid = 0.5 * (lower + upper);
  double TOL = 1e-8;
  double skew = 0.4;
  int count = 0;
  if (func.apply(upper) > 0 || func.apply(lower) < 0) {
    say("LOWER AND UPPER SUPPLIED TO GET ROOT DO NOT BOUND THE ROOT.");
  }
  double fval = func.apply(mid);
  while (Math.abs(fval) > TOL) {
    count += 1;
    if (fval > 0) {
      lower = mid;
    } else if (fval < 0) {
      upper = mid;
    }
    mid = skew * lower + (1 - skew) * upper;
    fval = func.apply(mid);
    if (count > 100) {
      break;
    }
  }
  say(" " + nf.format(mid) + " f" + nf.format(fval));
  return mid;
}
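Note the orientation of the bracket check: getRoot expects func.apply(lower) >= 0 and func.apply(upper) <= 0, i.e. a function that decreases across the interval. Since getRoot is private to its class, the sketch below only shows the kind of Function<Double,Double> involved and verifies the bracket; the function f(x) = 2 - x^2 (root at sqrt(2)) is illustrative.

import edu.stanford.nlp.util.Function;

public class RootBracketSketch {
  public static void main(String[] args) {
    // a decreasing function with a root at x = sqrt(2)
    Function<Double, Double> f = new Function<Double, Double>() {
      public Double apply(Double x) {
        return 2.0 - x * x;
      }
    };
    double lower = 1.0, upper = 2.0;
    // getRoot's precondition: f(lower) >= 0 and f(upper) <= 0
    System.out.println("f(lower) = " + f.apply(lower) + ", f(upper) = " + f.apply(upper));
  }
}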
Example 10: discretizeCompute
import edu.stanford.nlp.util.Function; // import the package/class the method depends on
public void discretizeCompute(Function<Double, Double> function, int numPoints, double low, double high) {
  double inc = (high - low) / numPoints;
  memory = Generics.newHashMap();
  for (int i = 0; i < numPoints; i++) {
    double x = low + i * inc;
    double y = function.apply(x);
    memory.put(x, y);
    System.err.println("for point " + x + "\t" + y);
  }
  dumpMemory();
}
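discretizeCompute tabulates function.apply on a uniform grid of numPoints values starting at low (the last grid point falls one step short of high). Below is a standalone sketch of the same pattern, with an illustrative objective and a plain HashMap standing in for the class's memory field.

import java.util.HashMap;
import java.util.Map;
import edu.stanford.nlp.util.Function;

public class GridEvalSketch {
  public static void main(String[] args) {
    // an illustrative objective: a parabola with its minimum at x = 3
    Function<Double, Double> objective = new Function<Double, Double>() {
      public Double apply(Double x) {
        return (x - 3.0) * (x - 3.0);
      }
    };
    // same tabulation pattern as discretizeCompute above
    int numPoints = 10;
    double low = 0.0, high = 5.0, inc = (high - low) / numPoints;
    Map<Double, Double> memory = new HashMap<Double, Double>();
    for (int i = 0; i < numPoints; i++) {
      double x = low + i * inc;
      memory.put(x, objective.apply(x));
    }
    System.out.println(memory);
  }
}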
Example 11: computeAverage
import edu.stanford.nlp.util.Function; // import the package/class the method depends on
/**
 * This computes the average over all folds of the function we're trying to optimize.
 * The input triple contains, in order, the train set, the test set, and the saved state.
 * You don't have to use the saved state if you don't want to.
 */
public double computeAverage(Function<Triple<GeneralDataset<L, F>,GeneralDataset<L, F>,SavedState>,Double> function)
{
  double sum = 0;
  Iterator<Triple<GeneralDataset<L, F>,GeneralDataset<L, F>,SavedState>> foldIt = iterator();
  while (foldIt.hasNext()) {
    sum += function.apply(foldIt.next());
  }
  return sum / kFold;
}
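The Function passed in receives each fold as a Triple of (train set, test set, saved state) and returns that fold's score; computeAverage then averages the scores over the kFold folds. A hedged sketch of such a scoring function follows; the actual training and evaluation are hidden behind a hypothetical scoreFold helper, and the method is left generic in the saved-state type so the sketch does not depend on where the SavedState class is declared.

import edu.stanford.nlp.classify.GeneralDataset;
import edu.stanford.nlp.util.Function;
import edu.stanford.nlp.util.Triple;

public class FoldScorerSketch {
  // hypothetical helper: train on `train`, evaluate on `test`, return e.g. accuracy
  static double scoreFold(GeneralDataset<String, String> train, GeneralDataset<String, String> test) {
    return 0.0; // placeholder; plug in your own training and evaluation here
  }

  // generic in S so the sketch does not need to import the SavedState class
  static <S> Function<Triple<GeneralDataset<String, String>, GeneralDataset<String, String>, S>, Double> scorer() {
    return new Function<Triple<GeneralDataset<String, String>, GeneralDataset<String, String>, S>, Double>() {
      public Double apply(Triple<GeneralDataset<String, String>, GeneralDataset<String, String>, S> fold) {
        // fold.first() = train set, fold.second() = test set, fold.third() = saved state (optional to use)
        return scoreFold(fold.first(), fold.second());
      }
    };
  }
}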
Example 12: getRoot
import edu.stanford.nlp.util.Function; // import the package/class the method depends on
private double getRoot(Function<Double,Double> func, double lower, double upper) {
  double mid = 0.5 * (lower + upper);
  double fval = 0.0;
  double TOL = 1e-8;
  double skew = 0.4;
  int count = 0;
  if (func.apply(upper) > 0 || func.apply(lower) < 0) {
    say("LOWER AND UPPER SUPPLIED TO GET ROOT DO NOT BOUND THE ROOT.");
  }
  fval = func.apply(mid);
  while (Math.abs(fval) > TOL) {
    count += 1;
    if (fval > 0) {
      lower = mid;
    } else if (fval < 0) {
      upper = mid;
    }
    mid = skew * lower + (1 - skew) * upper;
    fval = func.apply(mid);
    if (count > 100) {
      break;
    }
  }
  say(" " + nf.format(mid) + " f" + nf.format(fval));
  return mid;
}
Example 13: annotate
import edu.stanford.nlp.util.Function; // import the package/class the method depends on
/**
 * Annotate a collection of input annotations IN PARALLEL, using the number of threads
 * given by numThreads.
 *
 * @param annotations
 *          The input annotations to process
 * @param numThreads
 *          The number of threads to run on
 * @param callback
 *          A function to be called when an annotation finishes. The return value of the callback is ignored.
 */
public void annotate(final Iterable<Annotation> annotations, int numThreads, final Function<Annotation, Object> callback) {
  // case: single thread (no point in spawning threads)
  if (numThreads == 1) {
    for (Annotation ann : annotations) {
      annotate(ann);
      callback.apply(ann);
    }
    return; // without this, the threaded path below would annotate everything a second time
  }
  // Java's equivalent to ".map { annotation => annotate(annotation) }"
  Iterable<Runnable> threads = new Iterable<Runnable>() {
    @Override
    public Iterator<Runnable> iterator() {
      final Iterator<Annotation> iter = annotations.iterator();
      return new Iterator<Runnable>() {
        @Override
        public boolean hasNext() {
          return iter.hasNext();
        }
        @Override
        public Runnable next() {
          if (!iter.hasNext()) {
            throw new NoSuchElementException();
          }
          final Annotation input = iter.next();
          return new Runnable() {
            @Override
            public void run() {
              // (logging)
              String beginningOfDocument = input.toString().substring(0, Math.min(50, input.toString().length()));
              Redwood.startTrack("Annotating \"" + beginningOfDocument + "...\"");
              // (annotate)
              annotate(input);
              // (callback)
              callback.apply(input);
              // (logging again)
              Redwood.endTrack("Annotating \"" + beginningOfDocument + "...\"");
            }
          };
        }
        @Override
        public void remove() {
          iter.remove();
        }
      };
    }
  };
  // Thread
  Redwood.Util.threadAndRun(this.getClass().getSimpleName(), threads, numThreads);
}
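A hedged usage sketch, assuming the method above is the Iterable overload on edu.stanford.nlp.pipeline.StanfordCoreNLP: build a small pipeline, annotate two documents on two threads, and let the callback report completion. The annotator list, thread count, and sample texts are arbitrary; the callback's return value is ignored, so returning null is fine.

import java.util.Arrays;
import java.util.Properties;
import edu.stanford.nlp.pipeline.Annotation;
import edu.stanford.nlp.pipeline.StanfordCoreNLP;
import edu.stanford.nlp.util.Function;

public class ParallelAnnotateSketch {
  public static void main(String[] args) {
    Properties props = new Properties();
    props.setProperty("annotators", "tokenize, ssplit");
    StanfordCoreNLP pipeline = new StanfordCoreNLP(props);

    Iterable<Annotation> docs = Arrays.asList(
        new Annotation("The first document."),
        new Annotation("The second document."));

    // callback invoked as each annotation finishes; its return value is ignored
    Function<Annotation, Object> callback = new Function<Annotation, Object>() {
      public Object apply(Annotation done) {
        System.err.println("finished a document of length " + done.toString().length());
        return null;
      }
    };

    pipeline.annotate(docs, 2, callback);
  }
}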