本文整理汇总了Java中org.apache.lucene.search.vectorhighlight.FieldPhraseList.WeightedPhraseInfo.getBoost方法的典型用法代码示例。如果您正苦于以下问题:Java WeightedPhraseInfo.getBoost方法的具体用法?Java WeightedPhraseInfo.getBoost怎么用?Java WeightedPhraseInfo.getBoost使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.lucene.search.vectorhighlight.FieldPhraseList.WeightedPhraseInfo
的用法示例。
在下文中一共展示了WeightedPhraseInfo.getBoost方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: add
import org.apache.lucene.search.vectorhighlight.FieldPhraseList.WeightedPhraseInfo; // import the package/class this method depends on
@Override
public void add( int startOffset, int endOffset, List<WeightedPhraseInfo> phraseInfoList ) {
  // Turn every matched phrase into a SubInfo (keeping its own boost) and use
  // the sum of all phrase boosts as the weight of the resulting fragment.
  List<SubInfo> subInfos = new ArrayList<>();
  float fragmentBoost = 0;
  for ( WeightedPhraseInfo phrase : phraseInfoList ) {
    float boost = phrase.getBoost();
    subInfos.add( new SubInfo( phrase.getText(), phrase.getTermsOffsets(), phrase.getSeqnum(), boost ) );
    fragmentBoost += boost;
  }
  getFragInfos().add( new WeightedFragInfo( startOffset, endOffset, subInfos, fragmentBoost ) );
}
示例2: add
import org.apache.lucene.search.vectorhighlight.FieldPhraseList.WeightedPhraseInfo; // import the package/class this method depends on
@Override
public void add( int startOffset, int endOffset, List<WeightedPhraseInfo> phraseInfoList ) {
  // Pass 1: build one SubInfo per phrase whose boost counts each distinct
  // term text only once (term weight * phrase boost), while tallying the
  // total number of term occurrences across all phrases.
  HashSet<String> seenTerms = new HashSet<>();
  List<SubInfo> unscaled = new ArrayList<>();
  int termCount = 0;
  for ( WeightedPhraseInfo phrase : phraseInfoList ) {
    float phraseBoost = 0;
    for ( TermInfo term : phrase.getTermsInfos() ) {
      if ( seenTerms.add( term.getText() ) ) {
        phraseBoost += term.getWeight() * phrase.getBoost();
      }
      termCount++;  // counts every occurrence, not just distinct terms
    }
    unscaled.add( new SubInfo( phrase.getText(), phrase.getTermsOffsets(), phrase.getSeqnum(), phraseBoost ) );
  }
  // Fold the term count into the weight so a one-word query does not give
  // every fragment the same score regardless of how many words it holds;
  // soften it with a sqrt normalization so long fragments cannot simply
  // "outrank" shorter but more relevant ones.
  float norm = termCount * ( 1 / (float)Math.sqrt( termCount ) );
  // Pass 2: scale each SubInfo's boost by the normalization factor and
  // accumulate the fragment's total boost.
  List<SubInfo> scaled = new ArrayList<>();
  float fragmentBoost = 0;
  for ( SubInfo sub : unscaled ) {
    float boost = sub.getBoost() * norm;
    scaled.add( new SubInfo( sub.getText(), sub.getTermsOffsets(), sub.getSeqnum(), boost ) );
    fragmentBoost += boost;
  }
  getFragInfos().add( new WeightedFragInfo( startOffset, endOffset, scaled, fragmentBoost ) );
}
示例3: add
import org.apache.lucene.search.vectorhighlight.FieldPhraseList.WeightedPhraseInfo; // import the package/class this method depends on
@Override
public void add( int startOffset, int endOffset, List<WeightedPhraseInfo> phraseInfoList ) {
  // Wrap each phrase match in a (boost-less) SubInfo and accumulate the
  // phrase boosts into the overall weight of the new fragment.
  List<SubInfo> subInfos = new ArrayList<SubInfo>();
  float fragmentBoost = 0;
  for ( WeightedPhraseInfo phrase : phraseInfoList ) {
    subInfos.add( new SubInfo( phrase.getText(), phrase.getTermsOffsets(), phrase.getSeqnum() ) );
    fragmentBoost += phrase.getBoost();
  }
  getFragInfos().add( new WeightedFragInfo( startOffset, endOffset, subInfos, fragmentBoost ) );
}
示例4: add
import org.apache.lucene.search.vectorhighlight.FieldPhraseList.WeightedPhraseInfo; // import the package/class this method depends on
@Override
public void add( int startOffset, int endOffset, List<WeightedPhraseInfo> phraseInfoList ) {
  // Builds a fragment whose weight counts each distinct term text once
  // (term weight * phrase boost) and is then length-normalized.
  float totalBoost = 0;
  List<SubInfo> subInfos = new ArrayList<SubInfo>();
  HashSet<String> distinctTerms = new HashSet<String>();
  int length = 0;  // total term occurrences, distinct or not
  for( WeightedPhraseInfo phraseInfo : phraseInfoList ){
    subInfos.add( new SubInfo( phraseInfo.getText(), phraseInfo.getTermsOffsets(), phraseInfo.getSeqnum() ) );
    for ( TermInfo ti : phraseInfo.getTermsInfos()) {
      if ( distinctTerms.add( ti.getText() ) )
        totalBoost += ti.getWeight() * phraseInfo.getBoost();
      length++;
    }
  }
  // We want that terms per fragment (length) is included into the weight. Otherwise a one-word-query
  // would cause an equal weight for all fragments regardless of how much words they contain.
  // To avoid that fragments containing a high number of words possibly "outrank" more relevant fragments
  // we "bend" the length with a standard-normalization a little bit.
  // Guard length == 0 (empty phrase list / no terms): 1 / Math.sqrt(0) is
  // Infinity and 0 * Infinity is NaN, which would make totalBoost NaN.
  if ( length > 0 ) {
    totalBoost *= length * ( 1 / Math.sqrt( length ) );
  }
  getFragInfos().add( new WeightedFragInfo( startOffset, endOffset, subInfos, totalBoost ) );
}
示例5: add
import org.apache.lucene.search.vectorhighlight.FieldPhraseList.WeightedPhraseInfo; // import the package/class this method depends on
@Override
public void add( int startOffset, int endOffset, List<WeightedPhraseInfo> phraseInfoList ) {
  // Each phrase contributes one SubInfo carrying its own boost; the
  // fragment's weight is simply the sum of all phrase boosts.
  List<SubInfo> children = new ArrayList<SubInfo>();
  float weight = 0;
  for ( WeightedPhraseInfo phrase : phraseInfoList ) {
    children.add( new SubInfo( phrase.getText(), phrase.getTermsOffsets(), phrase.getSeqnum(), phrase.getBoost() ) );
    weight += phrase.getBoost();
  }
  getFragInfos().add( new WeightedFragInfo( startOffset, endOffset, children, weight ) );
}
示例6: add
import org.apache.lucene.search.vectorhighlight.FieldPhraseList.WeightedPhraseInfo; // import the package/class this method depends on
@Override
public void add( int startOffset, int endOffset, List<WeightedPhraseInfo> phraseInfoList ) {
  // Pass 1: one SubInfo per phrase; its boost sums weight * phrase-boost
  // over term texts not seen before, while counting every term occurrence.
  HashSet<String> alreadyCounted = new HashSet<String>();
  List<SubInfo> rawInfos = new ArrayList<SubInfo>();
  int terms = 0;
  for ( WeightedPhraseInfo phrase : phraseInfoList ) {
    float phraseBoost = 0;
    for ( TermInfo term : phrase.getTermsInfos() ) {
      if ( alreadyCounted.add( term.getText() ) )
        phraseBoost += term.getWeight() * phrase.getBoost();
      terms++;
    }
    rawInfos.add( new SubInfo( phrase.getText(), phrase.getTermsOffsets(), phrase.getSeqnum(), phraseBoost ) );
  }
  // Fold the term count into the weight — otherwise a one-word query would
  // score all fragments equally regardless of word count — but soften it
  // with a sqrt normalization so that word-heavy fragments cannot simply
  // "outrank" shorter, more relevant ones.
  float lengthNorm = terms * ( 1 / (float)Math.sqrt( terms ) );
  // Pass 2: apply the normalization to each SubInfo boost and accumulate
  // the fragment's total weight.
  List<SubInfo> scaledInfos = new ArrayList<SubInfo>();
  float fragmentBoost = 0;
  for ( SubInfo raw : rawInfos ) {
    float scaledBoost = raw.getBoost() * lengthNorm;
    scaledInfos.add( new SubInfo( raw.getText(), raw.getTermsOffsets(), raw.getSeqnum(), scaledBoost ) );
    fragmentBoost += scaledBoost;
  }
  getFragInfos().add( new WeightedFragInfo( startOffset, endOffset, scaledInfos, fragmentBoost ) );
}