This article collects typical usage examples of the Java method org.apache.lucene.analysis.payloads.PayloadHelper.decodeFloat. If you are wondering what PayloadHelper.decodeFloat does, how to use it, or where to find examples of it, the curated code samples below may help. You can also explore the enclosing class, org.apache.lucene.analysis.payloads.PayloadHelper, for further usage details.
The sections below present 13 code examples of PayloadHelper.decodeFloat, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java samples.
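Before the examples, a quick orientation: PayloadHelper.encodeFloat writes a float into four bytes and decodeFloat reads it back; the (bytes, offset) overload used by most examples below honors the offset of a BytesRef slice. A minimal round-trip sketch (illustrative, not taken from any of the examples):

import org.apache.lucene.analysis.payloads.PayloadHelper;
import org.apache.lucene.util.BytesRef;

public class DecodeFloatRoundTrip {
  public static void main(String[] args) {
    // Encode a float boost into a fresh 4-byte payload.
    byte[] data = PayloadHelper.encodeFloat(2.5f);

    // Wrap it the way Lucene hands payloads to consumers.
    BytesRef payload = new BytesRef(data);

    // Decode honoring the BytesRef offset, as the examples below do.
    float boost = PayloadHelper.decodeFloat(payload.bytes, payload.offset);
    System.out.println(boost); // prints 2.5
  }
}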
Example 1: payloadBoost
import org.apache.lucene.analysis.payloads.PayloadHelper; // import the package/class the method depends on
float payloadBoost() throws IOException {
  if (doc != docID()) {
    final int freq = postings.freq();
    payloadBoost = 0;
    for (int i = 0; i < freq; ++i) {
      postings.nextPosition();
      final BytesRef payload = postings.getPayload();
      float boost;
      if (payload == null) {
        boost = 1;
      } else if (payload.length == 1) {
        boost = SmallFloat.byte315ToFloat(payload.bytes[payload.offset]);
      } else if (payload.length == 4) {
        // TODO: for bw compat only, remove this in 6.0
        boost = PayloadHelper.decodeFloat(payload.bytes, payload.offset);
      } else {
        throw new IllegalStateException("Payloads are expected to have a length of 1 or 4 but got: "
            + payload);
      }
      payloadBoost += boost;
    }
    payloadBoost /= freq;
    doc = docID();
  }
  return payloadBoost;
}
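Example 1 accepts two payload widths: a lossy one-byte SmallFloat encoding and the exact four-byte PayloadHelper encoding kept for backward compatibility. A small sketch of the difference, assuming the same Lucene generation as the example, where SmallFloat.floatToByte315 is still public:

import org.apache.lucene.analysis.payloads.PayloadHelper;
import org.apache.lucene.util.SmallFloat;

public class PayloadWidths {
  public static void main(String[] args) {
    float boost = 2.5f;

    // 4-byte exact encoding, read back with PayloadHelper.decodeFloat.
    byte[] wide = PayloadHelper.encodeFloat(boost);
    System.out.println(PayloadHelper.decodeFloat(wide, 0)); // 2.5

    // 1-byte lossy encoding (3 mantissa bits), read back with byte315ToFloat.
    byte narrow = SmallFloat.floatToByte315(boost);
    System.out.println(SmallFloat.byte315ToFloat(narrow)); // 2.5 here, but precision is limited
  }
}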
Example 2: testEncoder
import org.apache.lucene.analysis.payloads.PayloadHelper; // import the package/class the method depends on
public void testEncoder() throws Exception {
  Reader reader = new StringReader("the|0.1 quick|0.1 red|0.1");
  TokenStream stream = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
  stream = tokenFilterFactory("DelimitedPayload", "encoder", "float").create(stream);
  stream.reset();
  while (stream.incrementToken()) {
    PayloadAttribute payAttr = stream.getAttribute(PayloadAttribute.class);
    assertNotNull(payAttr);
    byte[] payData = payAttr.getPayload().bytes;
    assertNotNull(payData);
    float payFloat = PayloadHelper.decodeFloat(payData);
    assertEquals(0.1f, payFloat, 0.0f);
  }
  stream.end();
  stream.close();
}
Example 3: testDelim
import org.apache.lucene.analysis.payloads.PayloadHelper; // import the package/class the method depends on
public void testDelim() throws Exception {
  Reader reader = new StringReader("the*0.1 quick*0.1 red*0.1");
  TokenStream stream = new MockTokenizer(reader, MockTokenizer.WHITESPACE, false);
  stream = tokenFilterFactory("DelimitedPayload",
      "encoder", "float",
      "delimiter", "*").create(stream);
  stream.reset();
  while (stream.incrementToken()) {
    PayloadAttribute payAttr = stream.getAttribute(PayloadAttribute.class);
    assertNotNull(payAttr);
    byte[] payData = payAttr.getPayload().bytes;
    assertNotNull(payData);
    float payFloat = PayloadHelper.decodeFloat(payData);
    assertEquals(0.1f, payFloat, 0.0f);
  }
  stream.end();
  stream.close();
}
Example 4: testEncoder
import org.apache.lucene.analysis.payloads.PayloadHelper; // import the package/class the method depends on
public void testEncoder() throws Exception {
  Map<String,String> args = new HashMap<String, String>();
  args.put(DelimitedPayloadTokenFilterFactory.ENCODER_ATTR, "float");
  DelimitedPayloadTokenFilterFactory factory = new DelimitedPayloadTokenFilterFactory();
  factory.init(args);
  ResourceLoader loader = new StringMockResourceLoader("solr/collection1");
  factory.inform(loader);
  TokenStream input = new MockTokenizer(new StringReader("the|0.1 quick|0.1 red|0.1"), MockTokenizer.WHITESPACE, false);
  DelimitedPayloadTokenFilter tf = factory.create(input);
  tf.reset();
  while (tf.incrementToken()) {
    PayloadAttribute payAttr = tf.getAttribute(PayloadAttribute.class);
    assertTrue("payAttr is null and it shouldn't be", payAttr != null);
    byte[] payData = payAttr.getPayload().bytes;
    assertTrue("payData is null and it shouldn't be", payData != null);
    float payFloat = PayloadHelper.decodeFloat(payData);
    assertTrue(payFloat + " does not equal: " + 0.1f, payFloat == 0.1f);
  }
}
Example 5: testDelim
import org.apache.lucene.analysis.payloads.PayloadHelper; // import the package/class the method depends on
public void testDelim() throws Exception {
  Map<String,String> args = new HashMap<String, String>();
  args.put(DelimitedPayloadTokenFilterFactory.ENCODER_ATTR, FloatEncoder.class.getName());
  args.put(DelimitedPayloadTokenFilterFactory.DELIMITER_ATTR, "*");
  DelimitedPayloadTokenFilterFactory factory = new DelimitedPayloadTokenFilterFactory();
  factory.init(args);
  ResourceLoader loader = new StringMockResourceLoader("solr/collection1");
  factory.inform(loader);
  TokenStream input = new MockTokenizer(new StringReader("the*0.1 quick*0.1 red*0.1"), MockTokenizer.WHITESPACE, false);
  DelimitedPayloadTokenFilter tf = factory.create(input);
  tf.reset();
  while (tf.incrementToken()) {
    PayloadAttribute payAttr = tf.getAttribute(PayloadAttribute.class);
    assertTrue("payAttr is null and it shouldn't be", payAttr != null);
    byte[] payData = payAttr.getPayload().bytes;
    assertTrue("payData is null and it shouldn't be", payData != null);
    float payFloat = PayloadHelper.decodeFloat(payData);
    assertTrue(payFloat + " does not equal: " + 0.1f, payFloat == 0.1f);
  }
}
Example 6: payloadAsFloat
import org.apache.lucene.analysis.payloads.PayloadHelper; // import the package/class the method depends on
public float payloadAsFloat(float defaultMissing) {
  if (payload != null && payload.length != 0) {
    return PayloadHelper.decodeFloat(payload.bytes, payload.offset);
  } else {
    return defaultMissing;
  }
}
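Example 6 guards against both a missing and a zero-length payload before decoding. A self-contained sketch of the same defaulting pattern, with a hypothetical static helper standing in for the instance method above:

import org.apache.lucene.analysis.payloads.PayloadHelper;
import org.apache.lucene.util.BytesRef;

public class PayloadDefaulting {
  // Mirrors Example 6: decode if a payload is present, otherwise return the fallback.
  static float payloadAsFloat(BytesRef payload, float defaultMissing) {
    if (payload != null && payload.length != 0) {
      return PayloadHelper.decodeFloat(payload.bytes, payload.offset);
    }
    return defaultMissing;
  }

  public static void main(String[] args) {
    System.out.println(payloadAsFloat(null, 1.0f)); // 1.0 (missing payload)
    System.out.println(payloadAsFloat(new BytesRef(PayloadHelper.encodeFloat(0.3f)), 1.0f)); // 0.3
  }
}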
Example 7: incrementToken
import org.apache.lucene.analysis.payloads.PayloadHelper; // import the package/class the method depends on
@Override
public final boolean incrementToken() throws IOException {
  if (input.incrementToken()) {
    CharTermAttribute termAtt = this.getAttribute(CharTermAttribute.class);
    final String term = termAtt.toString();
    PayloadAttribute payloadAtt = this.getAttribute(PayloadAttribute.class);
    final BytesRef payload = payloadAtt.getPayload();
    if (payload == null) {
      return true; // no payload: pass the term through unchanged
    }
    float payloadValue = PayloadHelper.decodeFloat(payload.bytes, payload.offset);
    if (payloadValue == 0.0f) {
      return true; // zero weight: leave the term unboosted
    }
    String weight = Float.toString(payloadValue);
    // skip weights that render in scientific notation (e.g. 1.0E-5)
    if (weight.contains("E-")) {
      return true;
    }
    // rewrite the token as "term^weight"
    termAtt.setEmpty();
    termAtt.append(term + "^" + weight);
    return true;
  }
  return false;
}
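Example 7 rewrites each token as term^weight, skipping tokens whose decoded weight is zero or so small that Float.toString renders it in scientific notation. A minimal, hypothetical driver mirroring that guard on a single term/payload pair (the names and values are illustrative, not from the source project):

import org.apache.lucene.analysis.payloads.PayloadHelper;
import org.apache.lucene.util.BytesRef;

public class BoostRewriteDemo {
  public static void main(String[] args) {
    String term = "apple";
    BytesRef payload = new BytesRef(PayloadHelper.encodeFloat(2.5f));

    float weight = PayloadHelper.decodeFloat(payload.bytes, payload.offset);
    String rendered = Float.toString(weight);

    if (weight != 0.0f && !rendered.contains("E-")) {
      System.out.println(term + "^" + rendered); // apple^2.5
    } else {
      System.out.println(term); // zero or tiny weights leave the term unboosted
    }
  }
}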
Example 8: scorePayload
import org.apache.lucene.analysis.payloads.PayloadHelper; // import the package/class the method depends on
@Override
public float scorePayload(int doc, int start, int end, BytesRef payload) {
  if (payload != null) {
    return PayloadHelper.decodeFloat(payload.bytes, payload.offset);
  }
  return 1.0F;
}
Example 9: scorePayload
import org.apache.lucene.analysis.payloads.PayloadHelper; // import the package/class the method depends on
@Override
public float scorePayload(int docID, int start, int end, BytesRef payload) {
  float pload = 1.0f;
  if (payload != null) {
    //pload = PayloadHelper.decodeFloat(payload.bytes);
    pload = PayloadHelper.decodeFloat(payload.bytes, payload.offset);
  }
  System.out.println("===> docid: " + docID + " payload: " + pload);
  return pload;
}
Example 10: scorePayload
import org.apache.lucene.analysis.payloads.PayloadHelper; // import the package/class the method depends on
@Override
public float scorePayload(int docID, int start, int end, BytesRef payload) {
  float pload = 1.0f;
  if (payload != null) {
    // Note: this overload ignores payload.offset and assumes the payload
    // starts at index 0; compare the offset-aware call in Example 9.
    pload = PayloadHelper.decodeFloat(payload.bytes);
  }
  logger.info("===> docid: " + docID + " payload: " + pload);
  return pload;
}
Example 11: scorePayload
import org.apache.lucene.analysis.payloads.PayloadHelper; // import the package/class the method depends on
public float scorePayload(int docID, String fieldName, int start, int end, byte[] payload, int offset, int length) {
  if (payload != null) {
    return PayloadHelper.decodeFloat(payload, offset);
  } else {
    return 1.0F;
  }
}
Example 12: addTermWeights
import org.apache.lucene.analysis.payloads.PayloadHelper; // import the package/class the method depends on
/**
 * Adds the term weights found by tokenizing text from the reader into the given Map.
 *
 * @param reader a source of text to be tokenized
 * @param termWeightMap a Map of terms and their weights
 * @param fieldName used by the analyzer for any special per-field analysis
 */
private void addTermWeights(Reader reader, Map<String, Flt> termWeightMap, String fieldName)
    throws IOException {
  if (analyzer == null) {
    throw new UnsupportedOperationException("To use RelevancyFeedback without " +
        "term vectors, you must provide an Analyzer");
  }
  TokenStream ts = analyzer.tokenStream(fieldName, reader);
  try {
    int tokenCount = 0;
    // for every token
    CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
    PayloadAttribute payloadAttr = ts.addAttribute(PayloadAttribute.class);
    ts.reset();
    while (ts.incrementToken()) {
      String word = termAtt.toString();
      tokenCount++;
      if (tokenCount > maxNumTokensParsedPerField) {
        break;
      }
      if (word.trim().length() == 0) {
        continue;
      }
      if (isNoiseWord(word)) {
        continue;
      }
      BytesRef payload = payloadAttr.getPayload();
      float tokenWeight = 1.0f; // 1.0, or the payload if set and a payload field
      if (isPayloadField(fieldName) && payload != null) {
        tokenWeight = PayloadHelper.decodeFloat(payload.bytes, payload.offset);
      }
      // increment frequency
      Flt termWeight = termWeightMap.get(word);
      if (termWeight == null) {
        termWeightMap.put(word, new Flt(tokenWeight));
      } else {
        termWeight.x += tokenWeight;
      }
    }
    ts.end();
  } finally {
    IOUtils.closeWhileHandlingException(ts);
  }
}
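addTermWeights expects the analyzer to attach float payloads to tokens. A sketch of how such a stream is typically produced and then decoded, assuming a Lucene 5.x-style API (WhitespaceTokenizer with a no-arg constructor; the input text and weights are illustrative):

import java.io.StringReader;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.core.WhitespaceTokenizer;
import org.apache.lucene.analysis.payloads.DelimitedPayloadTokenFilter;
import org.apache.lucene.analysis.payloads.FloatEncoder;
import org.apache.lucene.analysis.payloads.PayloadHelper;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.PayloadAttribute;
import org.apache.lucene.util.BytesRef;

public class WeightedTokens {
  public static void main(String[] args) throws Exception {
    WhitespaceTokenizer tok = new WhitespaceTokenizer();
    tok.setReader(new StringReader("apple|2.0 pie|0.5"));
    // Encode the "term|weight" suffix of each token as a float payload.
    TokenStream ts = new DelimitedPayloadTokenFilter(tok, '|', new FloatEncoder());
    CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
    PayloadAttribute payAtt = ts.addAttribute(PayloadAttribute.class);
    ts.reset();
    while (ts.incrementToken()) {
      BytesRef payload = payAtt.getPayload();
      float w = payload == null ? 1.0f : PayloadHelper.decodeFloat(payload.bytes, payload.offset);
      System.out.println(termAtt + " -> " + w); // apple -> 2.0, pie -> 0.5
    }
    ts.end();
    ts.close();
  }
}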
Example 13: addTermWeights
import org.apache.lucene.analysis.payloads.PayloadHelper; // import the package/class the method depends on
/**
 * Adds the term weights found by tokenizing text from the reader into the given Map.
 *
 * @param reader a source of text to be tokenized
 * @param termWeightMap a Map of terms and their weights
 * @param fieldName used by the analyzer for any special per-field analysis
 */
private void addTermWeights(Reader reader, Map<String, Flt> termWeightMap, String fieldName)
    throws IOException {
  if (analyzer == null) {
    throw new UnsupportedOperationException("To use MoreLikeThis without " +
        "term vectors, you must provide an Analyzer");
  }
  TokenStream ts = analyzer.tokenStream(fieldName, reader);
  try {
    int tokenCount = 0;
    // for every token
    CharTermAttribute termAtt = ts.addAttribute(CharTermAttribute.class);
    PayloadAttribute payloadAttr = ts.addAttribute(PayloadAttribute.class);
    ts.reset();
    while (ts.incrementToken()) {
      String word = termAtt.toString();
      tokenCount++;
      if (tokenCount > maxNumTokensParsedPerField) {
        break;
      }
      if (word.trim().length() == 0) {
        continue;
      }
      if (isNoiseWord(word)) {
        continue;
      }
      BytesRef payload = payloadAttr.getPayload();
      float tokenWeight = 1.0f; // 1.0, or the payload if set and a payload field
      if (isPayloadField(fieldName) && payload != null) {
        tokenWeight = PayloadHelper.decodeFloat(payload.bytes, payload.offset);
      }
      // increment frequency
      Flt termWeight = termWeightMap.get(word);
      if (termWeight == null) {
        termWeightMap.put(word, new Flt(tokenWeight));
      } else {
        termWeight.x += tokenWeight;
      }
    }
    ts.end();
  } finally {
    IOUtils.closeWhileHandlingException(ts);
  }
}