本文整理汇总了Java中org.apache.solr.schema.FieldType类的典型用法代码示例。如果您正苦于以下问题:Java FieldType类的具体用法?Java FieldType怎么用?Java FieldType使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
FieldType类属于org.apache.solr.schema包,在下文中一共展示了FieldType类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: getInstance
import org.apache.solr.schema.FieldType; //导入依赖的package包/类
@Override
public UpdateRequestProcessor getInstance(SolrQueryRequest req,
                                          SolrQueryResponse rsp,
                                          UpdateRequestProcessor next) {
  final IndexSchema schema = req.getSchema();
  return new FieldMutatingUpdateProcessor(getSelector(), next) {
    /** Collapses a multi-valued input field into the single IndexableField built by its type. */
    @Override
    protected SolrInputField mutate(SolrInputField src) {
      // Zero or one value: nothing to collapse, pass through untouched.
      if (src.getValueCount() <= 1) {
        return src;
      }
      final SchemaField schemaField = schema.getField(src.getName());
      final IndexableField merged =
          schemaField.getType().createField(schemaField, src, src.getBoost());
      if (merged == null) {
        // The type produced nothing: remove the field from the document.
        return null;
      }
      src.setValue(merged, src.getBoost());
      return src;
    }
  };
}
示例2: TokenizeText
import org.apache.solr.schema.FieldType; //导入依赖的package包/类
public TokenizeText(CommandBuilder builder, Config config, Command parent, Command child, MorphlineContext context) {
  super(builder, config, parent, child, context);
  this.inputFieldName = getConfigs().getString(config, "inputField");
  this.outputFieldName = getConfigs().getString(config, "outputField");

  // Resolve the index-time analyzer from the Solr schema referenced by solrLocator.
  String fieldTypeName = getConfigs().getString(config, "solrFieldType");
  SolrLocator locator = new SolrLocator(getConfigs().getConfig(config, "solrLocator"), context);
  LOG.debug("solrLocator: {}", locator);
  FieldType fieldType = locator.getIndexSchema().getFieldTypeByName(fieldTypeName);
  if (fieldType == null) {
    throw new MorphlineCompilationException("Missing Solr field type in schema.xml for name: " + fieldTypeName, config);
  }
  this.analyzer = fieldType.getIndexAnalyzer();
  Preconditions.checkNotNull(analyzer);

  // Open a token stream once so the CharTermAttribute is registered for later (implicit) reuse.
  try {
    this.token = analyzer.tokenStream("content", reader).addAttribute(CharTermAttribute.class);
  } catch (IOException e) {
    throw new MorphlineCompilationException("Cannot create token stream", config, e);
  }
  Preconditions.checkNotNull(token);
  validateArguments();
}
示例3: getFieldQuery
import org.apache.solr.schema.FieldType; //导入依赖的package包/类
/**
 * Builds a query for a single field/value pair, honoring magic "_" fields
 * and delegating to the field type for non-tokenized fields.
 */
protected Query getFieldQuery(String field, String queryText, boolean quoted) throws SyntaxError {
  checkNullField(field);

  // Magic field names (leading '_') are hooks for built-in functions — hand off to a sub-parser.
  if (field.charAt(0) == '_' && parser != null) {
    MagicFieldName magic = MagicFieldName.get(field);
    if (magic != null) {
      subQParser = parser.subQuery(queryText, magic.subParser);
      return subQParser.getQuery();
    }
  }

  SchemaField schemaField = schema.getFieldOrNull(field);
  if (schemaField == null) {
    // Unknown field: fall back to a normal analyzed field query.
    return newFieldQuery(getAnalyzer(), field, queryText, quoted);
  }

  FieldType type = schemaField.getType();
  if (type.isTokenized() && schemaField.indexed()) {
    // Tokenized + indexed: analyze here; TextField may force phrase queries.
    boolean autoPhrase =
        type instanceof TextField && ((TextField) type).getAutoGeneratePhraseQueries();
    return newFieldQuery(getAnalyzer(), field, queryText, quoted || autoPhrase);
  }
  // Everything else is delegated to the field type itself.
  return type.getFieldQuery(parser, schemaField, queryText);
}
示例4: serializeSearchGroup
import org.apache.solr.schema.FieldType; //导入依赖的package包/类
/**
 * Converts search groups into a NamedList keyed by group value, marshalling
 * each sort value through its schema field type so it survives transport.
 */
private NamedList serializeSearchGroup(Collection<SearchGroup<BytesRef>> data, Sort groupSort) {
  NamedList<Object[]> result = new NamedList<>();
  for (SearchGroup<BytesRef> group : data) {
    int count = group.sortValues.length;
    Object[] marshalled = new Object[count];
    for (int i = 0; i < count; i++) {
      Object value = group.sortValues[i];
      String sortFieldName = groupSort.getSort()[i].getField();
      SchemaField schemaField =
          sortFieldName != null ? searcher.getSchema().getFieldOrNull(sortFieldName) : null;
      // Only marshal when both a schema field and a non-null value exist.
      if (schemaField != null && value != null) {
        value = schemaField.getType().marshalSortValue(value);
      }
      marshalled[i] = value;
    }
    String key = group.groupValue == null ? null : group.groupValue.utf8ToString();
    result.add(key, marshalled);
  }
  return result;
}
示例5: createParser
import org.apache.solr.schema.FieldType; //导入依赖的package包/类
@Override
public QParser createParser(String qstr, SolrParams localParams, SolrParams params, SolrQueryRequest req) {
  return new QParser(qstr, localParams, params, req) {
    /** Builds a raw TermQuery, converting the readable value to indexed form when the type is known. */
    @Override
    public Query parse() {
      String fieldName = localParams.get(QueryParsing.F);
      String value = localParams.get(QueryParsing.V);
      BytesRef indexedTerm = new BytesRef();
      FieldType fieldType = req.getSchema().getFieldTypeNoEx(fieldName);
      if (fieldType == null) {
        // Unknown field type: use the raw characters as the term bytes.
        indexedTerm.copyChars(value);
      } else {
        fieldType.readableToIndexed(value, indexedTerm);
      }
      return new TermQuery(new Term(fieldName, indexedTerm));
    }
  };
}
示例6: get
import org.apache.solr.schema.FieldType; //导入依赖的package包/类
/** Renders every field type in the schema, sorted by name, into the Solr response. */
@Override
public Representation get() {
  try {
    // TreeMap gives deterministic, name-sorted iteration order.
    Map<String, FieldType> byName = new TreeMap<>(getSchema().getFieldTypes());
    List<SimpleOrderedMap<Object>> props = new ArrayList<>();
    for (FieldType fieldType : byName.values()) {
      props.add(getFieldTypeProperties(fieldType));
    }
    getSolrResponse().add(IndexSchema.FIELD_TYPES, props);
  } catch (Exception e) {
    // Surface the failure in the response rather than propagating.
    getSolrResponse().setException(e);
  }
  handlePostExecution(log);
  return new SolrOutputRepresentation();
}
示例7: create
import org.apache.solr.schema.FieldType; //导入依赖的package包/类
/**
 * Builds a FreeTextSuggester from factory params: the mandatory query-analyzer
 * field type plus optional n-gram size and separator byte.
 */
@Override
public Lookup create(NamedList params, SolrCore core) {
  Object fieldTypeName = params.get(QUERY_ANALYZER);
  if (fieldTypeName == null) {
    throw new IllegalArgumentException("Error in configuration: " + QUERY_ANALYZER + " parameter is mandatory");
  }
  FieldType fieldType = core.getLatestSchema().getFieldTypeByName(fieldTypeName.toString());
  if (fieldType == null) {
    throw new IllegalArgumentException("Error in configuration: " + fieldTypeName.toString() + " is not defined in the schema");
  }

  Object ngramsParam = params.get(NGRAMS);
  int grams = ngramsParam == null
      ? FreeTextSuggester.DEFAULT_GRAMS
      : Integer.parseInt(ngramsParam.toString());

  // Separator is the first UTF-8 byte of the configured string, if any.
  Object separatorParam = params.get(SEPARATOR);
  byte separator = separatorParam == null
      ? FreeTextSuggester.DEFAULT_SEPARATOR
      : separatorParam.toString().getBytes(StandardCharsets.UTF_8)[0];

  return new FreeTextSuggester(
      fieldType.getIndexAnalyzer(), fieldType.getQueryAnalyzer(), grams, separator);
}
示例8: testWithPolyFieldsAndFieldBoost
import org.apache.solr.schema.FieldType; //导入依赖的package包/类
@Test
public void testWithPolyFieldsAndFieldBoost() {
  IndexSchema schema = h.getCore().getLatestSchema();

  // Poly-field subtypes omit norms while the top-level fields keep them.
  assertFalse(schema.getField("store").omitNorms());
  assertTrue(schema.getField("store_0_coordinate").omitNorms());
  assertTrue(schema.getField("store_1_coordinate").omitNorms());
  assertFalse(schema.getField("amount").omitNorms());
  assertTrue(schema.getField("amount" + FieldType.POLY_FIELD_SEPARATOR + "_currency").omitNorms());
  assertTrue(schema.getField("amount" + FieldType.POLY_FIELD_SEPARATOR + "_amount_raw").omitNorms());

  SolrInputDocument input = new SolrInputDocument();
  input.addField("store", "40.7143,-74.006", 3.0f);
  input.addField("amount", "10.5", 3.0f);
  Document indexed = DocumentBuilder.toDocument(input, schema);

  assertNotNull(indexed.get("store"));
  assertNotNull(indexed.get("amount"));
  assertNotNull(indexed.getField("store_0_coordinate"));
  //NOTE: As the subtypes have omitNorm=true, they must have boost=1F, otherwise this is going to fail when adding the doc to Lucene.
  assertTrue(1f == indexed.getField("store_0_coordinate").boost());
  assertTrue(1f == indexed.getField("store_1_coordinate").boost());
  assertTrue(1f == indexed.getField("amount" + FieldType.POLY_FIELD_SEPARATOR + "_currency").boost());
  assertTrue(1f == indexed.getField("amount" + FieldType.POLY_FIELD_SEPARATOR + "_amount_raw").boost());
}
示例9: testWithPolyFieldsAndDocumentBoost
import org.apache.solr.schema.FieldType; //导入依赖的package包/类
@Test
public void testWithPolyFieldsAndDocumentBoost() {
  IndexSchema schema = h.getCore().getLatestSchema();

  // Poly-field subtypes omit norms while the top-level fields keep them.
  assertFalse(schema.getField("store").omitNorms());
  assertTrue(schema.getField("store_0_coordinate").omitNorms());
  assertTrue(schema.getField("store_1_coordinate").omitNorms());
  assertFalse(schema.getField("amount").omitNorms());
  assertTrue(schema.getField("amount" + FieldType.POLY_FIELD_SEPARATOR + "_currency").omitNorms());
  assertTrue(schema.getField("amount" + FieldType.POLY_FIELD_SEPARATOR + "_amount_raw").omitNorms());

  SolrInputDocument input = new SolrInputDocument();
  input.setDocumentBoost(3.0f);
  input.addField("store", "40.7143,-74.006");
  input.addField("amount", "10.5");
  Document indexed = DocumentBuilder.toDocument(input, schema);

  assertNotNull(indexed.get("store"));
  assertNotNull(indexed.getField("store_0_coordinate"));
  //NOTE: As the subtypes have omitNorm=true, they must have boost=1F, otherwise this is going to fail when adding the doc to Lucene.
  assertTrue(1f == indexed.getField("store_0_coordinate").boost());
  assertTrue(1f == indexed.getField("store_1_coordinate").boost());
  assertTrue(1f == indexed.getField("amount" + FieldType.POLY_FIELD_SEPARATOR + "_currency").boost());
  assertTrue(1f == indexed.getField("amount" + FieldType.POLY_FIELD_SEPARATOR + "_amount_raw").boost());
}
示例10: testStandardTokenizerVersions
import org.apache.solr.schema.FieldType; //导入依赖的package包/类
public void testStandardTokenizerVersions() throws Exception {
  assertEquals(DEFAULT_VERSION, solrConfig.luceneMatchVersion);
  final IndexSchema schema = h.getCore().getLatestSchema();

  // Tokenizer chains pick up the configured luceneMatchVersion.
  TokenizerChain chain = (TokenizerChain) schema.getFieldType("textDefault").getIndexAnalyzer();
  assertEquals(DEFAULT_VERSION, chain.getTokenizerFactory().getLuceneMatchVersion());
  assertEquals(DEFAULT_VERSION, chain.getTokenFilterFactories()[2].getLuceneMatchVersion());

  chain = (TokenizerChain) schema.getFieldType("text40").getIndexAnalyzer();
  assertEquals(Version.LUCENE_4_0_0_ALPHA, chain.getTokenizerFactory().getLuceneMatchVersion());
  assertEquals(Version.LUCENE_4_3_0, chain.getTokenFilterFactories()[2].getLuceneMatchVersion());

  // Whole-analyzer field types expose the version directly on the Analyzer.
  Analyzer analyzer = schema.getFieldType("textTurkishAnalyzerDefault").getIndexAnalyzer();
  assertTrue(analyzer instanceof TurkishAnalyzer);
  assertEquals(DEFAULT_VERSION, analyzer.getVersion());

  analyzer = schema.getFieldType("textTurkishAnalyzer40").getIndexAnalyzer();
  assertTrue(analyzer instanceof TurkishAnalyzer);
  assertEquals(Version.LUCENE_4_0_0_ALPHA, analyzer.getVersion());
}
示例11: handleFieldTypeAnalyzers
import org.apache.solr.schema.FieldType; //导入依赖的package包/类
/**
 * Resolves the analyzer for {@code fieldTypeName} in the requested mode and
 * registers it in {@code fieldTypeAnalyzers} under that name.
 *
 * No-op for an empty name or (when missing mappings are ignored) an unknown type.
 *
 * @throws SolrException BAD_REQUEST if the type is unknown and missing mappings are not ignored
 */
private void handleFieldTypeAnalyzers(Map<String, Analyzer> fieldTypeAnalyzers, AnalyzerMode analyzerMode,
    String fieldTypeName) {
  if (StringUtils.isEmpty(fieldTypeName)) {
    return;
  }
  FieldType fieldType = this.schema.getFieldTypeByName(fieldTypeName);
  if (fieldType == null) {
    if (!this.setting.isIgnoreMissingMappings()) {
      throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Invalid FieldType: " + fieldTypeName);
    }
    // BUG FIX: previously fell through with a null fieldType and threw an NPE below
    // when missing mappings were configured to be ignored; now we skip the mapping.
    return;
  }
  if (analyzerMode == AnalyzerMode.query) {
    fieldTypeAnalyzers.put(fieldTypeName, fieldType.getQueryAnalyzer());
  } else if (analyzerMode == AnalyzerMode.multiTerm) {
    // multiTerm mode is only valid for TextField-backed types — TODO confirm callers guarantee this.
    fieldTypeAnalyzers.put(fieldTypeName, ((TextField) fieldType).getMultiTermAnalyzer());
  } else {
    fieldTypeAnalyzers.put(fieldTypeName, fieldType.getAnalyzer());
  }
}
示例12: setUp
import org.apache.solr.schema.FieldType; //导入依赖的package包/类
@Before
public void setUp() {
  // Stub a schema exposing a single TextField registered under "test".
  fieldType = new TextField();
  Map<String, FieldType> typesByName = Maps.newHashMap();
  typesByName.put("test", fieldType);
  when(searcher.getSchema()).thenReturn(schema);
  when(schema.getFieldTypes()).thenReturn(typesByName);

  // Whitespace tokenizer + one filter each for index-time and query-time chains.
  TokenFilterFactory[] indexFilters = { indexTokenFilterFactory };
  TokenFilterFactory[] queryFilters = { queryTokenFilterFactory };
  indexAnalyzer = new TokenizerChain(
      new WhitespaceTokenizerFactory(Maps.<String, String> newHashMap()), indexFilters);
  queryAnalyzer = new TokenizerChain(
      new WhitespaceTokenizerFactory(Maps.<String, String> newHashMap()), queryFilters);

  reloader = new SearcherAwareReloader(null);
}
示例13: selectAnalyzer
import org.apache.solr.schema.FieldType; //导入依赖的package包/类
/**
 * Picks the analyzer matching the configured mode: index-time for INDEX,
 * query-time for QUERY, the generic analyzer when no mode is set, and
 * null for any other mode.
 */
private Analyzer selectAnalyzer(FieldType fieldType) {
  if (mode == Mode.INDEX) {
    return fieldType.getIndexAnalyzer();
  }
  if (mode == Mode.QUERY) {
    return fieldType.getQueryAnalyzer();
  }
  // Unset mode falls back to the generic analyzer; unknown modes yield null.
  return mode == null ? fieldType.getAnalyzer() : null;
}
示例14: getFieldQuery
import org.apache.solr.schema.FieldType; //导入依赖的package包/类
/**
 * Builds a query for a single field/value pair, honoring magic "_" fields
 * and delegating to the field type for non-tokenized fields.
 */
protected Query getFieldQuery(String field, String queryText, boolean quoted) throws SyntaxError {
  checkNullField(field);

  // A leading '_' marks a magic field used as a hook for built-in functions.
  if (field.charAt(0) == '_' && parser != null) {
    MagicFieldName magic = MagicFieldName.get(field);
    if (magic != null) {
      subQParser = parser.subQuery(queryText, magic.subParser);
      return subQParser.getQuery();
    }
  }

  SchemaField schemaField = schema.getFieldOrNull(field);
  if (schemaField != null) {
    FieldType type = schemaField.getType();
    if (type.isTokenized() && schemaField.indexed()) {
      // Tokenized + indexed: analyze here; TextField may force phrase queries.
      boolean forcePhrase =
          type instanceof TextField && ((TextField) type).getAutoGeneratePhraseQueries();
      return newFieldQuery(analyzer, field, queryText, quoted || forcePhrase);
    }
    // Everything else is delegated to the field type itself.
    return type.getFieldQuery(parser, schemaField, queryText);
  }

  // Unknown field: default to a normal analyzed field query.
  return newFieldQuery(analyzer, field, queryText, quoted);
}
示例15: get
import org.apache.solr.schema.FieldType; //导入依赖的package包/类
/** Renders every field type in the schema, sorted by name, into the Solr response. */
@Override
public Representation get() {
  try {
    List<SimpleOrderedMap<Object>> props = new ArrayList<>();
    // TreeMap gives deterministic, name-sorted iteration order.
    Map<String, FieldType> byName = new TreeMap<>(getSchema().getFieldTypes());
    for (FieldType fieldType : byName.values()) {
      props.add(getFieldTypeProperties(fieldType));
    }
    getSolrResponse().add(FIELD_TYPES, props);
  } catch (Exception e) {
    // Surface the failure in the response rather than propagating.
    getSolrResponse().setException(e);
  }
  handlePostExecution(log);
  return new SolrOutputRepresentation();
}