本文整理匯總了Java中org.apache.solr.client.solrj.SolrClient.add方法的典型用法代碼示例。如果您正苦於以下問題:Java SolrClient.add方法的具體用法?Java SolrClient.add怎麽用?Java SolrClient.add使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類org.apache.solr.client.solrj.SolrClient
的用法示例。
在下文中一共展示了SolrClient.add方法的7個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Java代碼示例。
示例1: testRuntimeLib
import org.apache.solr.client.solrj.SolrClient; //導入方法依賴的package包/類
@Test
public void testRuntimeLib() throws SolrServerException, IOException {
    // Obtain the Solr backend behind the search server under test.
    SearchServer searchServer = testSearchServer.getSearchServer();
    SolrClient solrClient = (SolrClient) searchServer.getBackend();

    // Index one document carrying two dynamic multi-facet string fields.
    SolrInputDocument inputDoc = new SolrInputDocument();
    inputDoc.setField("_id_", "1");
    inputDoc.setField("_type_", "doc");
    inputDoc.setField("dynamic_multi_facet_string_f1", "test");
    inputDoc.setField("dynamic_multi_facet_string_f2", "hello");
    solrClient.add(inputDoc);
    solrClient.commit();

    // Query the suggester request handler for the prefix "t" on field f1.
    SolrQuery suggestQuery = new SolrQuery("t");
    suggestQuery.setRequestHandler("/suggester");
    suggestQuery.set("suggestion.df", "facets");
    suggestQuery.set("suggestion.field", "dynamic_multi_facet_string_f1");

    // Exactly one suggestion is expected for the single indexed document.
    QueryResponse queryResponse = solrClient.query(suggestQuery);
    assertEquals(1, ((HashMap) queryResponse.getResponse().get("suggestions")).get("suggestion_count"));
}
示例2: updateDocs
import org.apache.solr.client.solrj.SolrClient; //導入方法依賴的package包/類
/**
 * Adds the given documents to the suggester collection and optionally commits.
 *
 * @param docs   the documents to send to Solr
 * @param commit whether to issue a commit after the add
 * @throws SolrServerException if the Solr server rejects the update
 * @throws IOException         on communication failure with the server
 */
private void updateDocs( List<SolrInputDocument> docs, boolean commit ) throws SolrServerException, IOException {
LOG.debug( "updateDocs " + docs.size( ) );
SolrClient suggestClient = getSuggestClient( );
try {
    UpdateResponse resp = suggestClient.add( suggesterCollection, docs );
    // A non-zero status means the update was not fully successful; surface it in the log.
    if ( resp.getStatus( ) != 0 ) {
        LOG.error( "Update of " + docs.size( ) + " docs returned non-zero status: " + resp.getStatus( ) );
    }
}
catch ( SolrServerException sse ) {
    // Pass the exception object so the stack trace is preserved, then propagate to the caller.
    LOG.error( "Got SolrServerException while adding documents", sse );
    throw sse;
}
if (commit) {
    LOG.debug( "committing to " + suggestHost );
    suggestClient.commit( suggesterCollection );
}
}
示例3: bulkIndex
import org.apache.solr.client.solrj.SolrClient; //導入方法依賴的package包/類
/**
* Bulk index. The collection is split into parts of one thousand.
*
* @param solrDocumentList the list to index
*/
/**
 * Bulk index. The collection is split into parts of one thousand.
 *
 * @param solrDocumentList the list to index
 */
public static void bulkIndex(List<SolrInputDocument> solrDocumentList) {
SolrClient client = getCore().getConcurrentClient();
int totalDocs = solrDocumentList.size();
int indexedSoFar = 0;
// Send the documents in chunks of 1000, each with a 500 ms commitWithin window.
for (List<SolrInputDocument> chunk : Lists.partition(solrDocumentList, 1000)) {
    try {
        client.add(chunk, 500);
        indexedSoFar += chunk.size();
        LOGGER.info("Added {}/{} documents", indexedSoFar, totalDocs);
    } catch (SolrServerException | IOException e) {
        // A failed chunk is logged and skipped; remaining chunks are still attempted.
        LOGGER.error("Unable to add classification documents.", e);
    }
}
}
示例4: setUp
import org.apache.solr.client.solrj.SolrClient; //導入方法依賴的package包/類
/**
* Indexes some sample data as test fixture.
*
* @throws Exception hopefully never, otherwise the test will fail.
*/
/**
 * Indexes some sample data as test fixture.
 *
 * @throws Exception hopefully never, otherwise the test will fail.
 */
@Before
public void setUp() throws Exception {
super.setUp();
// One sample book is indexed and committed so every test starts from a known state.
final SolrInputDocument sampleBook = new SolrInputDocument();
sampleBook.setField("id", "1");
sampleBook.setField("title", "Apache Solr Essentials");
sampleBook.setField("author", "Andrea Gazzarini");

final SolrClient indexer = getSolrClient();
indexer.add(sampleBook);
indexer.commit();
}
示例5: visit
import org.apache.solr.client.solrj.SolrClient; //導入方法依賴的package包/類
/**
 * Recursively walks a tree-shaped JSON model node, indexing one Solr document
 * per split node (model name, split feature, split threshold), then recursing
 * into the LEFT and RIGHT children.
 */
private void visit(SolrClient solr, String modelName, JsonNode node) throws IOException, SolrServerException {
// Nodes that are null or carry a VALUE are leaves; they are not indexed.
if (node == null || node.has(VALUE)) {
    return;
}
String splitFeature = node.get(FEATURE_SPLIT).asText();
double splitThreshold = Double.parseDouble(node.get(THRESHOLD_SPLIT).asText());

SolrInputDocument splitDoc = new SolrInputDocument();
splitDoc.addField(MODEL_NAME, modelName);
splitDoc.addField(FEATURE, splitFeature);
splitDoc.addField(THRESHOLD, splitThreshold);
solr.add(splitDoc);

visit(solr, modelName, node.get(LEFT));
visit(solr, modelName, node.get(RIGHT));
}
示例6: index
import org.apache.solr.client.solrj.SolrClient; //導入方法依賴的package包/類
@Override
public void index() throws IOException, SolrServerException {
int totalCount = contentMap.size();
LOGGER.info("Handling {} documents", totalCount);
//multithread processing will result in too many http request
UpdateResponse updateResponse;
try {
Iterator<SolrInputDocument> documents = MCRSolrInputDocumentFactory.getInstance().getDocuments(contentMap);
SolrClient solrClient = getSolrClient();
if (solrClient instanceof ConcurrentUpdateSolrClient) {
//split up to speed up processing
splitup(documents);
return;
}
if (LOGGER.isDebugEnabled()) {
ArrayList<SolrInputDocument> debugList = new ArrayList<>();
while (documents.hasNext()) {
debugList.add(documents.next());
}
LOGGER.debug("Sending these documents: {}", debugList);
//recreate documents interator;
documents = debugList.iterator();
}
if (solrClient instanceof HttpSolrClient) {
updateResponse = solrClient.add(documents);
} else {
ArrayList<SolrInputDocument> docs = new ArrayList<>(totalCount);
while (documents.hasNext()) {
docs.add(documents.next());
}
updateResponse = solrClient.add(docs);
}
} catch (Throwable e) {
LOGGER.warn("Error while indexing document collection. Split and retry.", e);
splitup();
return;
}
if (updateResponse.getStatus() != 0) {
LOGGER.error("Error while indexing document collection. Split and retry: {}", updateResponse.getResponse());
splitup();
} else {
LOGGER.info("Sending {} documents was successful in {} ms.", totalCount, updateResponse.getElapsedTime());
}
}
示例7: main
import org.apache.solr.client.solrj.SolrClient; //導入方法依賴的package包/類
/**
 * Reads documents from a Lucene index directory and loads mapped fields into
 * an auto-complete Solr core, batching adds in groups of up to 1000 documents.
 *
 * <p>Arguments: {@code </path/to/index> <AutoCompleteSolrUrl> <indexField,acField> [...]}.
 * Documents whose mapped "phrase" field would be empty are skipped entirely.
 *
 * @throws CorruptIndexException if the Lucene index is damaged
 * @throws IOException           on index or network I/O failure
 * @throws SolrServerException   if the Solr server rejects an update
 */
public static void main(String[] args) throws CorruptIndexException, IOException, SolrServerException {
if (args.length < 3) {
System.err.println("Usage: java -Dfile.encoding=UTF8 -Dclient.encoding.override=UTF-8 -Xmx256m -Xms256m -server " + IndexLoader.class.getName()
+ " </path/to/index> <AutoCompleteSolrUrl> <indexField1,acField1> [indexField2,acField2 ... ]");
System.exit(0);
}
Map<String,String> fieldMap = getFieldMapping(args, 2);
DirectoryReader reader = DirectoryReader.open(FSDirectory.open(Paths.get(args[0])));
int maxDoc = reader.maxDoc();
SolrClient solr = new ConcurrentUpdateSolrClient.Builder(args[1]).withQueueSize(10000).withThreadCount(2).build();
Set<SolrInputDocument> batch = new HashSet<SolrInputDocument>(1000);
Bits liveDocs = MultiFields.getLiveDocs(reader);
int added = 0;
// go through all docs in the index
for (int i = 0; i < maxDoc; i++) {
// process doc only if not deleted
if (liveDocs == null || liveDocs.get(i)) {
// loop through all fields to be looked at
SolrInputDocument doc = new SolrInputDocument();
boolean phraseFieldEmpty = false;
for (Map.Entry<String,String> mapping : fieldMap.entrySet()) {
String indexField = mapping.getKey();
String acField = mapping.getValue();
IndexableField field = reader.document(i).getField(indexField);
// reuse the already-fetched field instead of re-reading the stored document
String value = field != null ? field.stringValue() : null;
if (field != null && value != null && !value.isEmpty()) {
doc.addField(acField, value);
} else {
// a missing "phrase" field disqualifies the whole document
if (acField.equalsIgnoreCase("phrase")) {
System.err.println("Since AC phrase field would be null, this doc will not be created: " + reader.document(i));
phraseFieldEmpty = true;
break;
}
}
}
if (!phraseFieldEmpty) {
// BUG FIX: the original declared 'batch' but never filled it, and tested
// the constant total doc count ('docs % 1000') for progress reporting.
batch.add(doc);
added++;
if (batch.size() >= 1000) {
solr.add(batch);
batch.clear();
}
if (added % 1000 == 0) {
System.out.println("Docs: " + added);
}
}
}
}
// flush any remaining partial batch
if (!batch.isEmpty())
solr.add(batch);
reader.close();
System.out.println("Optimizing...");
solr.optimize();
solr.close();
}