This article collects typical usage examples of the Java field org.apache.solr.common.SolrException.ErrorCode.SERVER_ERROR. If you are wondering what ErrorCode.SERVER_ERROR is for and how to use it, the curated examples below may help; you can also read further about the enclosing class, org.apache.solr.common.SolrException.ErrorCode.
The following 15 code examples of the ErrorCode.SERVER_ERROR field are shown, sorted by popularity by default.
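All 15 examples share one pattern: wrap a failure in a SolrException constructed with ErrorCode.SERVER_ERROR, which Solr reports as an HTTP 500. A minimal sketch of that pattern (the class, resource name, and helper are hypothetical):

import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;

import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrException.ErrorCode;

public class ServerErrorExample {
  // Rethrows any checked failure as an unchecked SolrException carrying the 500 code.
  static byte[] loadResource(String name) {
    try {
      return Files.readAllBytes(Paths.get(name));
    } catch (IOException e) {
      throw new SolrException(ErrorCode.SERVER_ERROR, "Could not load " + name, e);
    }
  }
}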
Example 1: retryOnThrowable
public static void retryOnThrowable(Class clazz, long timeoutms, long intervalms, RetryCmd cmd) throws Throwable {
  long timeout = System.nanoTime() + TimeUnit.NANOSECONDS.convert(timeoutms, TimeUnit.MILLISECONDS);
  while (true) {
    try {
      cmd.execute();
    } catch (Throwable t) {
      if (clazz.isInstance(t) && System.nanoTime() < timeout) {
        Thread.sleep(intervalms);
        continue;
      }
      throw new SolrException(ErrorCode.SERVER_ERROR, t);
    }
    // success
    break;
  }
}
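A usage sketch, assuming RetryCmd is a single-method interface declaring void execute() throws Throwable (as the loop above implies) and client.ping() is a hypothetical flaky call:

// Retry for up to 10 seconds, sleeping 250 ms between attempts, as long as
// the failure is an IOException; any other Throwable, or the same failure
// after the deadline, is rethrown wrapped in a SERVER_ERROR.
retryOnThrowable(IOException.class, 10000, 250, () -> client.ping());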
Example 2: getLatestVersion
/**
 * Gets the latest commit version and generation from the master.
 */
@SuppressWarnings("unchecked")
NamedList getLatestVersion() throws IOException {
  ModifiableSolrParams params = new ModifiableSolrParams();
  params.set(COMMAND, CMD_INDEX_VERSION);
  params.set(CommonParams.WT, "javabin");
  params.set(CommonParams.QT, "/replication");
  QueryRequest req = new QueryRequest(params);
  HttpSolrServer server = new HttpSolrServer(masterUrl, myHttpClient); // XXX modify to use shardhandler
  NamedList rsp;
  try {
    server.setSoTimeout(60000);
    server.setConnectionTimeout(15000);
    rsp = server.request(req);
  } catch (SolrServerException e) {
    throw new SolrException(ErrorCode.SERVER_ERROR, e.getMessage(), e);
  } finally {
    server.shutdown();
  }
  return rsp;
}
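The caller then reads the master's state out of the returned NamedList; the key names below follow the replication handler's indexversion response and are stated here as assumptions:

NamedList rsp = getLatestVersion();
// "indexversion" and "generation" are the keys the /replication handler
// is expected to use in its CMD_INDEX_VERSION response.
long latestVersion = (Long) rsp.get("indexversion");
long latestGeneration = (Long) rsp.get("generation");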
Example 3: createManagedResource
/**
 * Creates a ManagedResource using registration information.
 */
protected ManagedResource createManagedResource(ManagedResourceRegistration reg) throws SolrException {
  ManagedResource res = null;
  try {
    Constructor<? extends ManagedResource> ctor =
        reg.implClass.getConstructor(String.class, SolrResourceLoader.class, StorageIO.class);
    res = ctor.newInstance(reg.resourceId, loader, storageIO);
    res.loadManagedDataAndNotify(reg.observers);
  } catch (Exception e) {
    String errMsg =
        String.format(Locale.ROOT,
            "Failed to create new ManagedResource %s of type %s due to: %s",
            reg.resourceId, reg.implClass.getName(), e);
    throw new SolrException(ErrorCode.SERVER_ERROR, errMsg, e);
  }
  return res;
}
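The reflective lookup above requires every registered implementation to expose a (String, SolrResourceLoader, StorageIO) constructor. A hypothetical subclass sketch:

// Hypothetical ManagedResource subclass; only the constructor shape matters here.
public class ManagedWordList extends ManagedResource {
  public ManagedWordList(String resourceId, SolrResourceLoader loader, StorageIO storageIO)
      throws SolrException {
    super(resourceId, loader, storageIO);
  }
  // ... remaining abstract methods (data load/apply callbacks) omitted
}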
Example 4: inform
@Override
public void inform(SolrCore core) {
  final SchemaField field = core.getLatestSchema().getFieldOrNull(getSignatureField());
  if (null == field) {
    throw new SolrException(ErrorCode.SERVER_ERROR,
        "Can't use signatureField which does not exist in schema: " + getSignatureField());
  }
  if (getOverwriteDupes() && (!field.indexed())) {
    throw new SolrException(ErrorCode.SERVER_ERROR,
        "Can't set overwriteDupes when signatureField is not indexed: " + getSignatureField());
  }
}
Example 5: waitToSeeReplicasInState
private Map<String, Replica> waitToSeeReplicasInState(String collectionName, Collection<String> coreNames)
    throws InterruptedException {
  Map<String, Replica> result = new HashMap<>();
  long endTime = System.nanoTime() + TimeUnit.NANOSECONDS.convert(30, TimeUnit.SECONDS);
  while (true) {
    DocCollection coll = zkStateReader.getClusterState().getCollection(collectionName);
    for (String coreName : coreNames) {
      if (result.containsKey(coreName)) continue;
      for (Slice slice : coll.getSlices()) {
        for (Replica replica : slice.getReplicas()) {
          if (coreName.equals(replica.getStr(ZkStateReader.CORE_NAME_PROP))) {
            result.put(coreName, replica);
            break;
          }
        }
      }
    }
    if (result.size() == coreNames.size()) {
      return result;
    }
    if (System.nanoTime() > endTime) {
      throw new SolrException(ErrorCode.SERVER_ERROR, "Timed out waiting to see all replicas in cluster state.");
    }
    Thread.sleep(100);
  }
}
Example 6: readFromResourceLoader
public static AliasConfig readFromResourceLoader(SolrResourceLoader loader, String name) {
  try {
    return new AliasConfig(loader, name, null);
  } catch (Exception e) {
    String resource;
    if (loader instanceof ZkSolrResourceLoader) {
      resource = name;
    } else {
      resource = loader.getConfigDir() + name;
    }
    throw new SolrException(ErrorCode.SERVER_ERROR, "Error loading aliasing config from " + resource, e);
  }
}
Example 7: readFromResourceLoader
public static SolrConfig readFromResourceLoader(SolrResourceLoader loader, String name) {
  try {
    return new SolrConfig(loader, name, null);
  } catch (Exception e) {
    String resource;
    if (loader instanceof ZkSolrResourceLoader) {
      resource = name;
    } else {
      resource = loader.getConfigDir() + name;
    }
    throw new SolrException(ErrorCode.SERVER_ERROR, "Error loading solr config from " + resource, e);
  }
}
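Examples 6 and 7 share the same shape: delegate to a constructor, and rewrite any failure into a SERVER_ERROR whose message names the resolved resource path (the bare name under ZooKeeper, the config-dir path on a plain filesystem). A usage sketch with a hypothetical instance dir and the conventional file name:

SolrResourceLoader loader = new SolrResourceLoader("/var/solr/collection1");
SolrConfig config = SolrConfig.readFromResourceLoader(loader, "solrconfig.xml");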
Example 8: getDataHome
@Override
public String getDataHome(CoreDescriptor cd) throws IOException {
  if (hdfsDataDir == null) {
    throw new SolrException(ErrorCode.SERVER_ERROR, "You must set the "
        + this.getClass().getSimpleName() + " param " + HDFS_HOME
        + " for relative dataDir paths to work");
  }
  // by default, we go off the instance directory
  String path;
  if (cd.getCloudDescriptor() != null) {
    path = URLEncoder.encode(cd.getCloudDescriptor().getCollectionName(), "UTF-8")
        + "/"
        + URLEncoder.encode(cd.getCloudDescriptor().getCoreNodeName(), "UTF-8");
  } else {
    path = cd.getName();
  }
  return normalize(SolrResourceLoader.normalizeDir(
      ZkController.trimLeadingAndTrailingSlashes(hdfsDataDir) + "/" + path + "/" + cd.getDataDir()));
}
Example 9: loadConfigSolr
private ConfigSolr loadConfigSolr(SolrResourceLoader loader) {
  String solrxmlLocation = System.getProperty("solr.solrxml.location", "solrhome");
  if (solrxmlLocation == null || "solrhome".equalsIgnoreCase(solrxmlLocation))
    return ConfigSolr.fromSolrHome(loader, loader.getInstanceDir());
  if ("zookeeper".equalsIgnoreCase(solrxmlLocation)) {
    String zkHost = System.getProperty("zkHost");
    log.info("Trying to read solr.xml from " + zkHost);
    if (StringUtils.isEmpty(zkHost))
      throw new SolrException(ErrorCode.SERVER_ERROR,
          "Could not load solr.xml from zookeeper: zkHost system property not set");
    SolrZkClient zkClient = new SolrZkClient(zkHost, 30000);
    try {
      if (!zkClient.exists("/solr.xml", true))
        throw new SolrException(ErrorCode.SERVER_ERROR, "Could not load solr.xml from zookeeper: node not found");
      byte[] data = zkClient.getData("/solr.xml", null, null, true);
      return ConfigSolr.fromInputStream(loader, new ByteArrayInputStream(data));
    } catch (Exception e) {
      throw new SolrException(ErrorCode.SERVER_ERROR, "Could not load solr.xml from zookeeper", e);
    } finally {
      zkClient.close();
    }
  }
  throw new SolrException(ErrorCode.SERVER_ERROR,
      "Bad solr.solrxml.location set: " + solrxmlLocation + " - should be 'solrhome' or 'zookeeper'");
}
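The branch taken is driven entirely by two system properties; a sketch of pointing a node at ZooKeeper before the container starts (the ensemble address is hypothetical):

// Read solr.xml from the /solr.xml znode instead of the Solr home directory.
System.setProperty("solr.solrxml.location", "zookeeper");
System.setProperty("zkHost", "zk1:2181,zk2:2181,zk3:2181");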
Example 10: replay
private Future<RecoveryInfo> replay(SolrCore core)
    throws InterruptedException, ExecutionException {
  Future<RecoveryInfo> future = core.getUpdateHandler().getUpdateLog().applyBufferedUpdates();
  if (future == null) {
    // no replay needed
    log.info("No replay needed. core=" + coreName);
  } else {
    log.info("Replaying buffered documents. core=" + coreName);
    // wait for replay
    RecoveryInfo report = future.get();
    if (report.failed) {
      SolrException.log(log, "Replay failed");
      throw new SolrException(ErrorCode.SERVER_ERROR, "Replay failed");
    }
  }
  // solrcloud_debug
  if (log.isDebugEnabled()) {
    try {
      RefCounted<SolrIndexSearcher> searchHolder = core.getNewestSearcher(false);
      SolrIndexSearcher searcher = searchHolder.get();
      try {
        log.debug(core.getCoreDescriptor().getCoreContainer().getZkController().getNodeName()
            + " replayed " + searcher.search(new MatchAllDocsQuery(), 1).totalHits);
      } finally {
        searchHolder.decref();
      }
    } catch (Exception e) {
      throw new SolrException(ErrorCode.SERVER_ERROR, null, e);
    }
  }
  return future;
}
Example 11: addDynamicField
private void addDynamicField(List<DynamicField> dFields, SchemaField f) {
  if (isDuplicateDynField(dFields, f)) {
    String msg = "[schema.xml] Duplicate DynamicField definition for '" + f.getName() + "'";
    throw new SolrException(ErrorCode.SERVER_ERROR, msg);
  } else {
    addDynamicFieldNoDupCheck(dFields, f);
  }
}
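A minimal sketch of what the duplicate check could look like, assuming DynamicField exposes its wildcard pattern via getRegex(); the real helper lives next to this method in the schema class and may differ:

// Hypothetical reimplementation of isDuplicateDynField, for illustration only.
private boolean isDuplicateDynField(List<DynamicField> dFields, SchemaField f) {
  for (DynamicField df : dFields) {
    if (df.getRegex().equals(f.getName())) {
      return true; // same pattern already registered
    }
  }
  return false;
}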
Example 12: onManagedResourceInitialized
/**
 * Called once, during core initialization, to initialize any analysis components
 * that depend on the data managed by this resource. It is important that the
 * analysis component is only initialized once during core initialization so that
 * text analysis is consistent, especially in a distributed environment, as we
 * don't want one server applying a different set of stop words than other servers.
 */
@SuppressWarnings("unchecked")
@Override
public void onManagedResourceInitialized(NamedList<?> initArgs, final ManagedResource res)
    throws SolrException {
  NamedList<Object> args = (NamedList<Object>) initArgs;
  args.add("synonyms", getResourceId());
  args.add("expand", "false");
  args.add("format", "solr");
  Map<String, String> filtArgs = new HashMap<>();
  for (Map.Entry<String, ?> entry : args) {
    filtArgs.put(entry.getKey(), entry.getValue().toString());
  }
  // create the actual filter factory that pulls the synonym mappings
  // from synonymMappings using a custom parser implementation
  delegate = new FSTSynonymFilterFactory(filtArgs) {
    @Override
    protected SynonymMap loadSynonyms(ResourceLoader loader, String cname, boolean dedup, Analyzer analyzer)
        throws IOException, ParseException {
      ManagedSynonymParser parser =
          new ManagedSynonymParser((SynonymManager) res, dedup, analyzer);
      // null is safe here because there's no actual parsing done against an input Reader
      parser.parse(null);
      return parser.build();
    }
  };
  try {
    delegate.inform(res.getResourceLoader());
  } catch (IOException e) {
    throw new SolrException(ErrorCode.SERVER_ERROR, e);
  }
}
Example 13: doDistribIntervals
private void doDistribIntervals(FacetInfo fi, NamedList facet_counts) {
  @SuppressWarnings("unchecked")
  SimpleOrderedMap<SimpleOrderedMap<Integer>> facet_intervals =
      (SimpleOrderedMap<SimpleOrderedMap<Integer>>) facet_counts.get("facet_intervals");
  if (facet_intervals != null) {
    for (Map.Entry<String, SimpleOrderedMap<Integer>> entry : facet_intervals) {
      final String field = entry.getKey();
      SimpleOrderedMap<Integer> existingCounts = fi.intervalFacets.get(field);
      if (existingCounts == null) {
        // first time we've seen this field, no merging
        fi.intervalFacets.add(field, entry.getValue());
      } else {
        // not the first time, merge current field counts
        Iterator<Map.Entry<String, Integer>> newItr = entry.getValue().iterator();
        Iterator<Map.Entry<String, Integer>> exItr = existingCounts.iterator();
        // all intervals should be returned by each shard, even if they have zero count,
        // and in the same order
        while (exItr.hasNext()) {
          Map.Entry<String, Integer> exItem = exItr.next();
          if (!newItr.hasNext()) {
            throw new SolrException(ErrorCode.SERVER_ERROR,
                "Interval facet shard response missing key: " + exItem.getKey());
          }
          Map.Entry<String, Integer> newItem = newItr.next();
          if (!newItem.getKey().equals(exItem.getKey())) {
            throw new SolrException(ErrorCode.SERVER_ERROR,
                "Interval facet shard response has extra key: " + newItem.getKey());
          }
          exItem.setValue(exItem.getValue() + newItem.getValue());
        }
        if (newItr.hasNext()) {
          throw new SolrException(ErrorCode.SERVER_ERROR,
              "Interval facet shard response has at least one extra key: " + newItr.next().getKey());
        }
      }
    }
  }
}
Example 14: getCore
/**
 * Gets a core by name and increases its refcount.
 *
 * @see SolrCore#close()
 * @param name the core name
 * @return the core if found, null if a SolrCore by this name does not exist
 * @exception SolrException if a SolrCore with this name failed to be initialized
 */
public SolrCore getCore(String name) {
  name = checkDefault(name);
  // Do this in two phases since we don't want to lock access to the cores over a load.
  SolrCore core = solrCores.getCoreFromAnyList(name, true);
  if (core != null) {
    return core;
  }
  // OK, it's not presently in any list, is it in the list of dynamic cores but not loaded yet? If so, load it.
  CoreDescriptor desc = solrCores.getDynamicDescriptor(name);
  if (desc == null) { // Nope, no transient core with this name
    // if there was an error initializing this core, throw a 500
    // error with the details for clients attempting to access it.
    CoreLoadFailure loadFailure = getCoreInitFailures().get(name);
    if (null != loadFailure) {
      throw new SolrException(ErrorCode.SERVER_ERROR, "SolrCore '" + name +
          "' is not available due to init failure: " +
          loadFailure.exception.getMessage(), loadFailure.exception);
    }
    // otherwise the user is simply asking for something that doesn't exist.
    return null;
  }
  // This will put an entry in pending core ops if the core isn't loaded
  core = solrCores.waitAddPendingCoreOps(name);
  if (isShutDown) return null; // We're quitting, so stop. This needs to be after the wait above since we may come off
                               // the wait as a consequence of shutting down.
  try {
    if (core == null) {
      if (zkSys.getZkController() != null) {
        zkSys.getZkController().throwErrorIfReplicaReplaced(desc);
      }
      core = create(desc); // This should throw an error if it fails.
    }
    core.open();
  } finally {
    solrCores.removeFromPendingOps(name);
  }
  return core;
}
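Because getCore() takes a reference, every successful call must be balanced with SolrCore.close(), as the @see tag hints. A usage sketch (the core name is hypothetical):

SolrCore core = coreContainer.getCore("collection1");
if (core != null) {
  try {
    // ... use the core ...
  } finally {
    core.close(); // releases the refcount taken by getCore()
  }
}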
Example 15: setup
/**
 * Sets up the field according to the provided parameters.
 */
private void setup(ResourceLoader loader, Map<String, String> args) {
  String custom = args.remove("custom");
  String language = args.remove("language");
  String country = args.remove("country");
  String variant = args.remove("variant");
  String strength = args.remove("strength");
  String decomposition = args.remove("decomposition");

  final Collator collator;
  if (custom == null && language == null)
    throw new SolrException(ErrorCode.SERVER_ERROR, "Either custom or language is required.");
  if (custom != null && (language != null || country != null || variant != null))
    throw new SolrException(ErrorCode.SERVER_ERROR, "Cannot specify both language and custom. "
        + "To tailor rules for a built-in language, see the javadocs for RuleBasedCollator. "
        + "Then save the entire customized ruleset to a file, and use it with the custom parameter.");
  if (language != null) {
    // create from a system collator, based on Locale.
    collator = createFromLocale(language, country, variant);
  } else {
    // create from a custom ruleset
    collator = createFromRules(custom, loader);
  }
  // set the strength flag, otherwise it will be the default.
  if (strength != null) {
    if (strength.equalsIgnoreCase("primary"))
      collator.setStrength(Collator.PRIMARY);
    else if (strength.equalsIgnoreCase("secondary"))
      collator.setStrength(Collator.SECONDARY);
    else if (strength.equalsIgnoreCase("tertiary"))
      collator.setStrength(Collator.TERTIARY);
    else if (strength.equalsIgnoreCase("identical"))
      collator.setStrength(Collator.IDENTICAL);
    else
      throw new SolrException(ErrorCode.SERVER_ERROR, "Invalid strength: " + strength);
  }
  // set the decomposition flag, otherwise it will be the default.
  if (decomposition != null) {
    if (decomposition.equalsIgnoreCase("no"))
      collator.setDecomposition(Collator.NO_DECOMPOSITION);
    else if (decomposition.equalsIgnoreCase("canonical"))
      collator.setDecomposition(Collator.CANONICAL_DECOMPOSITION);
    else if (decomposition.equalsIgnoreCase("full"))
      collator.setDecomposition(Collator.FULL_DECOMPOSITION);
    else
      throw new SolrException(ErrorCode.SERVER_ERROR, "Invalid decomposition: " + decomposition);
  }
  analyzer = new CollationKeyAnalyzer(collator);
}
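A sketch of the argument map this method consumes; the values mirror the branches above, and the surrounding field-type wiring is omitted:

Map<String, String> args = new HashMap<>();
args.put("language", "en");            // use a system collator built from a Locale
args.put("country", "US");
args.put("strength", "primary");       // ignore case and accent differences
args.put("decomposition", "canonical");
setup(loader, args);                   // 'loader' as provided by the enclosing field type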